Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
[linux-2.6-microblaze.git] / net / bluetooth / mgmt.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43
44 #define MGMT_VERSION    1
45 #define MGMT_REVISION   22
46
47 static const u16 mgmt_commands[] = {
48         MGMT_OP_READ_INDEX_LIST,
49         MGMT_OP_READ_INFO,
50         MGMT_OP_SET_POWERED,
51         MGMT_OP_SET_DISCOVERABLE,
52         MGMT_OP_SET_CONNECTABLE,
53         MGMT_OP_SET_FAST_CONNECTABLE,
54         MGMT_OP_SET_BONDABLE,
55         MGMT_OP_SET_LINK_SECURITY,
56         MGMT_OP_SET_SSP,
57         MGMT_OP_SET_HS,
58         MGMT_OP_SET_LE,
59         MGMT_OP_SET_DEV_CLASS,
60         MGMT_OP_SET_LOCAL_NAME,
61         MGMT_OP_ADD_UUID,
62         MGMT_OP_REMOVE_UUID,
63         MGMT_OP_LOAD_LINK_KEYS,
64         MGMT_OP_LOAD_LONG_TERM_KEYS,
65         MGMT_OP_DISCONNECT,
66         MGMT_OP_GET_CONNECTIONS,
67         MGMT_OP_PIN_CODE_REPLY,
68         MGMT_OP_PIN_CODE_NEG_REPLY,
69         MGMT_OP_SET_IO_CAPABILITY,
70         MGMT_OP_PAIR_DEVICE,
71         MGMT_OP_CANCEL_PAIR_DEVICE,
72         MGMT_OP_UNPAIR_DEVICE,
73         MGMT_OP_USER_CONFIRM_REPLY,
74         MGMT_OP_USER_CONFIRM_NEG_REPLY,
75         MGMT_OP_USER_PASSKEY_REPLY,
76         MGMT_OP_USER_PASSKEY_NEG_REPLY,
77         MGMT_OP_READ_LOCAL_OOB_DATA,
78         MGMT_OP_ADD_REMOTE_OOB_DATA,
79         MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80         MGMT_OP_START_DISCOVERY,
81         MGMT_OP_STOP_DISCOVERY,
82         MGMT_OP_CONFIRM_NAME,
83         MGMT_OP_BLOCK_DEVICE,
84         MGMT_OP_UNBLOCK_DEVICE,
85         MGMT_OP_SET_DEVICE_ID,
86         MGMT_OP_SET_ADVERTISING,
87         MGMT_OP_SET_BREDR,
88         MGMT_OP_SET_STATIC_ADDRESS,
89         MGMT_OP_SET_SCAN_PARAMS,
90         MGMT_OP_SET_SECURE_CONN,
91         MGMT_OP_SET_DEBUG_KEYS,
92         MGMT_OP_SET_PRIVACY,
93         MGMT_OP_LOAD_IRKS,
94         MGMT_OP_GET_CONN_INFO,
95         MGMT_OP_GET_CLOCK_INFO,
96         MGMT_OP_ADD_DEVICE,
97         MGMT_OP_REMOVE_DEVICE,
98         MGMT_OP_LOAD_CONN_PARAM,
99         MGMT_OP_READ_UNCONF_INDEX_LIST,
100         MGMT_OP_READ_CONFIG_INFO,
101         MGMT_OP_SET_EXTERNAL_CONFIG,
102         MGMT_OP_SET_PUBLIC_ADDRESS,
103         MGMT_OP_START_SERVICE_DISCOVERY,
104         MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105         MGMT_OP_READ_EXT_INDEX_LIST,
106         MGMT_OP_READ_ADV_FEATURES,
107         MGMT_OP_ADD_ADVERTISING,
108         MGMT_OP_REMOVE_ADVERTISING,
109         MGMT_OP_GET_ADV_SIZE_INFO,
110         MGMT_OP_START_LIMITED_DISCOVERY,
111         MGMT_OP_READ_EXT_INFO,
112         MGMT_OP_SET_APPEARANCE,
113         MGMT_OP_GET_PHY_CONFIGURATION,
114         MGMT_OP_SET_PHY_CONFIGURATION,
115         MGMT_OP_SET_BLOCKED_KEYS,
116         MGMT_OP_SET_WIDEBAND_SPEECH,
117         MGMT_OP_READ_CONTROLLER_CAP,
118         MGMT_OP_READ_EXP_FEATURES_INFO,
119         MGMT_OP_SET_EXP_FEATURE,
120         MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121         MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122         MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123         MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124         MGMT_OP_GET_DEVICE_FLAGS,
125         MGMT_OP_SET_DEVICE_FLAGS,
126         MGMT_OP_READ_ADV_MONITOR_FEATURES,
127         MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128         MGMT_OP_REMOVE_ADV_MONITOR,
129         MGMT_OP_ADD_EXT_ADV_PARAMS,
130         MGMT_OP_ADD_EXT_ADV_DATA,
131         MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132 };
133
134 static const u16 mgmt_events[] = {
135         MGMT_EV_CONTROLLER_ERROR,
136         MGMT_EV_INDEX_ADDED,
137         MGMT_EV_INDEX_REMOVED,
138         MGMT_EV_NEW_SETTINGS,
139         MGMT_EV_CLASS_OF_DEV_CHANGED,
140         MGMT_EV_LOCAL_NAME_CHANGED,
141         MGMT_EV_NEW_LINK_KEY,
142         MGMT_EV_NEW_LONG_TERM_KEY,
143         MGMT_EV_DEVICE_CONNECTED,
144         MGMT_EV_DEVICE_DISCONNECTED,
145         MGMT_EV_CONNECT_FAILED,
146         MGMT_EV_PIN_CODE_REQUEST,
147         MGMT_EV_USER_CONFIRM_REQUEST,
148         MGMT_EV_USER_PASSKEY_REQUEST,
149         MGMT_EV_AUTH_FAILED,
150         MGMT_EV_DEVICE_FOUND,
151         MGMT_EV_DISCOVERING,
152         MGMT_EV_DEVICE_BLOCKED,
153         MGMT_EV_DEVICE_UNBLOCKED,
154         MGMT_EV_DEVICE_UNPAIRED,
155         MGMT_EV_PASSKEY_NOTIFY,
156         MGMT_EV_NEW_IRK,
157         MGMT_EV_NEW_CSRK,
158         MGMT_EV_DEVICE_ADDED,
159         MGMT_EV_DEVICE_REMOVED,
160         MGMT_EV_NEW_CONN_PARAM,
161         MGMT_EV_UNCONF_INDEX_ADDED,
162         MGMT_EV_UNCONF_INDEX_REMOVED,
163         MGMT_EV_NEW_CONFIG_OPTIONS,
164         MGMT_EV_EXT_INDEX_ADDED,
165         MGMT_EV_EXT_INDEX_REMOVED,
166         MGMT_EV_LOCAL_OOB_DATA_UPDATED,
167         MGMT_EV_ADVERTISING_ADDED,
168         MGMT_EV_ADVERTISING_REMOVED,
169         MGMT_EV_EXT_INFO_CHANGED,
170         MGMT_EV_PHY_CONFIGURATION_CHANGED,
171         MGMT_EV_EXP_FEATURE_CHANGED,
172         MGMT_EV_DEVICE_FLAGS_CHANGED,
173         MGMT_EV_ADV_MONITOR_ADDED,
174         MGMT_EV_ADV_MONITOR_REMOVED,
175         MGMT_EV_CONTROLLER_SUSPEND,
176         MGMT_EV_CONTROLLER_RESUME,
177         MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
178         MGMT_EV_ADV_MONITOR_DEVICE_LOST,
179 };
180
181 static const u16 mgmt_untrusted_commands[] = {
182         MGMT_OP_READ_INDEX_LIST,
183         MGMT_OP_READ_INFO,
184         MGMT_OP_READ_UNCONF_INDEX_LIST,
185         MGMT_OP_READ_CONFIG_INFO,
186         MGMT_OP_READ_EXT_INDEX_LIST,
187         MGMT_OP_READ_EXT_INFO,
188         MGMT_OP_READ_CONTROLLER_CAP,
189         MGMT_OP_READ_EXP_FEATURES_INFO,
190         MGMT_OP_READ_DEF_SYSTEM_CONFIG,
191         MGMT_OP_READ_DEF_RUNTIME_CONFIG,
192 };
193
194 static const u16 mgmt_untrusted_events[] = {
195         MGMT_EV_INDEX_ADDED,
196         MGMT_EV_INDEX_REMOVED,
197         MGMT_EV_NEW_SETTINGS,
198         MGMT_EV_CLASS_OF_DEV_CHANGED,
199         MGMT_EV_LOCAL_NAME_CHANGED,
200         MGMT_EV_UNCONF_INDEX_ADDED,
201         MGMT_EV_UNCONF_INDEX_REMOVED,
202         MGMT_EV_NEW_CONFIG_OPTIONS,
203         MGMT_EV_EXT_INDEX_ADDED,
204         MGMT_EV_EXT_INDEX_REMOVED,
205         MGMT_EV_EXT_INFO_CHANGED,
206         MGMT_EV_EXP_FEATURE_CHANGED,
207 };
208
209 #define CACHE_TIMEOUT   msecs_to_jiffies(2 * 1000)
210
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212                  "\x00\x00\x00\x00\x00\x00\x00\x00"
213
214 /* HCI to MGMT error code conversion table */
215 static const u8 mgmt_status_table[] = {
216         MGMT_STATUS_SUCCESS,
217         MGMT_STATUS_UNKNOWN_COMMAND,    /* Unknown Command */
218         MGMT_STATUS_NOT_CONNECTED,      /* No Connection */
219         MGMT_STATUS_FAILED,             /* Hardware Failure */
220         MGMT_STATUS_CONNECT_FAILED,     /* Page Timeout */
221         MGMT_STATUS_AUTH_FAILED,        /* Authentication Failed */
222         MGMT_STATUS_AUTH_FAILED,        /* PIN or Key Missing */
223         MGMT_STATUS_NO_RESOURCES,       /* Memory Full */
224         MGMT_STATUS_TIMEOUT,            /* Connection Timeout */
225         MGMT_STATUS_NO_RESOURCES,       /* Max Number of Connections */
226         MGMT_STATUS_NO_RESOURCES,       /* Max Number of SCO Connections */
227         MGMT_STATUS_ALREADY_CONNECTED,  /* ACL Connection Exists */
228         MGMT_STATUS_BUSY,               /* Command Disallowed */
229         MGMT_STATUS_NO_RESOURCES,       /* Rejected Limited Resources */
230         MGMT_STATUS_REJECTED,           /* Rejected Security */
231         MGMT_STATUS_REJECTED,           /* Rejected Personal */
232         MGMT_STATUS_TIMEOUT,            /* Host Timeout */
233         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Feature */
234         MGMT_STATUS_INVALID_PARAMS,     /* Invalid Parameters */
235         MGMT_STATUS_DISCONNECTED,       /* OE User Ended Connection */
236         MGMT_STATUS_NO_RESOURCES,       /* OE Low Resources */
237         MGMT_STATUS_DISCONNECTED,       /* OE Power Off */
238         MGMT_STATUS_DISCONNECTED,       /* Connection Terminated */
239         MGMT_STATUS_BUSY,               /* Repeated Attempts */
240         MGMT_STATUS_REJECTED,           /* Pairing Not Allowed */
241         MGMT_STATUS_FAILED,             /* Unknown LMP PDU */
242         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported Remote Feature */
243         MGMT_STATUS_REJECTED,           /* SCO Offset Rejected */
244         MGMT_STATUS_REJECTED,           /* SCO Interval Rejected */
245         MGMT_STATUS_REJECTED,           /* Air Mode Rejected */
246         MGMT_STATUS_INVALID_PARAMS,     /* Invalid LMP Parameters */
247         MGMT_STATUS_FAILED,             /* Unspecified Error */
248         MGMT_STATUS_NOT_SUPPORTED,      /* Unsupported LMP Parameter Value */
249         MGMT_STATUS_FAILED,             /* Role Change Not Allowed */
250         MGMT_STATUS_TIMEOUT,            /* LMP Response Timeout */
251         MGMT_STATUS_FAILED,             /* LMP Error Transaction Collision */
252         MGMT_STATUS_FAILED,             /* LMP PDU Not Allowed */
253         MGMT_STATUS_REJECTED,           /* Encryption Mode Not Accepted */
254         MGMT_STATUS_FAILED,             /* Unit Link Key Used */
255         MGMT_STATUS_NOT_SUPPORTED,      /* QoS Not Supported */
256         MGMT_STATUS_TIMEOUT,            /* Instant Passed */
257         MGMT_STATUS_NOT_SUPPORTED,      /* Pairing Not Supported */
258         MGMT_STATUS_FAILED,             /* Transaction Collision */
259         MGMT_STATUS_FAILED,             /* Reserved for future use */
260         MGMT_STATUS_INVALID_PARAMS,     /* Unacceptable Parameter */
261         MGMT_STATUS_REJECTED,           /* QoS Rejected */
262         MGMT_STATUS_NOT_SUPPORTED,      /* Classification Not Supported */
263         MGMT_STATUS_REJECTED,           /* Insufficient Security */
264         MGMT_STATUS_INVALID_PARAMS,     /* Parameter Out Of Range */
265         MGMT_STATUS_FAILED,             /* Reserved for future use */
266         MGMT_STATUS_BUSY,               /* Role Switch Pending */
267         MGMT_STATUS_FAILED,             /* Reserved for future use */
268         MGMT_STATUS_FAILED,             /* Slot Violation */
269         MGMT_STATUS_FAILED,             /* Role Switch Failed */
270         MGMT_STATUS_INVALID_PARAMS,     /* EIR Too Large */
271         MGMT_STATUS_NOT_SUPPORTED,      /* Simple Pairing Not Supported */
272         MGMT_STATUS_BUSY,               /* Host Busy Pairing */
273         MGMT_STATUS_REJECTED,           /* Rejected, No Suitable Channel */
274         MGMT_STATUS_BUSY,               /* Controller Busy */
275         MGMT_STATUS_INVALID_PARAMS,     /* Unsuitable Connection Interval */
276         MGMT_STATUS_TIMEOUT,            /* Directed Advertising Timeout */
277         MGMT_STATUS_AUTH_FAILED,        /* Terminated Due to MIC Failure */
278         MGMT_STATUS_CONNECT_FAILED,     /* Connection Establishment Failed */
279         MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
280 };
281
282 static u8 mgmt_errno_status(int err)
283 {
284         switch (err) {
285         case 0:
286                 return MGMT_STATUS_SUCCESS;
287         case -EPERM:
288                 return MGMT_STATUS_REJECTED;
289         case -EINVAL:
290                 return MGMT_STATUS_INVALID_PARAMS;
291         case -EOPNOTSUPP:
292                 return MGMT_STATUS_NOT_SUPPORTED;
293         case -EBUSY:
294                 return MGMT_STATUS_BUSY;
295         case -ETIMEDOUT:
296                 return MGMT_STATUS_AUTH_FAILED;
297         case -ENOMEM:
298                 return MGMT_STATUS_NO_RESOURCES;
299         case -EISCONN:
300                 return MGMT_STATUS_ALREADY_CONNECTED;
301         case -ENOTCONN:
302                 return MGMT_STATUS_DISCONNECTED;
303         }
304
305         return MGMT_STATUS_FAILED;
306 }
307
308 static u8 mgmt_status(int err)
309 {
310         if (err < 0)
311                 return mgmt_errno_status(err);
312
313         if (err < ARRAY_SIZE(mgmt_status_table))
314                 return mgmt_status_table[err];
315
316         return MGMT_STATUS_FAILED;
317 }
318
319 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
320                             u16 len, int flag)
321 {
322         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
323                                flag, NULL);
324 }
325
326 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
327                               u16 len, int flag, struct sock *skip_sk)
328 {
329         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
330                                flag, skip_sk);
331 }
332
333 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
334                       struct sock *skip_sk)
335 {
336         return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
337                                HCI_SOCK_TRUSTED, skip_sk);
338 }
339
340 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
341 {
342         return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
343                                    skip_sk);
344 }
345
346 static u8 le_addr_type(u8 mgmt_addr_type)
347 {
348         if (mgmt_addr_type == BDADDR_LE_PUBLIC)
349                 return ADDR_LE_DEV_PUBLIC;
350         else
351                 return ADDR_LE_DEV_RANDOM;
352 }
353
354 void mgmt_fill_version_info(void *ver)
355 {
356         struct mgmt_rp_read_version *rp = ver;
357
358         rp->version = MGMT_VERSION;
359         rp->revision = cpu_to_le16(MGMT_REVISION);
360 }
361
362 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
363                         u16 data_len)
364 {
365         struct mgmt_rp_read_version rp;
366
367         bt_dev_dbg(hdev, "sock %p", sk);
368
369         mgmt_fill_version_info(&rp);
370
371         return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
372                                  &rp, sizeof(rp));
373 }
374
375 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
376                          u16 data_len)
377 {
378         struct mgmt_rp_read_commands *rp;
379         u16 num_commands, num_events;
380         size_t rp_size;
381         int i, err;
382
383         bt_dev_dbg(hdev, "sock %p", sk);
384
385         if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
386                 num_commands = ARRAY_SIZE(mgmt_commands);
387                 num_events = ARRAY_SIZE(mgmt_events);
388         } else {
389                 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
390                 num_events = ARRAY_SIZE(mgmt_untrusted_events);
391         }
392
393         rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
394
395         rp = kmalloc(rp_size, GFP_KERNEL);
396         if (!rp)
397                 return -ENOMEM;
398
399         rp->num_commands = cpu_to_le16(num_commands);
400         rp->num_events = cpu_to_le16(num_events);
401
402         if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
403                 __le16 *opcode = rp->opcodes;
404
405                 for (i = 0; i < num_commands; i++, opcode++)
406                         put_unaligned_le16(mgmt_commands[i], opcode);
407
408                 for (i = 0; i < num_events; i++, opcode++)
409                         put_unaligned_le16(mgmt_events[i], opcode);
410         } else {
411                 __le16 *opcode = rp->opcodes;
412
413                 for (i = 0; i < num_commands; i++, opcode++)
414                         put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
415
416                 for (i = 0; i < num_events; i++, opcode++)
417                         put_unaligned_le16(mgmt_untrusted_events[i], opcode);
418         }
419
420         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
421                                 rp, rp_size);
422         kfree(rp);
423
424         return err;
425 }
426
427 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
428                            u16 data_len)
429 {
430         struct mgmt_rp_read_index_list *rp;
431         struct hci_dev *d;
432         size_t rp_len;
433         u16 count;
434         int err;
435
436         bt_dev_dbg(hdev, "sock %p", sk);
437
438         read_lock(&hci_dev_list_lock);
439
440         count = 0;
441         list_for_each_entry(d, &hci_dev_list, list) {
442                 if (d->dev_type == HCI_PRIMARY &&
443                     !hci_dev_test_flag(d, HCI_UNCONFIGURED))
444                         count++;
445         }
446
447         rp_len = sizeof(*rp) + (2 * count);
448         rp = kmalloc(rp_len, GFP_ATOMIC);
449         if (!rp) {
450                 read_unlock(&hci_dev_list_lock);
451                 return -ENOMEM;
452         }
453
454         count = 0;
455         list_for_each_entry(d, &hci_dev_list, list) {
456                 if (hci_dev_test_flag(d, HCI_SETUP) ||
457                     hci_dev_test_flag(d, HCI_CONFIG) ||
458                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
459                         continue;
460
461                 /* Devices marked as raw-only are neither configured
462                  * nor unconfigured controllers.
463                  */
464                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
465                         continue;
466
467                 if (d->dev_type == HCI_PRIMARY &&
468                     !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
469                         rp->index[count++] = cpu_to_le16(d->id);
470                         bt_dev_dbg(hdev, "Added hci%u", d->id);
471                 }
472         }
473
474         rp->num_controllers = cpu_to_le16(count);
475         rp_len = sizeof(*rp) + (2 * count);
476
477         read_unlock(&hci_dev_list_lock);
478
479         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
480                                 0, rp, rp_len);
481
482         kfree(rp);
483
484         return err;
485 }
486
487 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
488                                   void *data, u16 data_len)
489 {
490         struct mgmt_rp_read_unconf_index_list *rp;
491         struct hci_dev *d;
492         size_t rp_len;
493         u16 count;
494         int err;
495
496         bt_dev_dbg(hdev, "sock %p", sk);
497
498         read_lock(&hci_dev_list_lock);
499
500         count = 0;
501         list_for_each_entry(d, &hci_dev_list, list) {
502                 if (d->dev_type == HCI_PRIMARY &&
503                     hci_dev_test_flag(d, HCI_UNCONFIGURED))
504                         count++;
505         }
506
507         rp_len = sizeof(*rp) + (2 * count);
508         rp = kmalloc(rp_len, GFP_ATOMIC);
509         if (!rp) {
510                 read_unlock(&hci_dev_list_lock);
511                 return -ENOMEM;
512         }
513
514         count = 0;
515         list_for_each_entry(d, &hci_dev_list, list) {
516                 if (hci_dev_test_flag(d, HCI_SETUP) ||
517                     hci_dev_test_flag(d, HCI_CONFIG) ||
518                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
519                         continue;
520
521                 /* Devices marked as raw-only are neither configured
522                  * nor unconfigured controllers.
523                  */
524                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
525                         continue;
526
527                 if (d->dev_type == HCI_PRIMARY &&
528                     hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
529                         rp->index[count++] = cpu_to_le16(d->id);
530                         bt_dev_dbg(hdev, "Added hci%u", d->id);
531                 }
532         }
533
534         rp->num_controllers = cpu_to_le16(count);
535         rp_len = sizeof(*rp) + (2 * count);
536
537         read_unlock(&hci_dev_list_lock);
538
539         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
540                                 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
541
542         kfree(rp);
543
544         return err;
545 }
546
547 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
548                                void *data, u16 data_len)
549 {
550         struct mgmt_rp_read_ext_index_list *rp;
551         struct hci_dev *d;
552         u16 count;
553         int err;
554
555         bt_dev_dbg(hdev, "sock %p", sk);
556
557         read_lock(&hci_dev_list_lock);
558
559         count = 0;
560         list_for_each_entry(d, &hci_dev_list, list) {
561                 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
562                         count++;
563         }
564
565         rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
566         if (!rp) {
567                 read_unlock(&hci_dev_list_lock);
568                 return -ENOMEM;
569         }
570
571         count = 0;
572         list_for_each_entry(d, &hci_dev_list, list) {
573                 if (hci_dev_test_flag(d, HCI_SETUP) ||
574                     hci_dev_test_flag(d, HCI_CONFIG) ||
575                     hci_dev_test_flag(d, HCI_USER_CHANNEL))
576                         continue;
577
578                 /* Devices marked as raw-only are neither configured
579                  * nor unconfigured controllers.
580                  */
581                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
582                         continue;
583
584                 if (d->dev_type == HCI_PRIMARY) {
585                         if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
586                                 rp->entry[count].type = 0x01;
587                         else
588                                 rp->entry[count].type = 0x00;
589                 } else if (d->dev_type == HCI_AMP) {
590                         rp->entry[count].type = 0x02;
591                 } else {
592                         continue;
593                 }
594
595                 rp->entry[count].bus = d->bus;
596                 rp->entry[count++].index = cpu_to_le16(d->id);
597                 bt_dev_dbg(hdev, "Added hci%u", d->id);
598         }
599
600         rp->num_controllers = cpu_to_le16(count);
601
602         read_unlock(&hci_dev_list_lock);
603
604         /* If this command is called at least once, then all the
605          * default index and unconfigured index events are disabled
606          * and from now on only extended index events are used.
607          */
608         hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
609         hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
610         hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
611
612         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
613                                 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
614                                 struct_size(rp, entry, count));
615
616         kfree(rp);
617
618         return err;
619 }
620
621 static bool is_configured(struct hci_dev *hdev)
622 {
623         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
624             !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
625                 return false;
626
627         if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
628              test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
629             !bacmp(&hdev->public_addr, BDADDR_ANY))
630                 return false;
631
632         return true;
633 }
634
635 static __le32 get_missing_options(struct hci_dev *hdev)
636 {
637         u32 options = 0;
638
639         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
640             !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
641                 options |= MGMT_OPTION_EXTERNAL_CONFIG;
642
643         if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
644              test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
645             !bacmp(&hdev->public_addr, BDADDR_ANY))
646                 options |= MGMT_OPTION_PUBLIC_ADDRESS;
647
648         return cpu_to_le32(options);
649 }
650
651 static int new_options(struct hci_dev *hdev, struct sock *skip)
652 {
653         __le32 options = get_missing_options(hdev);
654
655         return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
656                                   sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
657 }
658
659 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
660 {
661         __le32 options = get_missing_options(hdev);
662
663         return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
664                                  sizeof(options));
665 }
666
667 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
668                             void *data, u16 data_len)
669 {
670         struct mgmt_rp_read_config_info rp;
671         u32 options = 0;
672
673         bt_dev_dbg(hdev, "sock %p", sk);
674
675         hci_dev_lock(hdev);
676
677         memset(&rp, 0, sizeof(rp));
678         rp.manufacturer = cpu_to_le16(hdev->manufacturer);
679
680         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
681                 options |= MGMT_OPTION_EXTERNAL_CONFIG;
682
683         if (hdev->set_bdaddr)
684                 options |= MGMT_OPTION_PUBLIC_ADDRESS;
685
686         rp.supported_options = cpu_to_le32(options);
687         rp.missing_options = get_missing_options(hdev);
688
689         hci_dev_unlock(hdev);
690
691         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
692                                  &rp, sizeof(rp));
693 }
694
695 static u32 get_supported_phys(struct hci_dev *hdev)
696 {
697         u32 supported_phys = 0;
698
699         if (lmp_bredr_capable(hdev)) {
700                 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
701
702                 if (hdev->features[0][0] & LMP_3SLOT)
703                         supported_phys |= MGMT_PHY_BR_1M_3SLOT;
704
705                 if (hdev->features[0][0] & LMP_5SLOT)
706                         supported_phys |= MGMT_PHY_BR_1M_5SLOT;
707
708                 if (lmp_edr_2m_capable(hdev)) {
709                         supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
710
711                         if (lmp_edr_3slot_capable(hdev))
712                                 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
713
714                         if (lmp_edr_5slot_capable(hdev))
715                                 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
716
717                         if (lmp_edr_3m_capable(hdev)) {
718                                 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
719
720                                 if (lmp_edr_3slot_capable(hdev))
721                                         supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
722
723                                 if (lmp_edr_5slot_capable(hdev))
724                                         supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
725                         }
726                 }
727         }
728
729         if (lmp_le_capable(hdev)) {
730                 supported_phys |= MGMT_PHY_LE_1M_TX;
731                 supported_phys |= MGMT_PHY_LE_1M_RX;
732
733                 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
734                         supported_phys |= MGMT_PHY_LE_2M_TX;
735                         supported_phys |= MGMT_PHY_LE_2M_RX;
736                 }
737
738                 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
739                         supported_phys |= MGMT_PHY_LE_CODED_TX;
740                         supported_phys |= MGMT_PHY_LE_CODED_RX;
741                 }
742         }
743
744         return supported_phys;
745 }
746
747 static u32 get_selected_phys(struct hci_dev *hdev)
748 {
749         u32 selected_phys = 0;
750
751         if (lmp_bredr_capable(hdev)) {
752                 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
753
754                 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
755                         selected_phys |= MGMT_PHY_BR_1M_3SLOT;
756
757                 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
758                         selected_phys |= MGMT_PHY_BR_1M_5SLOT;
759
760                 if (lmp_edr_2m_capable(hdev)) {
761                         if (!(hdev->pkt_type & HCI_2DH1))
762                                 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
763
764                         if (lmp_edr_3slot_capable(hdev) &&
765                             !(hdev->pkt_type & HCI_2DH3))
766                                 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
767
768                         if (lmp_edr_5slot_capable(hdev) &&
769                             !(hdev->pkt_type & HCI_2DH5))
770                                 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
771
772                         if (lmp_edr_3m_capable(hdev)) {
773                                 if (!(hdev->pkt_type & HCI_3DH1))
774                                         selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
775
776                                 if (lmp_edr_3slot_capable(hdev) &&
777                                     !(hdev->pkt_type & HCI_3DH3))
778                                         selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
779
780                                 if (lmp_edr_5slot_capable(hdev) &&
781                                     !(hdev->pkt_type & HCI_3DH5))
782                                         selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
783                         }
784                 }
785         }
786
787         if (lmp_le_capable(hdev)) {
788                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
789                         selected_phys |= MGMT_PHY_LE_1M_TX;
790
791                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
792                         selected_phys |= MGMT_PHY_LE_1M_RX;
793
794                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
795                         selected_phys |= MGMT_PHY_LE_2M_TX;
796
797                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
798                         selected_phys |= MGMT_PHY_LE_2M_RX;
799
800                 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
801                         selected_phys |= MGMT_PHY_LE_CODED_TX;
802
803                 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
804                         selected_phys |= MGMT_PHY_LE_CODED_RX;
805         }
806
807         return selected_phys;
808 }
809
810 static u32 get_configurable_phys(struct hci_dev *hdev)
811 {
812         return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
813                 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
814 }
815
816 static u32 get_supported_settings(struct hci_dev *hdev)
817 {
818         u32 settings = 0;
819
820         settings |= MGMT_SETTING_POWERED;
821         settings |= MGMT_SETTING_BONDABLE;
822         settings |= MGMT_SETTING_DEBUG_KEYS;
823         settings |= MGMT_SETTING_CONNECTABLE;
824         settings |= MGMT_SETTING_DISCOVERABLE;
825
826         if (lmp_bredr_capable(hdev)) {
827                 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
828                         settings |= MGMT_SETTING_FAST_CONNECTABLE;
829                 settings |= MGMT_SETTING_BREDR;
830                 settings |= MGMT_SETTING_LINK_SECURITY;
831
832                 if (lmp_ssp_capable(hdev)) {
833                         settings |= MGMT_SETTING_SSP;
834                         if (IS_ENABLED(CONFIG_BT_HS))
835                                 settings |= MGMT_SETTING_HS;
836                 }
837
838                 if (lmp_sc_capable(hdev))
839                         settings |= MGMT_SETTING_SECURE_CONN;
840
841                 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
842                              &hdev->quirks))
843                         settings |= MGMT_SETTING_WIDEBAND_SPEECH;
844         }
845
846         if (lmp_le_capable(hdev)) {
847                 settings |= MGMT_SETTING_LE;
848                 settings |= MGMT_SETTING_SECURE_CONN;
849                 settings |= MGMT_SETTING_PRIVACY;
850                 settings |= MGMT_SETTING_STATIC_ADDRESS;
851                 settings |= MGMT_SETTING_ADVERTISING;
852         }
853
854         if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
855             hdev->set_bdaddr)
856                 settings |= MGMT_SETTING_CONFIGURATION;
857
858         settings |= MGMT_SETTING_PHY_CONFIGURATION;
859
860         return settings;
861 }
862
863 static u32 get_current_settings(struct hci_dev *hdev)
864 {
865         u32 settings = 0;
866
867         if (hdev_is_powered(hdev))
868                 settings |= MGMT_SETTING_POWERED;
869
870         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
871                 settings |= MGMT_SETTING_CONNECTABLE;
872
873         if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
874                 settings |= MGMT_SETTING_FAST_CONNECTABLE;
875
876         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
877                 settings |= MGMT_SETTING_DISCOVERABLE;
878
879         if (hci_dev_test_flag(hdev, HCI_BONDABLE))
880                 settings |= MGMT_SETTING_BONDABLE;
881
882         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
883                 settings |= MGMT_SETTING_BREDR;
884
885         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
886                 settings |= MGMT_SETTING_LE;
887
888         if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
889                 settings |= MGMT_SETTING_LINK_SECURITY;
890
891         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
892                 settings |= MGMT_SETTING_SSP;
893
894         if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
895                 settings |= MGMT_SETTING_HS;
896
897         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
898                 settings |= MGMT_SETTING_ADVERTISING;
899
900         if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
901                 settings |= MGMT_SETTING_SECURE_CONN;
902
903         if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
904                 settings |= MGMT_SETTING_DEBUG_KEYS;
905
906         if (hci_dev_test_flag(hdev, HCI_PRIVACY))
907                 settings |= MGMT_SETTING_PRIVACY;
908
909         /* The current setting for static address has two purposes. The
910          * first is to indicate if the static address will be used and
911          * the second is to indicate if it is actually set.
912          *
913          * This means if the static address is not configured, this flag
914          * will never be set. If the address is configured, then if the
915          * address is actually used decides if the flag is set or not.
916          *
917          * For single mode LE only controllers and dual-mode controllers
918          * with BR/EDR disabled, the existence of the static address will
919          * be evaluated.
920          */
921         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
922             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
923             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
924                 if (bacmp(&hdev->static_addr, BDADDR_ANY))
925                         settings |= MGMT_SETTING_STATIC_ADDRESS;
926         }
927
928         if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
929                 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
930
931         return settings;
932 }
933
934 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
935 {
936         return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
937 }
938
939 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
940 {
941         struct mgmt_pending_cmd *cmd;
942
943         /* If there's a pending mgmt command the flags will not yet have
944          * their final values, so check for this first.
945          */
946         cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
947         if (cmd) {
948                 struct mgmt_mode *cp = cmd->param;
949                 if (cp->val == 0x01)
950                         return LE_AD_GENERAL;
951                 else if (cp->val == 0x02)
952                         return LE_AD_LIMITED;
953         } else {
954                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
955                         return LE_AD_LIMITED;
956                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
957                         return LE_AD_GENERAL;
958         }
959
960         return 0;
961 }
962
963 bool mgmt_get_connectable(struct hci_dev *hdev)
964 {
965         struct mgmt_pending_cmd *cmd;
966
967         /* If there's a pending mgmt command the flag will not yet have
968          * it's final value, so check for this first.
969          */
970         cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
971         if (cmd) {
972                 struct mgmt_mode *cp = cmd->param;
973
974                 return cp->val;
975         }
976
977         return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
978 }
979
980 static int service_cache_sync(struct hci_dev *hdev, void *data)
981 {
982         hci_update_eir_sync(hdev);
983         hci_update_class_sync(hdev);
984
985         return 0;
986 }
987
988 static void service_cache_off(struct work_struct *work)
989 {
990         struct hci_dev *hdev = container_of(work, struct hci_dev,
991                                             service_cache.work);
992
993         if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
994                 return;
995
996         hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
997 }
998
999 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1000 {
1001         /* The generation of a new RPA and programming it into the
1002          * controller happens in the hci_req_enable_advertising()
1003          * function.
1004          */
1005         if (ext_adv_capable(hdev))
1006                 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1007         else
1008                 return hci_enable_advertising_sync(hdev);
1009 }
1010
1011 static void rpa_expired(struct work_struct *work)
1012 {
1013         struct hci_dev *hdev = container_of(work, struct hci_dev,
1014                                             rpa_expired.work);
1015
1016         bt_dev_dbg(hdev, "");
1017
1018         hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1019
1020         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1021                 return;
1022
1023         hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1024 }
1025
1026 static void discov_off(struct work_struct *work)
1027 {
1028         struct hci_dev *hdev = container_of(work, struct hci_dev,
1029                                             discov_off.work);
1030
1031         bt_dev_dbg(hdev, "");
1032
1033         hci_dev_lock(hdev);
1034
1035         /* When discoverable timeout triggers, then just make sure
1036          * the limited discoverable flag is cleared. Even in the case
1037          * of a timeout triggered from general discoverable, it is
1038          * safe to unconditionally clear the flag.
1039          */
1040         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1041         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1042         hdev->discov_timeout = 0;
1043
1044         hci_update_discoverable(hdev);
1045
1046         mgmt_new_settings(hdev);
1047
1048         hci_dev_unlock(hdev);
1049 }
1050
1051 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1052 {
1053         if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1054                 return;
1055
1056         BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1057
1058         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1059         INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1060         INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1061
1062         /* Non-mgmt controlled devices get this bit set
1063          * implicitly so that pairing works for them, however
1064          * for mgmt we require user-space to explicitly enable
1065          * it
1066          */
1067         hci_dev_clear_flag(hdev, HCI_BONDABLE);
1068 }
1069
1070 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1071                                 void *data, u16 data_len)
1072 {
1073         struct mgmt_rp_read_info rp;
1074
1075         bt_dev_dbg(hdev, "sock %p", sk);
1076
1077         hci_dev_lock(hdev);
1078
1079         memset(&rp, 0, sizeof(rp));
1080
1081         bacpy(&rp.bdaddr, &hdev->bdaddr);
1082
1083         rp.version = hdev->hci_ver;
1084         rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1085
1086         rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1087         rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1088
1089         memcpy(rp.dev_class, hdev->dev_class, 3);
1090
1091         memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1092         memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1093
1094         hci_dev_unlock(hdev);
1095
1096         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1097                                  sizeof(rp));
1098 }
1099
1100 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1101 {
1102         u16 eir_len = 0;
1103         size_t name_len;
1104
1105         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1106                 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1107                                           hdev->dev_class, 3);
1108
1109         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1110                 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1111                                           hdev->appearance);
1112
1113         name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1114         eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1115                                   hdev->dev_name, name_len);
1116
1117         name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1118         eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1119                                   hdev->short_name, name_len);
1120
1121         return eir_len;
1122 }
1123
1124 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1125                                     void *data, u16 data_len)
1126 {
1127         char buf[512];
1128         struct mgmt_rp_read_ext_info *rp = (void *)buf;
1129         u16 eir_len;
1130
1131         bt_dev_dbg(hdev, "sock %p", sk);
1132
1133         memset(&buf, 0, sizeof(buf));
1134
1135         hci_dev_lock(hdev);
1136
1137         bacpy(&rp->bdaddr, &hdev->bdaddr);
1138
1139         rp->version = hdev->hci_ver;
1140         rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1141
1142         rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1143         rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1144
1145
1146         eir_len = append_eir_data_to_buf(hdev, rp->eir);
1147         rp->eir_len = cpu_to_le16(eir_len);
1148
1149         hci_dev_unlock(hdev);
1150
1151         /* If this command is called at least once, then the events
1152          * for class of device and local name changes are disabled
1153          * and only the new extended controller information event
1154          * is used.
1155          */
1156         hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1157         hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1158         hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1159
1160         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1161                                  sizeof(*rp) + eir_len);
1162 }
1163
1164 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1165 {
1166         char buf[512];
1167         struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1168         u16 eir_len;
1169
1170         memset(buf, 0, sizeof(buf));
1171
1172         eir_len = append_eir_data_to_buf(hdev, ev->eir);
1173         ev->eir_len = cpu_to_le16(eir_len);
1174
1175         return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1176                                   sizeof(*ev) + eir_len,
1177                                   HCI_MGMT_EXT_INFO_EVENTS, skip);
1178 }
1179
1180 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1181 {
1182         __le32 settings = cpu_to_le32(get_current_settings(hdev));
1183
1184         return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1185                                  sizeof(settings));
1186 }
1187
1188 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1189 {
1190         struct mgmt_ev_advertising_added ev;
1191
1192         ev.instance = instance;
1193
1194         mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1195 }
1196
1197 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1198                               u8 instance)
1199 {
1200         struct mgmt_ev_advertising_removed ev;
1201
1202         ev.instance = instance;
1203
1204         mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1205 }
1206
1207 static void cancel_adv_timeout(struct hci_dev *hdev)
1208 {
1209         if (hdev->adv_instance_timeout) {
1210                 hdev->adv_instance_timeout = 0;
1211                 cancel_delayed_work(&hdev->adv_instance_expire);
1212         }
1213 }
1214
1215 /* This function requires the caller holds hdev->lock */
1216 static void restart_le_actions(struct hci_dev *hdev)
1217 {
1218         struct hci_conn_params *p;
1219
1220         list_for_each_entry(p, &hdev->le_conn_params, list) {
1221                 /* Needed for AUTO_OFF case where might not "really"
1222                  * have been powered off.
1223                  */
1224                 list_del_init(&p->action);
1225
1226                 switch (p->auto_connect) {
1227                 case HCI_AUTO_CONN_DIRECT:
1228                 case HCI_AUTO_CONN_ALWAYS:
1229                         list_add(&p->action, &hdev->pend_le_conns);
1230                         break;
1231                 case HCI_AUTO_CONN_REPORT:
1232                         list_add(&p->action, &hdev->pend_le_reports);
1233                         break;
1234                 default:
1235                         break;
1236                 }
1237         }
1238 }
1239
1240 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1241 {
1242         __le32 ev = cpu_to_le32(get_current_settings(hdev));
1243
1244         return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1245                                   sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1246 }
1247
1248 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1249 {
1250         struct mgmt_pending_cmd *cmd = data;
1251         struct mgmt_mode *cp;
1252
1253         /* Make sure cmd still outstanding. */
1254         if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1255                 return;
1256
1257         cp = cmd->param;
1258
1259         bt_dev_dbg(hdev, "err %d", err);
1260
1261         if (!err) {
1262                 if (cp->val) {
1263                         hci_dev_lock(hdev);
1264                         restart_le_actions(hdev);
1265                         hci_update_passive_scan(hdev);
1266                         hci_dev_unlock(hdev);
1267                 }
1268
1269                 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1270
1271                 /* Only call new_setting for power on as power off is deferred
1272                  * to hdev->power_off work which does call hci_dev_do_close.
1273                  */
1274                 if (cp->val)
1275                         new_settings(hdev, cmd->sk);
1276         } else {
1277                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1278                                 mgmt_status(err));
1279         }
1280
1281         mgmt_pending_remove(cmd);
1282 }
1283
1284 static int set_powered_sync(struct hci_dev *hdev, void *data)
1285 {
1286         struct mgmt_pending_cmd *cmd = data;
1287         struct mgmt_mode *cp = cmd->param;
1288
1289         BT_DBG("%s", hdev->name);
1290
1291         return hci_set_powered_sync(hdev, cp->val);
1292 }
1293
1294 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1295                        u16 len)
1296 {
1297         struct mgmt_mode *cp = data;
1298         struct mgmt_pending_cmd *cmd;
1299         int err;
1300
1301         bt_dev_dbg(hdev, "sock %p", sk);
1302
1303         if (cp->val != 0x00 && cp->val != 0x01)
1304                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1305                                        MGMT_STATUS_INVALID_PARAMS);
1306
1307         hci_dev_lock(hdev);
1308
1309         if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1310                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1311                                       MGMT_STATUS_BUSY);
1312                 goto failed;
1313         }
1314
1315         if (!!cp->val == hdev_is_powered(hdev)) {
1316                 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1317                 goto failed;
1318         }
1319
1320         cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1321         if (!cmd) {
1322                 err = -ENOMEM;
1323                 goto failed;
1324         }
1325
1326         err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1327                                  mgmt_set_powered_complete);
1328
1329         if (err < 0)
1330                 mgmt_pending_remove(cmd);
1331
1332 failed:
1333         hci_dev_unlock(hdev);
1334         return err;
1335 }
1336
1337 int mgmt_new_settings(struct hci_dev *hdev)
1338 {
1339         return new_settings(hdev, NULL);
1340 }
1341
1342 struct cmd_lookup {
1343         struct sock *sk;
1344         struct hci_dev *hdev;
1345         u8 mgmt_status;
1346 };
1347
1348 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1349 {
1350         struct cmd_lookup *match = data;
1351
1352         send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1353
1354         list_del(&cmd->list);
1355
1356         if (match->sk == NULL) {
1357                 match->sk = cmd->sk;
1358                 sock_hold(match->sk);
1359         }
1360
1361         mgmt_pending_free(cmd);
1362 }
1363
1364 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1365 {
1366         u8 *status = data;
1367
1368         mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1369         mgmt_pending_remove(cmd);
1370 }
1371
1372 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1373 {
1374         if (cmd->cmd_complete) {
1375                 u8 *status = data;
1376
1377                 cmd->cmd_complete(cmd, *status);
1378                 mgmt_pending_remove(cmd);
1379
1380                 return;
1381         }
1382
1383         cmd_status_rsp(cmd, data);
1384 }
1385
1386 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1387 {
1388         return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1389                                  cmd->param, cmd->param_len);
1390 }
1391
1392 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1393 {
1394         return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1395                                  cmd->param, sizeof(struct mgmt_addr_info));
1396 }
1397
1398 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1399 {
1400         if (!lmp_bredr_capable(hdev))
1401                 return MGMT_STATUS_NOT_SUPPORTED;
1402         else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1403                 return MGMT_STATUS_REJECTED;
1404         else
1405                 return MGMT_STATUS_SUCCESS;
1406 }
1407
1408 static u8 mgmt_le_support(struct hci_dev *hdev)
1409 {
1410         if (!lmp_le_capable(hdev))
1411                 return MGMT_STATUS_NOT_SUPPORTED;
1412         else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1413                 return MGMT_STATUS_REJECTED;
1414         else
1415                 return MGMT_STATUS_SUCCESS;
1416 }
1417
1418 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1419                                            int err)
1420 {
1421         struct mgmt_pending_cmd *cmd = data;
1422
1423         bt_dev_dbg(hdev, "err %d", err);
1424
1425         /* Make sure cmd still outstanding. */
1426         if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1427                 return;
1428
1429         hci_dev_lock(hdev);
1430
1431         if (err) {
1432                 u8 mgmt_err = mgmt_status(err);
1433                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1434                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1435                 goto done;
1436         }
1437
1438         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1439             hdev->discov_timeout > 0) {
1440                 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1441                 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1442         }
1443
1444         send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1445         new_settings(hdev, cmd->sk);
1446
1447 done:
1448         mgmt_pending_remove(cmd);
1449         hci_dev_unlock(hdev);
1450 }
1451
1452 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1453 {
1454         BT_DBG("%s", hdev->name);
1455
1456         return hci_update_discoverable_sync(hdev);
1457 }
1458
1459 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1460                             u16 len)
1461 {
1462         struct mgmt_cp_set_discoverable *cp = data;
1463         struct mgmt_pending_cmd *cmd;
1464         u16 timeout;
1465         int err;
1466
1467         bt_dev_dbg(hdev, "sock %p", sk);
1468
1469         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1470             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1471                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1472                                        MGMT_STATUS_REJECTED);
1473
1474         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1475                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1476                                        MGMT_STATUS_INVALID_PARAMS);
1477
1478         timeout = __le16_to_cpu(cp->timeout);
1479
1480         /* Disabling discoverable requires that no timeout is set,
1481          * and enabling limited discoverable requires a timeout.
1482          */
1483         if ((cp->val == 0x00 && timeout > 0) ||
1484             (cp->val == 0x02 && timeout == 0))
1485                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1486                                        MGMT_STATUS_INVALID_PARAMS);
1487
1488         hci_dev_lock(hdev);
1489
1490         if (!hdev_is_powered(hdev) && timeout > 0) {
1491                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1492                                       MGMT_STATUS_NOT_POWERED);
1493                 goto failed;
1494         }
1495
1496         if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1497             pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1498                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1499                                       MGMT_STATUS_BUSY);
1500                 goto failed;
1501         }
1502
1503         if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1504                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1505                                       MGMT_STATUS_REJECTED);
1506                 goto failed;
1507         }
1508
1509         if (hdev->advertising_paused) {
1510                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1511                                       MGMT_STATUS_BUSY);
1512                 goto failed;
1513         }
1514
1515         if (!hdev_is_powered(hdev)) {
1516                 bool changed = false;
1517
1518                 /* Setting limited discoverable when powered off is
1519                  * not a valid operation since it requires a timeout,
1520                  * so there is no need to check HCI_LIMITED_DISCOVERABLE.
1521                  */
1522                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1523                         hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1524                         changed = true;
1525                 }
1526
1527                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1528                 if (err < 0)
1529                         goto failed;
1530
1531                 if (changed)
1532                         err = new_settings(hdev, sk);
1533
1534                 goto failed;
1535         }
1536
1537         /* If the current mode is the same, just update the timeout
1538          * value with the new one. When only the timeout changes,
1539          * no HCI transactions are needed.
1540          */
1541         if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1542             (cp->val == 0x02) == hci_dev_test_flag(hdev,
1543                                                    HCI_LIMITED_DISCOVERABLE)) {
1544                 cancel_delayed_work(&hdev->discov_off);
1545                 hdev->discov_timeout = timeout;
1546
1547                 if (cp->val && hdev->discov_timeout > 0) {
1548                         int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1549                         queue_delayed_work(hdev->req_workqueue,
1550                                            &hdev->discov_off, to);
1551                 }
1552
1553                 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1554                 goto failed;
1555         }
1556
1557         cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1558         if (!cmd) {
1559                 err = -ENOMEM;
1560                 goto failed;
1561         }
1562
1563         /* Cancel any potential discoverable timeout that might still
1564          * be active and store the new timeout value. The arming of
1565          * the timeout happens in the complete handler.
1566          */
1567         cancel_delayed_work(&hdev->discov_off);
1568         hdev->discov_timeout = timeout;
1569
1570         if (cp->val)
1571                 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1572         else
1573                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1574
1575         /* Limited discoverable mode */
1576         if (cp->val == 0x02)
1577                 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1578         else
1579                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1580
1581         err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1582                                  mgmt_set_discoverable_complete);
1583
1584         if (err < 0)
1585                 mgmt_pending_remove(cmd);
1586
1587 failed:
1588         hci_dev_unlock(hdev);
1589         return err;
1590 }
1591
1592 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1593                                           int err)
1594 {
1595         struct mgmt_pending_cmd *cmd = data;
1596
1597         bt_dev_dbg(hdev, "err %d", err);
1598
1599         /* Make sure cmd still outstanding. */
1600         if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1601                 return;
1602
1603         hci_dev_lock(hdev);
1604
1605         if (err) {
1606                 u8 mgmt_err = mgmt_status(err);
1607                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1608                 goto done;
1609         }
1610
1611         send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1612         new_settings(hdev, cmd->sk);
1613
1614 done:
1615         if (cmd)
1616                 mgmt_pending_remove(cmd);
1617
1618         hci_dev_unlock(hdev);
1619 }
1620
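/* Apply a connectable change without going through the command sync
 * machinery: toggle HCI_CONNECTABLE (also clearing HCI_DISCOVERABLE when
 * connectable is turned off), send the settings response and, if the value
 * actually changed, refresh the scan state and emit a New Settings event.
 * Used by set_connectable() when the controller is not powered.
 */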
1621 static int set_connectable_update_settings(struct hci_dev *hdev,
1622                                            struct sock *sk, u8 val)
1623 {
1624         bool changed = false;
1625         int err;
1626
1627         if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1628                 changed = true;
1629
1630         if (val) {
1631                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1632         } else {
1633                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1634                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1635         }
1636
1637         err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1638         if (err < 0)
1639                 return err;
1640
1641         if (changed) {
1642                 hci_update_scan(hdev);
1643                 hci_update_passive_scan(hdev);
1644                 return new_settings(hdev, sk);
1645         }
1646
1647         return 0;
1648 }
1649
1650 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1651 {
1652         BT_DBG("%s", hdev->name);
1653
1654         return hci_update_connectable_sync(hdev);
1655 }
1656
1657 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1658                            u16 len)
1659 {
1660         struct mgmt_mode *cp = data;
1661         struct mgmt_pending_cmd *cmd;
1662         int err;
1663
1664         bt_dev_dbg(hdev, "sock %p", sk);
1665
1666         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1667             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1668                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1669                                        MGMT_STATUS_REJECTED);
1670
1671         if (cp->val != 0x00 && cp->val != 0x01)
1672                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1673                                        MGMT_STATUS_INVALID_PARAMS);
1674
1675         hci_dev_lock(hdev);
1676
1677         if (!hdev_is_powered(hdev)) {
1678                 err = set_connectable_update_settings(hdev, sk, cp->val);
1679                 goto failed;
1680         }
1681
1682         if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1683             pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1684                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1685                                       MGMT_STATUS_BUSY);
1686                 goto failed;
1687         }
1688
1689         cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1690         if (!cmd) {
1691                 err = -ENOMEM;
1692                 goto failed;
1693         }
1694
1695         if (cp->val) {
1696                 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1697         } else {
1698                 if (hdev->discov_timeout > 0)
1699                         cancel_delayed_work(&hdev->discov_off);
1700
1701                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1702                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1703                 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1704         }
1705
1706         err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1707                                  mgmt_set_connectable_complete);
1708
1709         if (err < 0)
1710                 mgmt_pending_remove(cmd);
1711
1712 failed:
1713         hci_dev_unlock(hdev);
1714         return err;
1715 }
1716
1717 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1718                         u16 len)
1719 {
1720         struct mgmt_mode *cp = data;
1721         bool changed;
1722         int err;
1723
1724         bt_dev_dbg(hdev, "sock %p", sk);
1725
1726         if (cp->val != 0x00 && cp->val != 0x01)
1727                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1728                                        MGMT_STATUS_INVALID_PARAMS);
1729
1730         hci_dev_lock(hdev);
1731
1732         if (cp->val)
1733                 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1734         else
1735                 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1736
1737         err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1738         if (err < 0)
1739                 goto unlock;
1740
1741         if (changed) {
1742                 /* In limited privacy mode the change of bondable mode
1743                  * may affect the local advertising address.
1744                  */
1745                 hci_update_discoverable(hdev);
1746
1747                 err = new_settings(hdev, sk);
1748         }
1749
1750 unlock:
1751         hci_dev_unlock(hdev);
1752         return err;
1753 }
1754
1755 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1756                              u16 len)
1757 {
1758         struct mgmt_mode *cp = data;
1759         struct mgmt_pending_cmd *cmd;
1760         u8 val, status;
1761         int err;
1762
1763         bt_dev_dbg(hdev, "sock %p", sk);
1764
1765         status = mgmt_bredr_support(hdev);
1766         if (status)
1767                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1768                                        status);
1769
1770         if (cp->val != 0x00 && cp->val != 0x01)
1771                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1772                                        MGMT_STATUS_INVALID_PARAMS);
1773
1774         hci_dev_lock(hdev);
1775
1776         if (!hdev_is_powered(hdev)) {
1777                 bool changed = false;
1778
1779                 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1780                         hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1781                         changed = true;
1782                 }
1783
1784                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1785                 if (err < 0)
1786                         goto failed;
1787
1788                 if (changed)
1789                         err = new_settings(hdev, sk);
1790
1791                 goto failed;
1792         }
1793
1794         if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1795                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1796                                       MGMT_STATUS_BUSY);
1797                 goto failed;
1798         }
1799
1800         val = !!cp->val;
1801
1802         if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1803                 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1804                 goto failed;
1805         }
1806
1807         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1808         if (!cmd) {
1809                 err = -ENOMEM;
1810                 goto failed;
1811         }
1812
1813         err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1814         if (err < 0) {
1815                 mgmt_pending_remove(cmd);
1816                 goto failed;
1817         }
1818
1819 failed:
1820         hci_dev_unlock(hdev);
1821         return err;
1822 }
1823
1824 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1825 {
1826         struct cmd_lookup match = { NULL, hdev };
1827         struct mgmt_pending_cmd *cmd = data;
1828         struct mgmt_mode *cp = cmd->param;
1829         u8 enable = cp->val;
1830         bool changed;
1831
1832         /* Make sure cmd still outstanding. */
1833         if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1834                 return;
1835
1836         if (err) {
1837                 u8 mgmt_err = mgmt_status(err);
1838
1839                 if (enable && hci_dev_test_and_clear_flag(hdev,
1840                                                           HCI_SSP_ENABLED)) {
1841                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1842                         new_settings(hdev, NULL);
1843                 }
1844
1845                 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1846                                      &mgmt_err);
1847                 return;
1848         }
1849
1850         if (enable) {
1851                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1852         } else {
1853                 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1854
1855                 if (!changed)
1856                         changed = hci_dev_test_and_clear_flag(hdev,
1857                                                               HCI_HS_ENABLED);
1858                 else
1859                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1860         }
1861
1862         mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1863
1864         if (changed)
1865                 new_settings(hdev, match.sk);
1866
1867         if (match.sk)
1868                 sock_put(match.sk);
1869
1870         hci_update_eir_sync(hdev);
1871 }
1872
1873 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1874 {
1875         struct mgmt_pending_cmd *cmd = data;
1876         struct mgmt_mode *cp = cmd->param;
1877         bool changed = false;
1878         int err;
1879
1880         if (cp->val)
1881                 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1882
1883         err = hci_write_ssp_mode_sync(hdev, cp->val);
1884
1885         if (!err && changed)
1886                 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1887
1888         return err;
1889 }
1890
1891 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1892 {
1893         struct mgmt_mode *cp = data;
1894         struct mgmt_pending_cmd *cmd;
1895         u8 status;
1896         int err;
1897
1898         bt_dev_dbg(hdev, "sock %p", sk);
1899
1900         status = mgmt_bredr_support(hdev);
1901         if (status)
1902                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1903
1904         if (!lmp_ssp_capable(hdev))
1905                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1906                                        MGMT_STATUS_NOT_SUPPORTED);
1907
1908         if (cp->val != 0x00 && cp->val != 0x01)
1909                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1910                                        MGMT_STATUS_INVALID_PARAMS);
1911
1912         hci_dev_lock(hdev);
1913
1914         if (!hdev_is_powered(hdev)) {
1915                 bool changed;
1916
1917                 if (cp->val) {
1918                         changed = !hci_dev_test_and_set_flag(hdev,
1919                                                              HCI_SSP_ENABLED);
1920                 } else {
1921                         changed = hci_dev_test_and_clear_flag(hdev,
1922                                                               HCI_SSP_ENABLED);
1923                         if (!changed)
1924                                 changed = hci_dev_test_and_clear_flag(hdev,
1925                                                                       HCI_HS_ENABLED);
1926                         else
1927                                 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1928                 }
1929
1930                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1931                 if (err < 0)
1932                         goto failed;
1933
1934                 if (changed)
1935                         err = new_settings(hdev, sk);
1936
1937                 goto failed;
1938         }
1939
1940         if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1941                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1942                                       MGMT_STATUS_BUSY);
1943                 goto failed;
1944         }
1945
1946         if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1947                 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1948                 goto failed;
1949         }
1950
1951         cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1952         if (!cmd)
1953                 err = -ENOMEM;
1954         else
1955                 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
1956                                          set_ssp_complete);
1957
1958         if (err < 0) {
1959                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1960                                       MGMT_STATUS_FAILED);
1961
1962                 if (cmd)
1963                         mgmt_pending_remove(cmd);
1964         }
1965
1966 failed:
1967         hci_dev_unlock(hdev);
1968         return err;
1969 }
1970
1971 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1972 {
1973         struct mgmt_mode *cp = data;
1974         bool changed;
1975         u8 status;
1976         int err;
1977
1978         bt_dev_dbg(hdev, "sock %p", sk);
1979
1980         if (!IS_ENABLED(CONFIG_BT_HS))
1981                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1982                                        MGMT_STATUS_NOT_SUPPORTED);
1983
1984         status = mgmt_bredr_support(hdev);
1985         if (status)
1986                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1987
1988         if (!lmp_ssp_capable(hdev))
1989                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1990                                        MGMT_STATUS_NOT_SUPPORTED);
1991
1992         if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1993                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1994                                        MGMT_STATUS_REJECTED);
1995
1996         if (cp->val != 0x00 && cp->val != 0x01)
1997                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1998                                        MGMT_STATUS_INVALID_PARAMS);
1999
2000         hci_dev_lock(hdev);
2001
2002         if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2003                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2004                                       MGMT_STATUS_BUSY);
2005                 goto unlock;
2006         }
2007
2008         if (cp->val) {
2009                 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2010         } else {
2011                 if (hdev_is_powered(hdev)) {
2012                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2013                                               MGMT_STATUS_REJECTED);
2014                         goto unlock;
2015                 }
2016
2017                 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2018         }
2019
2020         err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2021         if (err < 0)
2022                 goto unlock;
2023
2024         if (changed)
2025                 err = new_settings(hdev, sk);
2026
2027 unlock:
2028         hci_dev_unlock(hdev);
2029         return err;
2030 }
2031
2032 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2033 {
2034         struct cmd_lookup match = { NULL, hdev };
2035         u8 status = mgmt_status(err);
2036
2037         bt_dev_dbg(hdev, "err %d", err);
2038
2039         if (status) {
2040                 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2041                                                         &status);
2042                 return;
2043         }
2044
2045         mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2046
2047         new_settings(hdev, match.sk);
2048
2049         if (match.sk)
2050                 sock_put(match.sk);
2051 }
2052
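/* Runs from the command sync queue to apply a Set LE change. When disabling
 * LE, any active advertising is stopped (and the default extended advertising
 * instance removed) before LE host support is written. While LE is enabled,
 * the default advertising and scan response data are refreshed and passive
 * scanning is updated afterwards.
 */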
2053 static int set_le_sync(struct hci_dev *hdev, void *data)
2054 {
2055         struct mgmt_pending_cmd *cmd = data;
2056         struct mgmt_mode *cp = cmd->param;
2057         u8 val = !!cp->val;
2058         int err;
2059
2060         if (!val) {
2061                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2062                         hci_disable_advertising_sync(hdev);
2063
2064                 if (ext_adv_capable(hdev))
2065                         hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2066         } else {
2067                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2068         }
2069
2070         err = hci_write_le_host_supported_sync(hdev, val, 0);
2071
2072         /* Make sure the controller has a good default for
2073          * advertising data. Restrict the update to when LE
2074          * has actually been enabled. During power on, the
2075          * update in powered_update_hci will take care of it.
2076          */
2077         if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2078                 if (ext_adv_capable(hdev)) {
2079                         int status;
2080
2081                         status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2082                         if (!status)
2083                                 hci_update_scan_rsp_data_sync(hdev, 0x00);
2084                 } else {
2085                         hci_update_adv_data_sync(hdev, 0x00);
2086                         hci_update_scan_rsp_data_sync(hdev, 0x00);
2087                 }
2088
2089                 hci_update_passive_scan(hdev);
2090         }
2091
2092         return err;
2093 }
2094
2095 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2096 {
2097         struct mgmt_mode *cp = data;
2098         struct mgmt_pending_cmd *cmd;
2099         int err;
2100         u8 val, enabled;
2101
2102         bt_dev_dbg(hdev, "sock %p", sk);
2103
2104         if (!lmp_le_capable(hdev))
2105                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2106                                        MGMT_STATUS_NOT_SUPPORTED);
2107
2108         if (cp->val != 0x00 && cp->val != 0x01)
2109                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2110                                        MGMT_STATUS_INVALID_PARAMS);
2111
2112         /* Bluetooth single-mode LE-only controllers, or dual-mode
2113          * controllers configured as LE-only devices, do not allow
2114          * switching LE off. Such controllers either have LE enabled
2115          * explicitly or have had BR/EDR switched off previously.
2116          *
2117          * When trying to enable LE while it is already enabled, a
2118          * positive response is gracefully sent. Trying to disable it,
2119          * however, results in rejection.
2120          */
2121         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2122                 if (cp->val == 0x01)
2123                         return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2124
2125                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2126                                        MGMT_STATUS_REJECTED);
2127         }
2128
2129         hci_dev_lock(hdev);
2130
2131         val = !!cp->val;
2132         enabled = lmp_host_le_capable(hdev);
2133
2134         if (!val)
2135                 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
2136
2137         if (!hdev_is_powered(hdev) || val == enabled) {
2138                 bool changed = false;
2139
2140                 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2141                         hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2142                         changed = true;
2143                 }
2144
2145                 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2146                         hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2147                         changed = true;
2148                 }
2149
2150                 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2151                 if (err < 0)
2152                         goto unlock;
2153
2154                 if (changed)
2155                         err = new_settings(hdev, sk);
2156
2157                 goto unlock;
2158         }
2159
2160         if (pending_find(MGMT_OP_SET_LE, hdev) ||
2161             pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2162                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2163                                       MGMT_STATUS_BUSY);
2164                 goto unlock;
2165         }
2166
2167         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2168         if (!cmd)
2169                 err = -ENOMEM;
2170         else
2171                 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2172                                          set_le_complete);
2173
2174         if (err < 0) {
2175                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2176                                       MGMT_STATUS_FAILED);
2177
2178                 if (cmd)
2179                         mgmt_pending_remove(cmd);
2180         }
2181
2182 unlock:
2183         hci_dev_unlock(hdev);
2184         return err;
2185 }
2186
2187 /* This is a helper function to test for pending mgmt commands that can
2188  * cause Class of Device (CoD) or EIR HCI commands. Only one such pending
2189  * mgmt command is allowed at a time, since otherwise we cannot easily
2190  * track what the current and future values are and, based on that,
2191  * decide whether a new HCI command needs to be sent and with what value.
2192  */
2193 static bool pending_eir_or_class(struct hci_dev *hdev)
2194 {
2195         struct mgmt_pending_cmd *cmd;
2196
2197         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2198                 switch (cmd->opcode) {
2199                 case MGMT_OP_ADD_UUID:
2200                 case MGMT_OP_REMOVE_UUID:
2201                 case MGMT_OP_SET_DEV_CLASS:
2202                 case MGMT_OP_SET_POWERED:
2203                         return true;
2204                 }
2205         }
2206
2207         return false;
2208 }
2209
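/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. Shortened 16-bit and 32-bit UUIDs are aliases
 * of this base UUID with their value carried in bytes 12-15.
 */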
2210 static const u8 bluetooth_base_uuid[] = {
2211                         0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2212                         0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2213 };
2214
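/* Return the smallest representation of a UUID in bits: 128 if it does not
 * use the Bluetooth base, 32 if the aliased value exceeds 16 bits, and 16
 * otherwise.
 */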
2215 static u8 get_uuid_size(const u8 *uuid)
2216 {
2217         u32 val;
2218
2219         if (memcmp(uuid, bluetooth_base_uuid, 12))
2220                 return 128;
2221
2222         val = get_unaligned_le32(&uuid[12]);
2223         if (val > 0xffff)
2224                 return 32;
2225
2226         return 16;
2227 }
2228
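/* Common completion handler for queued UUID and device class updates:
 * replies to the originating command with the current device class and
 * frees the pending command.
 */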
2229 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2230 {
2231         struct mgmt_pending_cmd *cmd = data;
2232
2233         bt_dev_dbg(hdev, "err %d", err);
2234
2235         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2236                           mgmt_status(err), hdev->dev_class, 3);
2237
2238         mgmt_pending_free(cmd);
2239 }
2240
2241 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2242 {
2243         int err;
2244
2245         err = hci_update_class_sync(hdev);
2246         if (err)
2247                 return err;
2248
2249         return hci_update_eir_sync(hdev);
2250 }
2251
2252 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2253 {
2254         struct mgmt_cp_add_uuid *cp = data;
2255         struct mgmt_pending_cmd *cmd;
2256         struct bt_uuid *uuid;
2257         int err;
2258
2259         bt_dev_dbg(hdev, "sock %p", sk);
2260
2261         hci_dev_lock(hdev);
2262
2263         if (pending_eir_or_class(hdev)) {
2264                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2265                                       MGMT_STATUS_BUSY);
2266                 goto failed;
2267         }
2268
2269         uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2270         if (!uuid) {
2271                 err = -ENOMEM;
2272                 goto failed;
2273         }
2274
2275         memcpy(uuid->uuid, cp->uuid, 16);
2276         uuid->svc_hint = cp->svc_hint;
2277         uuid->size = get_uuid_size(cp->uuid);
2278
2279         list_add_tail(&uuid->list, &hdev->uuids);
2280
2281         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2282         if (!cmd) {
2283                 err = -ENOMEM;
2284                 goto failed;
2285         }
2286
2287         err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2288         if (err < 0) {
2289                 mgmt_pending_free(cmd);
2290                 goto failed;
2291         }
2292
2293 failed:
2294         hci_dev_unlock(hdev);
2295         return err;
2296 }
2297
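/* When the controller is powered, arm the delayed service_cache work so
 * that the class/EIR update is deferred instead of being issued right away.
 * Returns true only if the cache was not already active.
 */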
2298 static bool enable_service_cache(struct hci_dev *hdev)
2299 {
2300         if (!hdev_is_powered(hdev))
2301                 return false;
2302
2303         if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2304                 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2305                                    CACHE_TIMEOUT);
2306                 return true;
2307         }
2308
2309         return false;
2310 }
2311
2312 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2313 {
2314         int err;
2315
2316         err = hci_update_class_sync(hdev);
2317         if (err)
2318                 return err;
2319
2320         return hci_update_eir_sync(hdev);
2321 }
2322
2323 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2324                        u16 len)
2325 {
2326         struct mgmt_cp_remove_uuid *cp = data;
2327         struct mgmt_pending_cmd *cmd;
2328         struct bt_uuid *match, *tmp;
2329         static const u8 bt_uuid_any[] = {
2330                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2331         };
2332         int err, found;
2333
2334         bt_dev_dbg(hdev, "sock %p", sk);
2335
2336         hci_dev_lock(hdev);
2337
2338         if (pending_eir_or_class(hdev)) {
2339                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2340                                       MGMT_STATUS_BUSY);
2341                 goto unlock;
2342         }
2343
2344         if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2345                 hci_uuids_clear(hdev);
2346
2347                 if (enable_service_cache(hdev)) {
2348                         err = mgmt_cmd_complete(sk, hdev->id,
2349                                                 MGMT_OP_REMOVE_UUID,
2350                                                 0, hdev->dev_class, 3);
2351                         goto unlock;
2352                 }
2353
2354                 goto update_class;
2355         }
2356
2357         found = 0;
2358
2359         list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2360                 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2361                         continue;
2362
2363                 list_del(&match->list);
2364                 kfree(match);
2365                 found++;
2366         }
2367
2368         if (found == 0) {
2369                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2370                                       MGMT_STATUS_INVALID_PARAMS);
2371                 goto unlock;
2372         }
2373
2374 update_class:
2375         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2376         if (!cmd) {
2377                 err = -ENOMEM;
2378                 goto unlock;
2379         }
2380
2381         err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2382                                  mgmt_class_complete);
2383         if (err < 0)
2384                 mgmt_pending_free(cmd);
2385
2386 unlock:
2387         hci_dev_unlock(hdev);
2388         return err;
2389 }
2390
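/* Runs from the command sync queue for Set Device Class: flush any pending
 * service cache update (refreshing the EIR data) before writing the new
 * class to the controller.
 */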
2391 static int set_class_sync(struct hci_dev *hdev, void *data)
2392 {
2393         int err = 0;
2394
2395         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2396                 cancel_delayed_work_sync(&hdev->service_cache);
2397                 err = hci_update_eir_sync(hdev);
2398         }
2399
2400         if (err)
2401                 return err;
2402
2403         return hci_update_class_sync(hdev);
2404 }
2405
2406 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2407                          u16 len)
2408 {
2409         struct mgmt_cp_set_dev_class *cp = data;
2410         struct mgmt_pending_cmd *cmd;
2411         int err;
2412
2413         bt_dev_dbg(hdev, "sock %p", sk);
2414
2415         if (!lmp_bredr_capable(hdev))
2416                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2417                                        MGMT_STATUS_NOT_SUPPORTED);
2418
2419         hci_dev_lock(hdev);
2420
2421         if (pending_eir_or_class(hdev)) {
2422                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2423                                       MGMT_STATUS_BUSY);
2424                 goto unlock;
2425         }
2426
2427         if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2428                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2429                                       MGMT_STATUS_INVALID_PARAMS);
2430                 goto unlock;
2431         }
2432
2433         hdev->major_class = cp->major;
2434         hdev->minor_class = cp->minor;
2435
2436         if (!hdev_is_powered(hdev)) {
2437                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2438                                         hdev->dev_class, 3);
2439                 goto unlock;
2440         }
2441
2442         cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2443         if (!cmd) {
2444                 err = -ENOMEM;
2445                 goto unlock;
2446         }
2447
2448         err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2449                                  mgmt_class_complete);
2450         if (err < 0)
2451                 mgmt_pending_free(cmd);
2452
2453 unlock:
2454         hci_dev_unlock(hdev);
2455         return err;
2456 }
2457
2458 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2459                           u16 len)
2460 {
2461         struct mgmt_cp_load_link_keys *cp = data;
2462         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2463                                    sizeof(struct mgmt_link_key_info));
2464         u16 key_count, expected_len;
2465         bool changed;
2466         int i;
2467
2468         bt_dev_dbg(hdev, "sock %p", sk);
2469
2470         if (!lmp_bredr_capable(hdev))
2471                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2472                                        MGMT_STATUS_NOT_SUPPORTED);
2473
2474         key_count = __le16_to_cpu(cp->key_count);
2475         if (key_count > max_key_count) {
2476                 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2477                            key_count);
2478                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2479                                        MGMT_STATUS_INVALID_PARAMS);
2480         }
2481
2482         expected_len = struct_size(cp, keys, key_count);
2483         if (expected_len != len) {
2484                 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2485                            expected_len, len);
2486                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2487                                        MGMT_STATUS_INVALID_PARAMS);
2488         }
2489
2490         if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2491                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2492                                        MGMT_STATUS_INVALID_PARAMS);
2493
2494         bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2495                    key_count);
2496
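        /* Validate every entry before modifying any state so that an
         * invalid load request leaves the existing keys untouched.
         */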
2497         for (i = 0; i < key_count; i++) {
2498                 struct mgmt_link_key_info *key = &cp->keys[i];
2499
2500                 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2501                         return mgmt_cmd_status(sk, hdev->id,
2502                                                MGMT_OP_LOAD_LINK_KEYS,
2503                                                MGMT_STATUS_INVALID_PARAMS);
2504         }
2505
2506         hci_dev_lock(hdev);
2507
2508         hci_link_keys_clear(hdev);
2509
2510         if (cp->debug_keys)
2511                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2512         else
2513                 changed = hci_dev_test_and_clear_flag(hdev,
2514                                                       HCI_KEEP_DEBUG_KEYS);
2515
2516         if (changed)
2517                 new_settings(hdev, NULL);
2518
2519         for (i = 0; i < key_count; i++) {
2520                 struct mgmt_link_key_info *key = &cp->keys[i];
2521
2522                 if (hci_is_blocked_key(hdev,
2523                                        HCI_BLOCKED_KEY_TYPE_LINKKEY,
2524                                        key->val)) {
2525                         bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2526                                     &key->addr.bdaddr);
2527                         continue;
2528                 }
2529
2530                 /* Always ignore debug keys and require a new pairing if
2531                  * the user wants to use them.
2532                  */
2533                 if (key->type == HCI_LK_DEBUG_COMBINATION)
2534                         continue;
2535
2536                 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2537                                  key->type, key->pin_len, NULL);
2538         }
2539
2540         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2541
2542         hci_dev_unlock(hdev);
2543
2544         return 0;
2545 }
2546
2547 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2548                            u8 addr_type, struct sock *skip_sk)
2549 {
2550         struct mgmt_ev_device_unpaired ev;
2551
2552         bacpy(&ev.addr.bdaddr, bdaddr);
2553         ev.addr.type = addr_type;
2554
2555         return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2556                           skip_sk);
2557 }
2558
2559 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2560 {
2561         struct mgmt_pending_cmd *cmd = data;
2562         struct mgmt_cp_unpair_device *cp = cmd->param;
2563
2564         if (!err)
2565                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2566
2567         cmd->cmd_complete(cmd, err);
2568         mgmt_pending_free(cmd);
2569 }
2570
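/* Runs from the command sync queue for Unpair Device: if the address still
 * has an active connection, abort it; otherwise there is nothing left to do
 * since unpair_device() has already removed the keys.
 */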
2571 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2572 {
2573         struct mgmt_pending_cmd *cmd = data;
2574         struct mgmt_cp_unpair_device *cp = cmd->param;
2575         struct hci_conn *conn;
2576
2577         if (cp->addr.type == BDADDR_BREDR)
2578                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2579                                                &cp->addr.bdaddr);
2580         else
2581                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2582                                                le_addr_type(cp->addr.type));
2583
2584         if (!conn)
2585                 return 0;
2586
2587         return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2588 }
2589
2590 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2591                          u16 len)
2592 {
2593         struct mgmt_cp_unpair_device *cp = data;
2594         struct mgmt_rp_unpair_device rp;
2595         struct hci_conn_params *params;
2596         struct mgmt_pending_cmd *cmd;
2597         struct hci_conn *conn;
2598         u8 addr_type;
2599         int err;
2600
2601         memset(&rp, 0, sizeof(rp));
2602         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2603         rp.addr.type = cp->addr.type;
2604
2605         if (!bdaddr_type_is_valid(cp->addr.type))
2606                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2607                                          MGMT_STATUS_INVALID_PARAMS,
2608                                          &rp, sizeof(rp));
2609
2610         if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2611                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2612                                          MGMT_STATUS_INVALID_PARAMS,
2613                                          &rp, sizeof(rp));
2614
2615         hci_dev_lock(hdev);
2616
2617         if (!hdev_is_powered(hdev)) {
2618                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2619                                         MGMT_STATUS_NOT_POWERED, &rp,
2620                                         sizeof(rp));
2621                 goto unlock;
2622         }
2623
2624         if (cp->addr.type == BDADDR_BREDR) {
2625                 /* If disconnection is requested, then look up the
2626                  * connection. If the remote device is connected, the
2627                  * connection will later be used to terminate the link.
2628                  *
2629                  * Explicitly setting it to NULL means the link will
2630                  * not be terminated.
2631                  */
2632                 if (cp->disconnect)
2633                         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2634                                                        &cp->addr.bdaddr);
2635                 else
2636                         conn = NULL;
2637
2638                 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2639                 if (err < 0) {
2640                         err = mgmt_cmd_complete(sk, hdev->id,
2641                                                 MGMT_OP_UNPAIR_DEVICE,
2642                                                 MGMT_STATUS_NOT_PAIRED, &rp,
2643                                                 sizeof(rp));
2644                         goto unlock;
2645                 }
2646
2647                 goto done;
2648         }
2649
2650         /* LE address type */
2651         addr_type = le_addr_type(cp->addr.type);
2652
2653         /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2654         err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2655         if (err < 0) {
2656                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2657                                         MGMT_STATUS_NOT_PAIRED, &rp,
2658                                         sizeof(rp));
2659                 goto unlock;
2660         }
2661
2662         conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2663         if (!conn) {
2664                 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2665                 goto done;
2666         }
2667
2668
2669         /* Defer clearing the connection parameters until the connection
2670          * closes, to give a chance of keeping them if re-pairing happens.
2671          */
2672         set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2673
2674         /* Disable auto-connection parameters if present */
2675         params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2676         if (params) {
2677                 if (params->explicit_connect)
2678                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2679                 else
2680                         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2681         }
2682
2683         /* If disconnection is not requested, then clear the connection
2684          * variable so that the link is not terminated.
2685          */
2686         if (!cp->disconnect)
2687                 conn = NULL;
2688
2689 done:
2690         /* If the connection variable is set, then termination of the
2691          * link is requested.
2692          */
2693         if (!conn) {
2694                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2695                                         &rp, sizeof(rp));
2696                 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2697                 goto unlock;
2698         }
2699
2700         cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2701                                sizeof(*cp));
2702         if (!cmd) {
2703                 err = -ENOMEM;
2704                 goto unlock;
2705         }
2706
2707         cmd->cmd_complete = addr_cmd_complete;
2708
2709         err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
2710                                  unpair_device_complete);
2711         if (err < 0)
2712                 mgmt_pending_free(cmd);
2713
2714 unlock:
2715         hci_dev_unlock(hdev);
2716         return err;
2717 }
2718
2719 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2720                       u16 len)
2721 {
2722         struct mgmt_cp_disconnect *cp = data;
2723         struct mgmt_rp_disconnect rp;
2724         struct mgmt_pending_cmd *cmd;
2725         struct hci_conn *conn;
2726         int err;
2727
2728         bt_dev_dbg(hdev, "sock %p", sk);
2729
2730         memset(&rp, 0, sizeof(rp));
2731         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2732         rp.addr.type = cp->addr.type;
2733
2734         if (!bdaddr_type_is_valid(cp->addr.type))
2735                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2736                                          MGMT_STATUS_INVALID_PARAMS,
2737                                          &rp, sizeof(rp));
2738
2739         hci_dev_lock(hdev);
2740
2741         if (!test_bit(HCI_UP, &hdev->flags)) {
2742                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2743                                         MGMT_STATUS_NOT_POWERED, &rp,
2744                                         sizeof(rp));
2745                 goto failed;
2746         }
2747
2748         if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2749                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2750                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
2751                 goto failed;
2752         }
2753
2754         if (cp->addr.type == BDADDR_BREDR)
2755                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2756                                                &cp->addr.bdaddr);
2757         else
2758                 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2759                                                le_addr_type(cp->addr.type));
2760
2761         if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2762                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2763                                         MGMT_STATUS_NOT_CONNECTED, &rp,
2764                                         sizeof(rp));
2765                 goto failed;
2766         }
2767
2768         cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2769         if (!cmd) {
2770                 err = -ENOMEM;
2771                 goto failed;
2772         }
2773
2774         cmd->cmd_complete = generic_cmd_complete;
2775
2776         err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2777         if (err < 0)
2778                 mgmt_pending_remove(cmd);
2779
2780 failed:
2781         hci_dev_unlock(hdev);
2782         return err;
2783 }
2784
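/* Translate an HCI link type and address type into the corresponding mgmt
 * BDADDR_* address type, defaulting to LE random for unknown LE address
 * types and to BR/EDR for non-LE links.
 */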
2785 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2786 {
2787         switch (link_type) {
2788         case LE_LINK:
2789                 switch (addr_type) {
2790                 case ADDR_LE_DEV_PUBLIC:
2791                         return BDADDR_LE_PUBLIC;
2792
2793                 default:
2794                         /* Fallback to LE Random address type */
2795                         return BDADDR_LE_RANDOM;
2796                 }
2797
2798         default:
2799                 /* Fallback to BR/EDR type */
2800                 return BDADDR_BREDR;
2801         }
2802 }
2803
2804 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2805                            u16 data_len)
2806 {
2807         struct mgmt_rp_get_connections *rp;
2808         struct hci_conn *c;
2809         int err;
2810         u16 i;
2811
2812         bt_dev_dbg(hdev, "sock %p", sk);
2813
2814         hci_dev_lock(hdev);
2815
2816         if (!hdev_is_powered(hdev)) {
2817                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2818                                       MGMT_STATUS_NOT_POWERED);
2819                 goto unlock;
2820         }
2821
2822         i = 0;
2823         list_for_each_entry(c, &hdev->conn_hash.list, list) {
2824                 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2825                         i++;
2826         }
2827
2828         rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2829         if (!rp) {
2830                 err = -ENOMEM;
2831                 goto unlock;
2832         }
2833
2834         i = 0;
2835         list_for_each_entry(c, &hdev->conn_hash.list, list) {
2836                 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2837                         continue;
2838                 bacpy(&rp->addr[i].bdaddr, &c->dst);
2839                 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2840                 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2841                         continue;
2842                 i++;
2843         }
2844
2845         rp->conn_count = cpu_to_le16(i);
2846
2847         /* Recalculate length in case of filtered SCO connections, etc */
2848         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2849                                 struct_size(rp, addr, i));
2850
2851         kfree(rp);
2852
2853 unlock:
2854         hci_dev_unlock(hdev);
2855         return err;
2856 }
2857
2858 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2859                                    struct mgmt_cp_pin_code_neg_reply *cp)
2860 {
2861         struct mgmt_pending_cmd *cmd;
2862         int err;
2863
2864         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2865                                sizeof(*cp));
2866         if (!cmd)
2867                 return -ENOMEM;
2868
2869         cmd->cmd_complete = addr_cmd_complete;
2870
2871         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2872                            sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2873         if (err < 0)
2874                 mgmt_pending_remove(cmd);
2875
2876         return err;
2877 }
2878
2879 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2880                           u16 len)
2881 {
2882         struct hci_conn *conn;
2883         struct mgmt_cp_pin_code_reply *cp = data;
2884         struct hci_cp_pin_code_reply reply;
2885         struct mgmt_pending_cmd *cmd;
2886         int err;
2887
2888         bt_dev_dbg(hdev, "sock %p", sk);
2889
2890         hci_dev_lock(hdev);
2891
2892         if (!hdev_is_powered(hdev)) {
2893                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2894                                       MGMT_STATUS_NOT_POWERED);
2895                 goto failed;
2896         }
2897
2898         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2899         if (!conn) {
2900                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2901                                       MGMT_STATUS_NOT_CONNECTED);
2902                 goto failed;
2903         }
2904
2905         if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2906                 struct mgmt_cp_pin_code_neg_reply ncp;
2907
2908                 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2909
2910                 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2911
2912                 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2913                 if (err >= 0)
2914                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2915                                               MGMT_STATUS_INVALID_PARAMS);
2916
2917                 goto failed;
2918         }
2919
2920         cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2921         if (!cmd) {
2922                 err = -ENOMEM;
2923                 goto failed;
2924         }
2925
2926         cmd->cmd_complete = addr_cmd_complete;
2927
2928         bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2929         reply.pin_len = cp->pin_len;
2930         memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2931
2932         err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2933         if (err < 0)
2934                 mgmt_pending_remove(cmd);
2935
2936 failed:
2937         hci_dev_unlock(hdev);
2938         return err;
2939 }
2940
2941 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2942                              u16 len)
2943 {
2944         struct mgmt_cp_set_io_capability *cp = data;
2945
2946         bt_dev_dbg(hdev, "sock %p", sk);
2947
2948         if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2949                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2950                                        MGMT_STATUS_INVALID_PARAMS);
2951
2952         hci_dev_lock(hdev);
2953
2954         hdev->io_capability = cp->io_capability;
2955
2956         bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2957
2958         hci_dev_unlock(hdev);
2959
2960         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2961                                  NULL, 0);
2962 }
2963
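/* Find the pending Pair Device command whose user_data refers to this
 * connection, if any.
 */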
2964 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2965 {
2966         struct hci_dev *hdev = conn->hdev;
2967         struct mgmt_pending_cmd *cmd;
2968
2969         list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2970                 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2971                         continue;
2972
2973                 if (cmd->user_data != conn)
2974                         continue;
2975
2976                 return cmd;
2977         }
2978
2979         return NULL;
2980 }
2981
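/* Finish a Pair Device command: send the response with the given status,
 * detach the pairing callbacks from the connection and drop the references
 * taken while pairing was in progress.
 */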
2982 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2983 {
2984         struct mgmt_rp_pair_device rp;
2985         struct hci_conn *conn = cmd->user_data;
2986         int err;
2987
2988         bacpy(&rp.addr.bdaddr, &conn->dst);
2989         rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2990
2991         err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2992                                 status, &rp, sizeof(rp));
2993
2994         /* So we don't get further callbacks for this connection */
2995         conn->connect_cfm_cb = NULL;
2996         conn->security_cfm_cb = NULL;
2997         conn->disconn_cfm_cb = NULL;
2998
2999         hci_conn_drop(conn);
3000
3001         /* The device is paired so there is no need to remove
3002          * its connection parameters anymore.
3003          */
3004         clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3005
3006         hci_conn_put(conn);
3007
3008         return err;
3009 }
3010
3011 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3012 {
3013         u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3014         struct mgmt_pending_cmd *cmd;
3015
3016         cmd = find_pairing(conn);
3017         if (cmd) {
3018                 cmd->cmd_complete(cmd, status);
3019                 mgmt_pending_remove(cmd);
3020         }
3021 }
3022
3023 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3024 {
3025         struct mgmt_pending_cmd *cmd;
3026
3027         BT_DBG("status %u", status);
3028
3029         cmd = find_pairing(conn);
3030         if (!cmd) {
3031                 BT_DBG("Unable to find a pending command");
3032                 return;
3033         }
3034
3035         cmd->cmd_complete(cmd, mgmt_status(status));
3036         mgmt_pending_remove(cmd);
3037 }
3038
3039 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3040 {
3041         struct mgmt_pending_cmd *cmd;
3042
3043         BT_DBG("status %u", status);
3044
3045         if (!status)
3046                 return;
3047
3048         cmd = find_pairing(conn);
3049         if (!cmd) {
3050                 BT_DBG("Unable to find a pending command");
3051                 return;
3052         }
3053
3054         cmd->cmd_complete(cmd, mgmt_status(status));
3055         mgmt_pending_remove(cmd);
3056 }
3057
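     /* Handler for MGMT_OP_PAIR_DEVICE: validate the address type and IO
      * capability, set up an ACL or LE connection to the peer and register
      * the pairing callbacks. If the link is already connected and secure,
      * the command is completed immediately.
      */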
3058 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3059                        u16 len)
3060 {
3061         struct mgmt_cp_pair_device *cp = data;
3062         struct mgmt_rp_pair_device rp;
3063         struct mgmt_pending_cmd *cmd;
3064         u8 sec_level, auth_type;
3065         struct hci_conn *conn;
3066         int err;
3067
3068         bt_dev_dbg(hdev, "sock %p", sk);
3069
3070         memset(&rp, 0, sizeof(rp));
3071         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3072         rp.addr.type = cp->addr.type;
3073
3074         if (!bdaddr_type_is_valid(cp->addr.type))
3075                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3076                                          MGMT_STATUS_INVALID_PARAMS,
3077                                          &rp, sizeof(rp));
3078
3079         if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3080                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3081                                          MGMT_STATUS_INVALID_PARAMS,
3082                                          &rp, sizeof(rp));
3083
3084         hci_dev_lock(hdev);
3085
3086         if (!hdev_is_powered(hdev)) {
3087                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3088                                         MGMT_STATUS_NOT_POWERED, &rp,
3089                                         sizeof(rp));
3090                 goto unlock;
3091         }
3092
3093         if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3094                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3095                                         MGMT_STATUS_ALREADY_PAIRED, &rp,
3096                                         sizeof(rp));
3097                 goto unlock;
3098         }
3099
3100         sec_level = BT_SECURITY_MEDIUM;
3101         auth_type = HCI_AT_DEDICATED_BONDING;
3102
3103         if (cp->addr.type == BDADDR_BREDR) {
3104                 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3105                                        auth_type, CONN_REASON_PAIR_DEVICE);
3106         } else {
3107                 u8 addr_type = le_addr_type(cp->addr.type);
3108                 struct hci_conn_params *p;
3109
3110                 /* When pairing a new device, the kernel is expected to
3111                  * remember it for future connections. Adding the connection
3112                  * parameter entry ahead of time allows tracking of the
3113                  * peripheral's preferred values and speeds up any further
3114                  * connection establishment.
3115                  *
3116                  * If connection parameters already exist, they are kept and
3117                  * hci_conn_params_add() simply returns the existing entry.
3118                  */
3119                 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
                     if (!p) {
                             err = -ENOMEM;
                             goto unlock;
                     }
3120
3121                 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3122                         p->auto_connect = HCI_AUTO_CONN_DISABLED;
3123
3124                 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3125                                            sec_level, HCI_LE_CONN_TIMEOUT,
3126                                            CONN_REASON_PAIR_DEVICE);
3127         }
3128
3129         if (IS_ERR(conn)) {
3130                 int status;
3131
3132                 if (PTR_ERR(conn) == -EBUSY)
3133                         status = MGMT_STATUS_BUSY;
3134                 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3135                         status = MGMT_STATUS_NOT_SUPPORTED;
3136                 else if (PTR_ERR(conn) == -ECONNREFUSED)
3137                         status = MGMT_STATUS_REJECTED;
3138                 else
3139                         status = MGMT_STATUS_CONNECT_FAILED;
3140
3141                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3142                                         status, &rp, sizeof(rp));
3143                 goto unlock;
3144         }
3145
3146         if (conn->connect_cfm_cb) {
3147                 hci_conn_drop(conn);
3148                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3149                                         MGMT_STATUS_BUSY, &rp, sizeof(rp));
3150                 goto unlock;
3151         }
3152
3153         cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3154         if (!cmd) {
3155                 err = -ENOMEM;
3156                 hci_conn_drop(conn);
3157                 goto unlock;
3158         }
3159
3160         cmd->cmd_complete = pairing_complete;
3161
3162         /* For LE, just connecting isn't proof that the pairing finished */
3163         if (cp->addr.type == BDADDR_BREDR) {
3164                 conn->connect_cfm_cb = pairing_complete_cb;
3165                 conn->security_cfm_cb = pairing_complete_cb;
3166                 conn->disconn_cfm_cb = pairing_complete_cb;
3167         } else {
3168                 conn->connect_cfm_cb = le_pairing_complete_cb;
3169                 conn->security_cfm_cb = le_pairing_complete_cb;
3170                 conn->disconn_cfm_cb = le_pairing_complete_cb;
3171         }
3172
3173         conn->io_capability = cp->io_cap;
3174         cmd->user_data = hci_conn_get(conn);
3175
3176         if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3177             hci_conn_security(conn, sec_level, auth_type, true)) {
3178                 cmd->cmd_complete(cmd, 0);
3179                 mgmt_pending_remove(cmd);
3180         }
3181
3182         err = 0;
3183
3184 unlock:
3185         hci_dev_unlock(hdev);
3186         return err;
3187 }
3188
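     /* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: complete the pending Pair
      * Device command with MGMT_STATUS_CANCELLED, remove any stored link
      * key or SMP pairing data for the address and abort the link if it
      * was created by the pairing attempt.
      */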
3189 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3190                               u16 len)
3191 {
3192         struct mgmt_addr_info *addr = data;
3193         struct mgmt_pending_cmd *cmd;
3194         struct hci_conn *conn;
3195         int err;
3196
3197         bt_dev_dbg(hdev, "sock %p", sk);
3198
3199         hci_dev_lock(hdev);
3200
3201         if (!hdev_is_powered(hdev)) {
3202                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3203                                       MGMT_STATUS_NOT_POWERED);
3204                 goto unlock;
3205         }
3206
3207         cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3208         if (!cmd) {
3209                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3210                                       MGMT_STATUS_INVALID_PARAMS);
3211                 goto unlock;
3212         }
3213
3214         conn = cmd->user_data;
3215
3216         if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3217                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3218                                       MGMT_STATUS_INVALID_PARAMS);
3219                 goto unlock;
3220         }
3221
3222         cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3223         mgmt_pending_remove(cmd);
3224
3225         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3226                                 addr, sizeof(*addr));
3227
3228         /* Since the user doesn't want to proceed with the connection, abort
3229          * any ongoing pairing and then terminate the link if it was created
3230          * because of the pair device action.
3231          */
3232         if (addr->type == BDADDR_BREDR)
3233                 hci_remove_link_key(hdev, &addr->bdaddr);
3234         else
3235                 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3236                                               le_addr_type(addr->type));
3237
3238         if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3239                 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3240
3241 unlock:
3242         hci_dev_unlock(hdev);
3243         return err;
3244 }
3245
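     /* Common helper for the PIN code, user confirm and passkey (negative)
      * reply commands: LE responses are handed to SMP while BR/EDR
      * responses are forwarded to the controller using the given HCI
      * opcode.
      */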
3246 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3247                              struct mgmt_addr_info *addr, u16 mgmt_op,
3248                              u16 hci_op, __le32 passkey)
3249 {
3250         struct mgmt_pending_cmd *cmd;
3251         struct hci_conn *conn;
3252         int err;
3253
3254         hci_dev_lock(hdev);
3255
3256         if (!hdev_is_powered(hdev)) {
3257                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3258                                         MGMT_STATUS_NOT_POWERED, addr,
3259                                         sizeof(*addr));
3260                 goto done;
3261         }
3262
3263         if (addr->type == BDADDR_BREDR)
3264                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3265         else
3266                 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3267                                                le_addr_type(addr->type));
3268
3269         if (!conn) {
3270                 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3271                                         MGMT_STATUS_NOT_CONNECTED, addr,
3272                                         sizeof(*addr));
3273                 goto done;
3274         }
3275
3276         if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3277                 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3278                 if (!err)
3279                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3280                                                 MGMT_STATUS_SUCCESS, addr,
3281                                                 sizeof(*addr));
3282                 else
3283                         err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3284                                                 MGMT_STATUS_FAILED, addr,
3285                                                 sizeof(*addr));
3286
3287                 goto done;
3288         }
3289
3290         cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3291         if (!cmd) {
3292                 err = -ENOMEM;
3293                 goto done;
3294         }
3295
3296         cmd->cmd_complete = addr_cmd_complete;
3297
3298         /* Continue with pairing via HCI */
3299         if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3300                 struct hci_cp_user_passkey_reply cp;
3301
3302                 bacpy(&cp.bdaddr, &addr->bdaddr);
3303                 cp.passkey = passkey;
3304                 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3305         } else
3306                 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3307                                    &addr->bdaddr);
3308
3309         if (err < 0)
3310                 mgmt_pending_remove(cmd);
3311
3312 done:
3313         hci_dev_unlock(hdev);
3314         return err;
3315 }
3316
3317 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3318                               void *data, u16 len)
3319 {
3320         struct mgmt_cp_pin_code_neg_reply *cp = data;
3321
3322         bt_dev_dbg(hdev, "sock %p", sk);
3323
3324         return user_pairing_resp(sk, hdev, &cp->addr,
3325                                 MGMT_OP_PIN_CODE_NEG_REPLY,
3326                                 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3327 }
3328
3329 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3330                               u16 len)
3331 {
3332         struct mgmt_cp_user_confirm_reply *cp = data;
3333
3334         bt_dev_dbg(hdev, "sock %p", sk);
3335
3336         if (len != sizeof(*cp))
3337                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3338                                        MGMT_STATUS_INVALID_PARAMS);
3339
3340         return user_pairing_resp(sk, hdev, &cp->addr,
3341                                  MGMT_OP_USER_CONFIRM_REPLY,
3342                                  HCI_OP_USER_CONFIRM_REPLY, 0);
3343 }
3344
3345 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3346                                   void *data, u16 len)
3347 {
3348         struct mgmt_cp_user_confirm_neg_reply *cp = data;
3349
3350         bt_dev_dbg(hdev, "sock %p", sk);
3351
3352         return user_pairing_resp(sk, hdev, &cp->addr,
3353                                  MGMT_OP_USER_CONFIRM_NEG_REPLY,
3354                                  HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3355 }
3356
3357 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3358                               u16 len)
3359 {
3360         struct mgmt_cp_user_passkey_reply *cp = data;
3361
3362         bt_dev_dbg(hdev, "sock %p", sk);
3363
3364         return user_pairing_resp(sk, hdev, &cp->addr,
3365                                  MGMT_OP_USER_PASSKEY_REPLY,
3366                                  HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3367 }
3368
3369 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3370                                   void *data, u16 len)
3371 {
3372         struct mgmt_cp_user_passkey_neg_reply *cp = data;
3373
3374         bt_dev_dbg(hdev, "sock %p", sk);
3375
3376         return user_pairing_resp(sk, hdev, &cp->addr,
3377                                  MGMT_OP_USER_PASSKEY_NEG_REPLY,
3378                                  HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3379 }
3380
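     /* If the current advertising instance carries data selected by the
      * given flags (e.g. local name or appearance), expire it and schedule
      * the next instance so that updated data gets advertised.
      */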
3381 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3382 {
3383         struct adv_info *adv_instance;
3384
3385         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3386         if (!adv_instance)
3387                 return 0;
3388
3389         /* Stop if the current instance doesn't need to be changed */
3390         if (!(adv_instance->flags & flags))
3391                 return 0;
3392
3393         cancel_adv_timeout(hdev);
3394
3395         adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3396         if (!adv_instance)
3397                 return 0;
3398
3399         hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3400
3401         return 0;
3402 }
3403
3404 static int name_changed_sync(struct hci_dev *hdev, void *data)
3405 {
3406         return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3407 }
3408
3409 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3410 {
3411         struct mgmt_pending_cmd *cmd = data;
3412         struct mgmt_cp_set_local_name *cp = cmd->param;
3413         u8 status = mgmt_status(err);
3414
3415         bt_dev_dbg(hdev, "err %d", err);
3416
3417         if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3418                 return;
3419
3420         if (status) {
3421                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3422                                 status);
3423         } else {
3424                 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3425                                   cp, sizeof(*cp));
3426
3427                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3428                         hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3429         }
3430
3431         mgmt_pending_remove(cmd);
3432 }
3433
3434 static int set_name_sync(struct hci_dev *hdev, void *data)
3435 {
3436         if (lmp_bredr_capable(hdev)) {
3437                 hci_update_name_sync(hdev);
3438                 hci_update_eir_sync(hdev);
3439         }
3440
3441         /* The name is stored in the scan response data, so there is
3442          * no need to update the advertising data here.
3443          */
3444         if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3445                 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3446
3447         return 0;
3448 }
3449
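     /* Handler for MGMT_OP_SET_LOCAL_NAME: return right away when nothing
      * changed, store the names directly while powered off, otherwise
      * queue set_name_sync() to update name, EIR and scan response data.
      */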
3450 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3451                           u16 len)
3452 {
3453         struct mgmt_cp_set_local_name *cp = data;
3454         struct mgmt_pending_cmd *cmd;
3455         int err;
3456
3457         bt_dev_dbg(hdev, "sock %p", sk);
3458
3459         hci_dev_lock(hdev);
3460
3461         /* If the old values are the same as the new ones, just return a
3462          * direct command complete event.
3463          */
3464         if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3465             !memcmp(hdev->short_name, cp->short_name,
3466                     sizeof(hdev->short_name))) {
3467                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3468                                         data, len);
3469                 goto failed;
3470         }
3471
3472         memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3473
3474         if (!hdev_is_powered(hdev)) {
3475                 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3476
3477                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3478                                         data, len);
3479                 if (err < 0)
3480                         goto failed;
3481
3482                 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3483                                          len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3484                 ext_info_changed(hdev, sk);
3485
3486                 goto failed;
3487         }
3488
3489         cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3490         if (!cmd)
3491                 err = -ENOMEM;
3492         else
3493                 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3494                                          set_name_complete);
3495
3496         if (err < 0) {
3497                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3498                                       MGMT_STATUS_FAILED);
3499
3500                 if (cmd)
3501                         mgmt_pending_remove(cmd);
3502
3503                 goto failed;
3504         }
3505
3506         memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3507
3508 failed:
3509         hci_dev_unlock(hdev);
3510         return err;
3511 }
3512
3513 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3514 {
3515         return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3516 }
3517
3518 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3519                           u16 len)
3520 {
3521         struct mgmt_cp_set_appearance *cp = data;
3522         u16 appearance;
3523         int err;
3524
3525         bt_dev_dbg(hdev, "sock %p", sk);
3526
3527         if (!lmp_le_capable(hdev))
3528                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3529                                        MGMT_STATUS_NOT_SUPPORTED);
3530
3531         appearance = le16_to_cpu(cp->appearance);
3532
3533         hci_dev_lock(hdev);
3534
3535         if (hdev->appearance != appearance) {
3536                 hdev->appearance = appearance;
3537
3538                 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3539                         hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3540                                            NULL);
3541
3542                 ext_info_changed(hdev, sk);
3543         }
3544
3545         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3546                                 0);
3547
3548         hci_dev_unlock(hdev);
3549
3550         return err;
3551 }
3552
3553 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3554                                  void *data, u16 len)
3555 {
3556         struct mgmt_rp_get_phy_configuration rp;
3557
3558         bt_dev_dbg(hdev, "sock %p", sk);
3559
3560         hci_dev_lock(hdev);
3561
3562         memset(&rp, 0, sizeof(rp));
3563
3564         rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3565         rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3566         rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3567
3568         hci_dev_unlock(hdev);
3569
3570         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3571                                  &rp, sizeof(rp));
3572 }
3573
3574 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3575 {
3576         struct mgmt_ev_phy_configuration_changed ev;
3577
3578         memset(&ev, 0, sizeof(ev));
3579
3580         ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3581
3582         return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3583                           sizeof(ev), skip);
3584 }
3585
3586 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3587 {
3588         struct mgmt_pending_cmd *cmd = data;
3589         struct sk_buff *skb = cmd->skb;
3590         u8 status = mgmt_status(err);
3591
3592         if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3593                 return;
3594
3595         if (!status) {
3596                 if (!skb)
3597                         status = MGMT_STATUS_FAILED;
3598                 else if (IS_ERR(skb))
3599                         status = mgmt_status(PTR_ERR(skb));
3600                 else
3601                         status = mgmt_status(skb->data[0]);
3602         }
3603
3604         bt_dev_dbg(hdev, "status %d", status);
3605
3606         if (status) {
3607                 mgmt_cmd_status(cmd->sk, hdev->id,
3608                                 MGMT_OP_SET_PHY_CONFIGURATION, status);
3609         } else {
3610                 mgmt_cmd_complete(cmd->sk, hdev->id,
3611                                   MGMT_OP_SET_PHY_CONFIGURATION, 0,
3612                                   NULL, 0);
3613
3614                 mgmt_phy_configuration_changed(hdev, cmd->sk);
3615         }
3616
3617         if (skb && !IS_ERR(skb))
3618                 kfree_skb(skb);
3619
3620         mgmt_pending_remove(cmd);
3621 }
3622
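     /* Translate the selected PHYs into an HCI LE Set Default PHY command:
      * all_phys bit 0 means no TX preference and bit 1 no RX preference.
      * The command is issued synchronously and the resulting skb is kept
      * in cmd->skb for set_default_phy_complete().
      */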
3623 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3624 {
3625         struct mgmt_pending_cmd *cmd = data;
3626         struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3627         struct hci_cp_le_set_default_phy cp_phy;
3628         u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3629
3630         memset(&cp_phy, 0, sizeof(cp_phy));
3631
3632         if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3633                 cp_phy.all_phys |= 0x01;
3634
3635         if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3636                 cp_phy.all_phys |= 0x02;
3637
3638         if (selected_phys & MGMT_PHY_LE_1M_TX)
3639                 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3640
3641         if (selected_phys & MGMT_PHY_LE_2M_TX)
3642                 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3643
3644         if (selected_phys & MGMT_PHY_LE_CODED_TX)
3645                 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3646
3647         if (selected_phys & MGMT_PHY_LE_1M_RX)
3648                 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3649
3650         if (selected_phys & MGMT_PHY_LE_2M_RX)
3651                 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3652
3653         if (selected_phys & MGMT_PHY_LE_CODED_RX)
3654                 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3655
3656         cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3657                                    sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3658
3659         return 0;
3660 }
3661
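     /* Handler for MGMT_OP_SET_PHY_CONFIGURATION: reject selections
      * outside the supported and configurable PHYs, map the BR/EDR PHY
      * bits onto ACL packet types and queue set_default_phy_sync() when
      * the LE PHY selection changed.
      */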
3662 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3663                                  void *data, u16 len)
3664 {
3665         struct mgmt_cp_set_phy_configuration *cp = data;
3666         struct mgmt_pending_cmd *cmd;
3667         u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3668         u16 pkt_type = (HCI_DH1 | HCI_DM1);
3669         bool changed = false;
3670         int err;
3671
3672         bt_dev_dbg(hdev, "sock %p", sk);
3673
3674         configurable_phys = get_configurable_phys(hdev);
3675         supported_phys = get_supported_phys(hdev);
3676         selected_phys = __le32_to_cpu(cp->selected_phys);
3677
3678         if (selected_phys & ~supported_phys)
3679                 return mgmt_cmd_status(sk, hdev->id,
3680                                        MGMT_OP_SET_PHY_CONFIGURATION,
3681                                        MGMT_STATUS_INVALID_PARAMS);
3682
3683         unconfigure_phys = supported_phys & ~configurable_phys;
3684
3685         if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3686                 return mgmt_cmd_status(sk, hdev->id,
3687                                        MGMT_OP_SET_PHY_CONFIGURATION,
3688                                        MGMT_STATUS_INVALID_PARAMS);
3689
3690         if (selected_phys == get_selected_phys(hdev))
3691                 return mgmt_cmd_complete(sk, hdev->id,
3692                                          MGMT_OP_SET_PHY_CONFIGURATION,
3693                                          0, NULL, 0);
3694
3695         hci_dev_lock(hdev);
3696
3697         if (!hdev_is_powered(hdev)) {
3698                 err = mgmt_cmd_status(sk, hdev->id,
3699                                       MGMT_OP_SET_PHY_CONFIGURATION,
3700                                       MGMT_STATUS_REJECTED);
3701                 goto unlock;
3702         }
3703
3704         if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3705                 err = mgmt_cmd_status(sk, hdev->id,
3706                                       MGMT_OP_SET_PHY_CONFIGURATION,
3707                                       MGMT_STATUS_BUSY);
3708                 goto unlock;
3709         }
3710
3711         if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3712                 pkt_type |= (HCI_DH3 | HCI_DM3);
3713         else
3714                 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3715
3716         if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3717                 pkt_type |= (HCI_DH5 | HCI_DM5);
3718         else
3719                 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3720
3721         if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3722                 pkt_type &= ~HCI_2DH1;
3723         else
3724                 pkt_type |= HCI_2DH1;
3725
3726         if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3727                 pkt_type &= ~HCI_2DH3;
3728         else
3729                 pkt_type |= HCI_2DH3;
3730
3731         if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3732                 pkt_type &= ~HCI_2DH5;
3733         else
3734                 pkt_type |= HCI_2DH5;
3735
3736         if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3737                 pkt_type &= ~HCI_3DH1;
3738         else
3739                 pkt_type |= HCI_3DH1;
3740
3741         if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3742                 pkt_type &= ~HCI_3DH3;
3743         else
3744                 pkt_type |= HCI_3DH3;
3745
3746         if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3747                 pkt_type &= ~HCI_3DH5;
3748         else
3749                 pkt_type |= HCI_3DH5;
3750
3751         if (pkt_type != hdev->pkt_type) {
3752                 hdev->pkt_type = pkt_type;
3753                 changed = true;
3754         }
3755
3756         if ((selected_phys & MGMT_PHY_LE_MASK) ==
3757             (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3758                 if (changed)
3759                         mgmt_phy_configuration_changed(hdev, sk);
3760
3761                 err = mgmt_cmd_complete(sk, hdev->id,
3762                                         MGMT_OP_SET_PHY_CONFIGURATION,
3763                                         0, NULL, 0);
3764
3765                 goto unlock;
3766         }
3767
3768         cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3769                                len);
3770         if (!cmd)
3771                 err = -ENOMEM;
3772         else
3773                 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
3774                                          set_default_phy_complete);
3775
3776         if (err < 0) {
3777                 err = mgmt_cmd_status(sk, hdev->id,
3778                                       MGMT_OP_SET_PHY_CONFIGURATION,
3779                                       MGMT_STATUS_FAILED);
3780
3781                 if (cmd)
3782                         mgmt_pending_remove(cmd);
3783         }
3784
3785 unlock:
3786         hci_dev_unlock(hdev);
3787
3788         return err;
3789 }
3790
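     /* Handler for MGMT_OP_SET_BLOCKED_KEYS: validate the key count
      * against the command length and replace hdev->blocked_keys with the
      * supplied list.
      */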
3791 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3792                             u16 len)
3793 {
3794         int err = MGMT_STATUS_SUCCESS;
3795         struct mgmt_cp_set_blocked_keys *keys = data;
3796         const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3797                                    sizeof(struct mgmt_blocked_key_info));
3798         u16 key_count, expected_len;
3799         int i;
3800
3801         bt_dev_dbg(hdev, "sock %p", sk);
3802
3803         key_count = __le16_to_cpu(keys->key_count);
3804         if (key_count > max_key_count) {
3805                 bt_dev_err(hdev, "too big key_count value %u", key_count);
3806                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3807                                        MGMT_STATUS_INVALID_PARAMS);
3808         }
3809
3810         expected_len = struct_size(keys, keys, key_count);
3811         if (expected_len != len) {
3812                 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3813                            expected_len, len);
3814                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3815                                        MGMT_STATUS_INVALID_PARAMS);
3816         }
3817
3818         hci_dev_lock(hdev);
3819
3820         hci_blocked_keys_clear(hdev);
3821
3822         for (i = 0; i < key_count; ++i) {
3823                 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3824
3825                 if (!b) {
3826                         err = MGMT_STATUS_NO_RESOURCES;
3827                         break;
3828                 }
3829
3830                 b->type = keys->keys[i].type;
3831                 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3832                 list_add_rcu(&b->list, &hdev->blocked_keys);
3833         }
3834         hci_dev_unlock(hdev);
3835
3836         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3837                                 err, NULL, 0);
3838 }
3839
3840 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3841                                void *data, u16 len)
3842 {
3843         struct mgmt_mode *cp = data;
3844         int err;
3845         bool changed = false;
3846
3847         bt_dev_dbg(hdev, "sock %p", sk);
3848
3849         if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3850                 return mgmt_cmd_status(sk, hdev->id,
3851                                        MGMT_OP_SET_WIDEBAND_SPEECH,
3852                                        MGMT_STATUS_NOT_SUPPORTED);
3853
3854         if (cp->val != 0x00 && cp->val != 0x01)
3855                 return mgmt_cmd_status(sk, hdev->id,
3856                                        MGMT_OP_SET_WIDEBAND_SPEECH,
3857                                        MGMT_STATUS_INVALID_PARAMS);
3858
3859         hci_dev_lock(hdev);
3860
3861         if (hdev_is_powered(hdev) &&
3862             !!cp->val != hci_dev_test_flag(hdev,
3863                                            HCI_WIDEBAND_SPEECH_ENABLED)) {
3864                 err = mgmt_cmd_status(sk, hdev->id,
3865                                       MGMT_OP_SET_WIDEBAND_SPEECH,
3866                                       MGMT_STATUS_REJECTED);
3867                 goto unlock;
3868         }
3869
3870         if (cp->val)
3871                 changed = !hci_dev_test_and_set_flag(hdev,
3872                                                    HCI_WIDEBAND_SPEECH_ENABLED);
3873         else
3874                 changed = hci_dev_test_and_clear_flag(hdev,
3875                                                    HCI_WIDEBAND_SPEECH_ENABLED);
3876
3877         err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3878         if (err < 0)
3879                 goto unlock;
3880
3881         if (changed)
3882                 err = new_settings(hdev, sk);
3883
3884 unlock:
3885         hci_dev_unlock(hdev);
3886         return err;
3887 }
3888
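     /* Handler for MGMT_OP_READ_CONTROLLER_CAP: report security
      * capabilities (public key validation, encryption key size
      * enforcement, maximum key sizes and, when available, the LE TX
      * power range) as EIR-encoded fields.
      */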
3889 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3890                                void *data, u16 data_len)
3891 {
3892         char buf[20];
3893         struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3894         u16 cap_len = 0;
3895         u8 flags = 0;
3896         u8 tx_power_range[2];
3897
3898         bt_dev_dbg(hdev, "sock %p", sk);
3899
3900         memset(&buf, 0, sizeof(buf));
3901
3902         hci_dev_lock(hdev);
3903
3904         /* When the Read Simple Pairing Options command is supported, then
3905          * remote public key validation is supported.
3906          *
3907          * Alternatively, when Microsoft extensions are available, they can
3908          * indicate support for public key validation as well.
3909          */
3910         if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3911                 flags |= 0x01;  /* Remote public key validation (BR/EDR) */
3912
3913         flags |= 0x02;          /* Remote public key validation (LE) */
3914
3915         /* When the Read Encryption Key Size command is supported, then the
3916          * encryption key size is enforced.
3917          */
3918         if (hdev->commands[20] & 0x10)
3919                 flags |= 0x04;  /* Encryption key size enforcement (BR/EDR) */
3920
3921         flags |= 0x08;          /* Encryption key size enforcement (LE) */
3922
3923         cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3924                                   &flags, 1);
3925
3926         /* When the Read Simple Pairing Options command is supported, the
3927          * maximum encryption key size information is also provided.
3928          */
3929         if (hdev->commands[41] & 0x08)
3930                 cap_len = eir_append_le16(rp->cap, cap_len,
3931                                           MGMT_CAP_MAX_ENC_KEY_SIZE,
3932                                           hdev->max_enc_key_size);
3933
3934         cap_len = eir_append_le16(rp->cap, cap_len,
3935                                   MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3936                                   SMP_MAX_ENC_KEY_SIZE);
3937
3938         /* Append the min/max LE tx power parameters if we were able to fetch
3939          * them from the controller.
3940          */
3941         if (hdev->commands[38] & 0x80) {
3942                 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3943                 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3944                 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3945                                           tx_power_range, 2);
3946         }
3947
3948         rp->cap_len = cpu_to_le16(cap_len);
3949
3950         hci_dev_unlock(hdev);
3951
3952         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3953                                  rp, sizeof(*rp) + cap_len);
3954 }
3955
3956 #ifdef CONFIG_BT_FEATURE_DEBUG
3957 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3958 static const u8 debug_uuid[16] = {
3959         0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3960         0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3961 };
3962 #endif
3963
3964 /* 330859bc-7506-492d-9370-9a6f0614037f */
3965 static const u8 quality_report_uuid[16] = {
3966         0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
3967         0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
3968 };
3969
3970 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
3971 static const u8 offload_codecs_uuid[16] = {
3972         0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
3973         0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
3974 };
3975
3976 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3977 static const u8 le_simultaneous_roles_uuid[16] = {
3978         0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3979         0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3980 };
3981
3982 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3983 static const u8 rpa_resolution_uuid[16] = {
3984         0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3985         0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
3986 };
3987
3988 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
3989 static const u8 iso_socket_uuid[16] = {
3990         0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
3991         0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
3992 };
3993
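     /* Handler for MGMT_OP_READ_EXP_FEATURES_INFO: report the UUID and
      * current flags of every experimental feature available for this
      * index (or globally when no controller index is given) and enable
      * the experimental feature change events for this socket.
      */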
3994 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3995                                   void *data, u16 data_len)
3996 {
3997         char buf[122];   /* Enough space for 6 features: 2 + 20 * 6 */
3998         struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3999         u16 idx = 0;
4000         u32 flags;
4001
4002         bt_dev_dbg(hdev, "sock %p", sk);
4003
4004         memset(&buf, 0, sizeof(buf));
4005
4006 #ifdef CONFIG_BT_FEATURE_DEBUG
4007         if (!hdev) {
4008                 flags = bt_dbg_get() ? BIT(0) : 0;
4009
4010                 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4011                 rp->features[idx].flags = cpu_to_le32(flags);
4012                 idx++;
4013         }
4014 #endif
4015
4016         if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4017                 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4018                         flags = BIT(0);
4019                 else
4020                         flags = 0;
4021
4022                 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4023                 rp->features[idx].flags = cpu_to_le32(flags);
4024                 idx++;
4025         }
4026
4027         if (hdev && ll_privacy_capable(hdev)) {
4028                 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4029                         flags = BIT(0) | BIT(1);
4030                 else
4031                         flags = BIT(1);
4032
4033                 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4034                 rp->features[idx].flags = cpu_to_le32(flags);
4035                 idx++;
4036         }
4037
4038         if (hdev && (aosp_has_quality_report(hdev) ||
4039                      hdev->set_quality_report)) {
4040                 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4041                         flags = BIT(0);
4042                 else
4043                         flags = 0;
4044
4045                 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4046                 rp->features[idx].flags = cpu_to_le32(flags);
4047                 idx++;
4048         }
4049
4050         if (hdev && hdev->get_data_path_id) {
4051                 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4052                         flags = BIT(0);
4053                 else
4054                         flags = 0;
4055
4056                 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4057                 rp->features[idx].flags = cpu_to_le32(flags);
4058                 idx++;
4059         }
4060
4061         if (IS_ENABLED(CONFIG_BT_LE)) {
4062                 flags = iso_enabled() ? BIT(0) : 0;
4063                 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4064                 rp->features[idx].flags = cpu_to_le32(flags);
4065                 idx++;
4066         }
4067
4068         rp->feature_count = cpu_to_le16(idx);
4069
4070         /* After reading the experimental features information, enable
4071          * the events to update the client on any future change.
4072          */
4073         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4074
4075         return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4076                                  MGMT_OP_READ_EXP_FEATURES_INFO,
4077                                  0, rp, sizeof(*rp) + (20 * idx));
4078 }
4079
4080 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4081                                           struct sock *skip)
4082 {
4083         struct mgmt_ev_exp_feature_changed ev;
4084
4085         memset(&ev, 0, sizeof(ev));
4086         memcpy(ev.uuid, rpa_resolution_uuid, 16);
4087         ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4088
4089         /* Do we need to be atomic with the conn_flags? */
4090         if (enabled && privacy_mode_capable(hdev))
4091                 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4092         else
4093                 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4094
4095         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4096                                   &ev, sizeof(ev),
4097                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4098
4099 }
4100
4101 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4102                                bool enabled, struct sock *skip)
4103 {
4104         struct mgmt_ev_exp_feature_changed ev;
4105
4106         memset(&ev, 0, sizeof(ev));
4107         memcpy(ev.uuid, uuid, 16);
4108         ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4109
4110         return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4111                                   &ev, sizeof(ev),
4112                                   HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4113 }
4114
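     /* Initializer for an exp_features[] entry; for example
      * EXP_FEAT(debug_uuid, set_debug_func) expands to
      * { .uuid = debug_uuid, .set_func = set_debug_func }.
      */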
4115 #define EXP_FEAT(_uuid, _set_func)      \
4116 {                                       \
4117         .uuid = _uuid,                  \
4118         .set_func = _set_func,          \
4119 }
4120
4121 /* The zero key uuid is special. Multiple exp features are set through it. */
4122 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4123                              struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4124 {
4125         struct mgmt_rp_set_exp_feature rp;
4126
4127         memset(rp.uuid, 0, 16);
4128         rp.flags = cpu_to_le32(0);
4129
4130 #ifdef CONFIG_BT_FEATURE_DEBUG
4131         if (!hdev) {
4132                 bool changed = bt_dbg_get();
4133
4134                 bt_dbg_set(false);
4135
4136                 if (changed)
4137                         exp_feature_changed(NULL, ZERO_KEY, false, sk);
4138         }
4139 #endif
4140
4141         if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4142                 bool changed;
4143
4144                 changed = hci_dev_test_and_clear_flag(hdev,
4145                                                       HCI_ENABLE_LL_PRIVACY);
4146                 if (changed)
4147                         exp_feature_changed(hdev, rpa_resolution_uuid, false,
4148                                             sk);
4149         }
4150
4151         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4152
4153         return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4154                                  MGMT_OP_SET_EXP_FEATURE, 0,
4155                                  &rp, sizeof(rp));
4156 }
4157
4158 #ifdef CONFIG_BT_FEATURE_DEBUG
4159 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4160                           struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4161 {
4162         struct mgmt_rp_set_exp_feature rp;
4163
4164         bool val, changed;
4165         int err;
4166
4167         /* Command requires the use of the non-controller index */
4168         if (hdev)
4169                 return mgmt_cmd_status(sk, hdev->id,
4170                                        MGMT_OP_SET_EXP_FEATURE,
4171                                        MGMT_STATUS_INVALID_INDEX);
4172
4173         /* Parameters are limited to a single octet */
4174         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4175                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4176                                        MGMT_OP_SET_EXP_FEATURE,
4177                                        MGMT_STATUS_INVALID_PARAMS);
4178
4179         /* Only boolean on/off is supported */
4180         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4181                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4182                                        MGMT_OP_SET_EXP_FEATURE,
4183                                        MGMT_STATUS_INVALID_PARAMS);
4184
4185         val = !!cp->param[0];
4186         changed = val ? !bt_dbg_get() : bt_dbg_get();
4187         bt_dbg_set(val);
4188
4189         memcpy(rp.uuid, debug_uuid, 16);
4190         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4191
4192         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4193
4194         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4195                                 MGMT_OP_SET_EXP_FEATURE, 0,
4196                                 &rp, sizeof(rp));
4197
4198         if (changed)
4199                 exp_feature_changed(hdev, debug_uuid, val, sk);
4200
4201         return err;
4202 }
4203 #endif
4204
4205 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4206                                    struct mgmt_cp_set_exp_feature *cp,
4207                                    u16 data_len)
4208 {
4209         struct mgmt_rp_set_exp_feature rp;
4210         bool val, changed;
4211         int err;
4212         u32 flags;
4213
4214         /* Command requires the use of the controller index */
4215         if (!hdev)
4216                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4217                                        MGMT_OP_SET_EXP_FEATURE,
4218                                        MGMT_STATUS_INVALID_INDEX);
4219
4220         /* Changes can only be made when controller is powered down */
4221         if (hdev_is_powered(hdev))
4222                 return mgmt_cmd_status(sk, hdev->id,
4223                                        MGMT_OP_SET_EXP_FEATURE,
4224                                        MGMT_STATUS_REJECTED);
4225
4226         /* Parameters are limited to a single octet */
4227         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4228                 return mgmt_cmd_status(sk, hdev->id,
4229                                        MGMT_OP_SET_EXP_FEATURE,
4230                                        MGMT_STATUS_INVALID_PARAMS);
4231
4232         /* Only boolean on/off is supported */
4233         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4234                 return mgmt_cmd_status(sk, hdev->id,
4235                                        MGMT_OP_SET_EXP_FEATURE,
4236                                        MGMT_STATUS_INVALID_PARAMS);
4237
4238         val = !!cp->param[0];
4239
4240         if (val) {
4241                 changed = !hci_dev_test_and_set_flag(hdev,
4242                                                      HCI_ENABLE_LL_PRIVACY);
4243                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4244
4245                 /* Enable LL privacy + supported settings changed */
4246                 flags = BIT(0) | BIT(1);
4247         } else {
4248                 changed = hci_dev_test_and_clear_flag(hdev,
4249                                                       HCI_ENABLE_LL_PRIVACY);
4250
4251                 /* Disable LL privacy + supported settings changed */
4252                 flags = BIT(1);
4253         }
4254
4255         memcpy(rp.uuid, rpa_resolution_uuid, 16);
4256         rp.flags = cpu_to_le32(flags);
4257
4258         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4259
4260         err = mgmt_cmd_complete(sk, hdev->id,
4261                                 MGMT_OP_SET_EXP_FEATURE, 0,
4262                                 &rp, sizeof(rp));
4263
4264         if (changed)
4265                 exp_ll_privacy_feature_changed(val, hdev, sk);
4266
4267         return err;
4268 }
4269
4270 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4271                                    struct mgmt_cp_set_exp_feature *cp,
4272                                    u16 data_len)
4273 {
4274         struct mgmt_rp_set_exp_feature rp;
4275         bool val, changed;
4276         int err;
4277
4278         /* Command requires the use of a valid controller index */
4279         if (!hdev)
4280                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4281                                        MGMT_OP_SET_EXP_FEATURE,
4282                                        MGMT_STATUS_INVALID_INDEX);
4283
4284         /* Parameters are limited to a single octet */
4285         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4286                 return mgmt_cmd_status(sk, hdev->id,
4287                                        MGMT_OP_SET_EXP_FEATURE,
4288                                        MGMT_STATUS_INVALID_PARAMS);
4289
4290         /* Only boolean on/off is supported */
4291         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4292                 return mgmt_cmd_status(sk, hdev->id,
4293                                        MGMT_OP_SET_EXP_FEATURE,
4294                                        MGMT_STATUS_INVALID_PARAMS);
4295
4296         hci_req_sync_lock(hdev);
4297
4298         val = !!cp->param[0];
4299         changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4300
4301         if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4302                 err = mgmt_cmd_status(sk, hdev->id,
4303                                       MGMT_OP_SET_EXP_FEATURE,
4304                                       MGMT_STATUS_NOT_SUPPORTED);
4305                 goto unlock_quality_report;
4306         }
4307
4308         if (changed) {
4309                 if (hdev->set_quality_report)
4310                         err = hdev->set_quality_report(hdev, val);
4311                 else
4312                         err = aosp_set_quality_report(hdev, val);
4313
4314                 if (err) {
4315                         err = mgmt_cmd_status(sk, hdev->id,
4316                                               MGMT_OP_SET_EXP_FEATURE,
4317                                               MGMT_STATUS_FAILED);
4318                         goto unlock_quality_report;
4319                 }
4320
4321                 if (val)
4322                         hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4323                 else
4324                         hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4325         }
4326
4327         bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4328
4329         memcpy(rp.uuid, quality_report_uuid, 16);
4330         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4331         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4332
4333         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4334                                 &rp, sizeof(rp));
4335
4336         if (changed)
4337                 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4338
4339 unlock_quality_report:
4340         hci_req_sync_unlock(hdev);
4341         return err;
4342 }
4343
4344 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4345                                   struct mgmt_cp_set_exp_feature *cp,
4346                                   u16 data_len)
4347 {
4348         bool val, changed;
4349         int err;
4350         struct mgmt_rp_set_exp_feature rp;
4351
4352         /* Command requires the use of a valid controller index */
4353         if (!hdev)
4354                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4355                                        MGMT_OP_SET_EXP_FEATURE,
4356                                        MGMT_STATUS_INVALID_INDEX);
4357
4358         /* Parameters are limited to a single octet */
4359         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4360                 return mgmt_cmd_status(sk, hdev->id,
4361                                        MGMT_OP_SET_EXP_FEATURE,
4362                                        MGMT_STATUS_INVALID_PARAMS);
4363
4364         /* Only boolean on/off is supported */
4365         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4366                 return mgmt_cmd_status(sk, hdev->id,
4367                                        MGMT_OP_SET_EXP_FEATURE,
4368                                        MGMT_STATUS_INVALID_PARAMS);
4369
4370         val = !!cp->param[0];
4371         changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4372
4373         if (!hdev->get_data_path_id) {
4374                 return mgmt_cmd_status(sk, hdev->id,
4375                                        MGMT_OP_SET_EXP_FEATURE,
4376                                        MGMT_STATUS_NOT_SUPPORTED);
4377         }
4378
4379         if (changed) {
4380                 if (val)
4381                         hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4382                 else
4383                         hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4384         }
4385
4386         bt_dev_info(hdev, "offload codecs enable %d changed %d",
4387                     val, changed);
4388
4389         memcpy(rp.uuid, offload_codecs_uuid, 16);
4390         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4391         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4392         err = mgmt_cmd_complete(sk, hdev->id,
4393                                 MGMT_OP_SET_EXP_FEATURE, 0,
4394                                 &rp, sizeof(rp));
4395
4396         if (changed)
4397                 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4398
4399         return err;
4400 }
4401
4402 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4403                                           struct mgmt_cp_set_exp_feature *cp,
4404                                           u16 data_len)
4405 {
4406         bool val, changed;
4407         int err;
4408         struct mgmt_rp_set_exp_feature rp;
4409
4410         /* Command requires the use of a valid controller index */
4411         if (!hdev)
4412                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4413                                        MGMT_OP_SET_EXP_FEATURE,
4414                                        MGMT_STATUS_INVALID_INDEX);
4415
4416         /* Parameters are limited to a single octet */
4417         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4418                 return mgmt_cmd_status(sk, hdev->id,
4419                                        MGMT_OP_SET_EXP_FEATURE,
4420                                        MGMT_STATUS_INVALID_PARAMS);
4421
4422         /* Only boolean on/off is supported */
4423         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4424                 return mgmt_cmd_status(sk, hdev->id,
4425                                        MGMT_OP_SET_EXP_FEATURE,
4426                                        MGMT_STATUS_INVALID_PARAMS);
4427
4428         val = !!cp->param[0];
4429         changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4430
4431         if (!hci_dev_le_state_simultaneous(hdev)) {
4432                 return mgmt_cmd_status(sk, hdev->id,
4433                                        MGMT_OP_SET_EXP_FEATURE,
4434                                        MGMT_STATUS_NOT_SUPPORTED);
4435         }
4436
4437         if (changed) {
4438                 if (val)
4439                         hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4440                 else
4441                         hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4442         }
4443
4444         bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4445                     val, changed);
4446
4447         memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4448         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4449         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4450         err = mgmt_cmd_complete(sk, hdev->id,
4451                                 MGMT_OP_SET_EXP_FEATURE, 0,
4452                                 &rp, sizeof(rp));
4453
4454         if (changed)
4455                 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4456
4457         return err;
4458 }
4459
4460 #ifdef CONFIG_BT_LE
4461 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4462                                struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4463 {
4464         struct mgmt_rp_set_exp_feature rp;
4465         bool val, changed = false;
4466         int err;
4467
4468         /* The command must be issued without a controller index */
4469         if (hdev)
4470                 return mgmt_cmd_status(sk, hdev->id,
4471                                        MGMT_OP_SET_EXP_FEATURE,
4472                                        MGMT_STATUS_INVALID_INDEX);
4473
4474         /* Parameters are limited to a single octet */
4475         if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4476                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4477                                        MGMT_OP_SET_EXP_FEATURE,
4478                                        MGMT_STATUS_INVALID_PARAMS);
4479
4480         /* Only boolean on/off is supported */
4481         if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4482                 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4483                                        MGMT_OP_SET_EXP_FEATURE,
4484                                        MGMT_STATUS_INVALID_PARAMS);
4485
4486         val = cp->param[0] ? true : false;
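             /* Registering the experimental ISO socket feature brings the ISO
              * protocol up via iso_init(); disabling it tears it down again
              * with iso_exit().
              */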
4487         if (val)
4488                 err = iso_init();
4489         else
4490                 err = iso_exit();
4491
4492         if (!err)
4493                 changed = true;
4494
4495         memcpy(rp.uuid, iso_socket_uuid, 16);
4496         rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4497
4498         hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4499
4500         err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4501                                 MGMT_OP_SET_EXP_FEATURE, 0,
4502                                 &rp, sizeof(rp));
4503
4504         if (changed)
4505                 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4506
4507         return err;
4508 }
4509 #endif
4510
4511 static const struct mgmt_exp_feature {
4512         const u8 *uuid;
4513         int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4514                         struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4515 } exp_features[] = {
4516         EXP_FEAT(ZERO_KEY, set_zero_key_func),
4517 #ifdef CONFIG_BT_FEATURE_DEBUG
4518         EXP_FEAT(debug_uuid, set_debug_func),
4519 #endif
4520         EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4521         EXP_FEAT(quality_report_uuid, set_quality_report_func),
4522         EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4523         EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4524 #ifdef CONFIG_BT_LE
4525         EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
4526 #endif
4527
4528         /* end with a null feature */
4529         EXP_FEAT(NULL, NULL)
4530 };
4531
4532 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4533                            void *data, u16 data_len)
4534 {
4535         struct mgmt_cp_set_exp_feature *cp = data;
4536         size_t i = 0;
4537
4538         bt_dev_dbg(hdev, "sock %p", sk);
4539
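             /* Dispatch on the 128-bit feature UUID; unknown UUIDs fall
              * through to the NOT_SUPPORTED status below.
              */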
4540         for (i = 0; exp_features[i].uuid; i++) {
4541                 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4542                         return exp_features[i].set_func(sk, hdev, cp, data_len);
4543         }
4544
4545         return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4546                                MGMT_OP_SET_EXP_FEATURE,
4547                                MGMT_STATUS_NOT_SUPPORTED);
4548 }
4549
4550 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4551                             u16 data_len)
4552 {
4553         struct mgmt_cp_get_device_flags *cp = data;
4554         struct mgmt_rp_get_device_flags rp;
4555         struct bdaddr_list_with_flags *br_params;
4556         struct hci_conn_params *params;
4557         u32 supported_flags;
4558         u32 current_flags = 0;
4559         u8 status = MGMT_STATUS_INVALID_PARAMS;
4560
4561         bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
4562                    &cp->addr.bdaddr, cp->addr.type);
4563
4564         hci_dev_lock(hdev);
4565
4566         supported_flags = hdev->conn_flags;
4567
4568         memset(&rp, 0, sizeof(rp));
4569
4570         if (cp->addr.type == BDADDR_BREDR) {
4571                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4572                                                               &cp->addr.bdaddr,
4573                                                               cp->addr.type);
4574                 if (!br_params)
4575                         goto done;
4576
4577                 current_flags = br_params->flags;
4578         } else {
4579                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4580                                                 le_addr_type(cp->addr.type));
4581
4582                 if (!params)
4583                         goto done;
4584
4585                 current_flags = params->flags;
4586         }
4587
4588         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4589         rp.addr.type = cp->addr.type;
4590         rp.supported_flags = cpu_to_le32(supported_flags);
4591         rp.current_flags = cpu_to_le32(current_flags);
4592
4593         status = MGMT_STATUS_SUCCESS;
4594
4595 done:
4596         hci_dev_unlock(hdev);
4597
4598         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4599                                 &rp, sizeof(rp));
4600 }
4601
4602 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4603                                  bdaddr_t *bdaddr, u8 bdaddr_type,
4604                                  u32 supported_flags, u32 current_flags)
4605 {
4606         struct mgmt_ev_device_flags_changed ev;
4607
4608         bacpy(&ev.addr.bdaddr, bdaddr);
4609         ev.addr.type = bdaddr_type;
4610         ev.supported_flags = cpu_to_le32(supported_flags);
4611         ev.current_flags = cpu_to_le32(current_flags);
4612
4613         mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4614 }
4615
4616 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4617                             u16 len)
4618 {
4619         struct mgmt_cp_set_device_flags *cp = data;
4620         struct bdaddr_list_with_flags *br_params;
4621         struct hci_conn_params *params;
4622         u8 status = MGMT_STATUS_INVALID_PARAMS;
4623         u32 supported_flags;
4624         u32 current_flags = __le32_to_cpu(cp->current_flags);
4625
4626         bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4627                    &cp->addr.bdaddr, cp->addr.type,
4628                    current_flags);
4629
4630         /* FIXME: take hci_dev_lock() earlier; hdev->conn_flags can change concurrently */
4631         supported_flags = hdev->conn_flags;
4632
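             /* Reject any flag bits that are not advertised as supported */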
4633         if ((supported_flags | current_flags) != supported_flags) {
4634                 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
4635                             current_flags, supported_flags);
4636                 goto done;
4637         }
4638
4639         hci_dev_lock(hdev);
4640
4641         if (cp->addr.type == BDADDR_BREDR) {
4642                 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4643                                                               &cp->addr.bdaddr,
4644                                                               cp->addr.type);
4645
4646                 if (br_params) {
4647                         br_params->flags = current_flags;
4648                         status = MGMT_STATUS_SUCCESS;
4649                 } else {
4650                         bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4651                                     &cp->addr.bdaddr, cp->addr.type);
4652                 }
4653         } else {
4654                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4655                                                 le_addr_type(cp->addr.type));
4656                 if (params) {
4657                         /* Devices using RPAs can only be programmed into the
4658                          * accept list if LL Privacy is enabled; otherwise they
4659                          * cannot be marked with HCI_CONN_FLAG_REMOTE_WAKEUP.
4660                          */
4661                         if ((current_flags & HCI_CONN_FLAG_REMOTE_WAKEUP) &&
4662                             !use_ll_privacy(hdev) &&
4663                             hci_find_irk_by_addr(hdev, &params->addr,
4664                                                  params->addr_type)) {
4665                                 bt_dev_warn(hdev,
4666                                             "Cannot set wakeable for RPA");
4667                                 goto unlock;
4668                         }
4669
4670                         params->flags = current_flags;
4671                         status = MGMT_STATUS_SUCCESS;
4672
4673                         /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4674                          * has been set.
4675                          */
4676                         if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
4677                                 hci_update_passive_scan(hdev);
4678                 } else {
4679                         bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4680                                     &cp->addr.bdaddr,
4681                                     le_addr_type(cp->addr.type));
4682                 }
4683         }
4684
4685 unlock:
4686         hci_dev_unlock(hdev);
4687
4688 done:
4689         if (status == MGMT_STATUS_SUCCESS)
4690                 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4691                                      supported_flags, current_flags);
4692
4693         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4694                                  &cp->addr, sizeof(cp->addr));
4695 }
4696
4697 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4698                                    u16 handle)
4699 {
4700         struct mgmt_ev_adv_monitor_added ev;
4701
4702         ev.monitor_handle = cpu_to_le16(handle);
4703
4704         mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4705 }
4706
4707 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4708 {
4709         struct mgmt_ev_adv_monitor_removed ev;
4710         struct mgmt_pending_cmd *cmd;
4711         struct sock *sk_skip = NULL;
4712         struct mgmt_cp_remove_adv_monitor *cp;
4713
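             /* If the removal was triggered by a pending Remove Adv Monitor
              * command for a specific handle, skip that command's socket; it
              * is notified through the command reply instead of this event.
              */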
4714         cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4715         if (cmd) {
4716                 cp = cmd->param;
4717
4718                 if (cp->monitor_handle)
4719                         sk_skip = cmd->sk;
4720         }
4721
4722         ev.monitor_handle = cpu_to_le16(handle);
4723
4724         mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4725 }
4726
4727 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4728                                  void *data, u16 len)
4729 {
4730         struct adv_monitor *monitor = NULL;
4731         struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4732         int handle, err;
4733         size_t rp_size = 0;
4734         __u32 supported = 0;
4735         __u32 enabled = 0;
4736         __u16 num_handles = 0;
4737         __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4738
4739         BT_DBG("request for %s", hdev->name);
4740
4741         hci_dev_lock(hdev);
4742
4743         if (msft_monitor_supported(hdev))
4744                 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4745
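             /* Collect the handles of all registered monitors; handles[] is
              * sized for HCI_MAX_ADV_MONITOR_NUM_HANDLES, which the monitor
              * IDR is not expected to exceed.
              */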
4746         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4747                 handles[num_handles++] = monitor->handle;
4748
4749         hci_dev_unlock(hdev);
4750
4751         rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4752         rp = kmalloc(rp_size, GFP_KERNEL);
4753         if (!rp)
4754                 return -ENOMEM;
4755
4756         /* All supported features are currently enabled */
4757         enabled = supported;
4758
4759         rp->supported_features = cpu_to_le32(supported);
4760         rp->enabled_features = cpu_to_le32(enabled);
4761         rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4762         rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4763         rp->num_handles = cpu_to_le16(num_handles);
4764         if (num_handles)
4765                 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4766
4767         err = mgmt_cmd_complete(sk, hdev->id,
4768                                 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4769                                 MGMT_STATUS_SUCCESS, rp, rp_size);
4770
4771         kfree(rp);
4772
4773         return err;
4774 }
4775
4776 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
4777                                                    void *data, int status)
4778 {
4779         struct mgmt_rp_add_adv_patterns_monitor rp;
4780         struct mgmt_pending_cmd *cmd = data;
4781         struct adv_monitor *monitor = cmd->user_data;
4782
4783         hci_dev_lock(hdev);
4784
4785         rp.monitor_handle = cpu_to_le16(monitor->handle);
4786
4787         if (!status) {
4788                 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4789                 hdev->adv_monitors_cnt++;
4790                 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4791                         monitor->state = ADV_MONITOR_STATE_REGISTERED;
4792                 hci_update_passive_scan(hdev);
4793         }
4794
4795         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4796                           mgmt_status(status), &rp, sizeof(rp));
4797         mgmt_pending_remove(cmd);
4798
4799         hci_dev_unlock(hdev);
4800         bt_dev_dbg(hdev, "add monitor %d complete, status %d",
4801                    rp.monitor_handle, status);
4802 }
4803
4804 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
4805 {
4806         struct mgmt_pending_cmd *cmd = data;
4807         struct adv_monitor *monitor = cmd->user_data;
4808
4809         return hci_add_adv_monitor(hdev, monitor);
4810 }
4811
4812 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4813                                       struct adv_monitor *m, u8 status,
4814                                       void *data, u16 len, u16 op)
4815 {
4816         struct mgmt_pending_cmd *cmd;
4817         int err;
4818
4819         hci_dev_lock(hdev);
4820
4821         if (status)
4822                 goto unlock;
4823
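             /* Monitor and LE state changes are serialized: reply with BUSY
              * while any related command is still pending.
              */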
4824         if (pending_find(MGMT_OP_SET_LE, hdev) ||
4825             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4826             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4827             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4828                 status = MGMT_STATUS_BUSY;
4829                 goto unlock;
4830         }
4831
4832         cmd = mgmt_pending_add(sk, op, hdev, data, len);
4833         if (!cmd) {
4834                 status = MGMT_STATUS_NO_RESOURCES;
4835                 goto unlock;
4836         }
4837
4838         cmd->user_data = m;
4839         err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
4840                                  mgmt_add_adv_patterns_monitor_complete);
4841         if (err) {
4842                 if (err == -ENOMEM)
4843                         status = MGMT_STATUS_NO_RESOURCES;
4844                 else
4845                         status = MGMT_STATUS_FAILED;
4846
4847                 goto unlock;
4848         }
4849
4850         hci_dev_unlock(hdev);
4851
4852         return 0;
4853
4854 unlock:
4855         hci_free_adv_monitor(hdev, m);
4856         hci_dev_unlock(hdev);
4857         return mgmt_cmd_status(sk, hdev->id, op, status);
4858 }
4859
4860 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4861                                    struct mgmt_adv_rssi_thresholds *rssi)
4862 {
4863         if (rssi) {
4864                 m->rssi.low_threshold = rssi->low_threshold;
4865                 m->rssi.low_threshold_timeout =
4866                     __le16_to_cpu(rssi->low_threshold_timeout);
4867                 m->rssi.high_threshold = rssi->high_threshold;
4868                 m->rssi.high_threshold_timeout =
4869                     __le16_to_cpu(rssi->high_threshold_timeout);
4870                 m->rssi.sampling_period = rssi->sampling_period;
4871         } else {
4872                 /* Default values. These numbers are the least constraining
4873                  * parameters for the MSFT API to work, so it behaves as if
4874                  * there were no RSSI parameters to consider. They may need
4875                  * to be changed if other APIs are to be supported.
4876                  */
4877                 m->rssi.low_threshold = -127;
4878                 m->rssi.low_threshold_timeout = 60;
4879                 m->rssi.high_threshold = -127;
4880                 m->rssi.high_threshold_timeout = 0;
4881                 m->rssi.sampling_period = 0;
4882         }
4883 }
4884
4885 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4886                                     struct mgmt_adv_pattern *patterns)
4887 {
4888         u8 offset = 0, length = 0;
4889         struct adv_pattern *p = NULL;
4890         int i;
4891
4892         for (i = 0; i < pattern_count; i++) {
4893                 offset = patterns[i].offset;
4894                 length = patterns[i].length;
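                     /* Each pattern must lie entirely within the advertising
                      * data, which is at most HCI_MAX_AD_LENGTH bytes.
                      */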
4895                 if (offset >= HCI_MAX_AD_LENGTH ||
4896                     length > HCI_MAX_AD_LENGTH ||
4897                     (offset + length) > HCI_MAX_AD_LENGTH)
4898                         return MGMT_STATUS_INVALID_PARAMS;
4899
4900                 p = kmalloc(sizeof(*p), GFP_KERNEL);
4901                 if (!p)
4902                         return MGMT_STATUS_NO_RESOURCES;
4903
4904                 p->ad_type = patterns[i].ad_type;
4905                 p->offset = patterns[i].offset;
4906                 p->length = patterns[i].length;
4907                 memcpy(p->value, patterns[i].value, p->length);
4908
4909                 INIT_LIST_HEAD(&p->list);
4910                 list_add(&p->list, &m->patterns);
4911         }
4912
4913         return MGMT_STATUS_SUCCESS;
4914 }
4915
4916 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4917                                     void *data, u16 len)
4918 {
4919         struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4920         struct adv_monitor *m = NULL;
4921         u8 status = MGMT_STATUS_SUCCESS;
4922         size_t expected_size = sizeof(*cp);
4923
4924         BT_DBG("request for %s", hdev->name);
4925
4926         if (len <= sizeof(*cp)) {
4927                 status = MGMT_STATUS_INVALID_PARAMS;
4928                 goto done;
4929         }
4930
4931         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4932         if (len != expected_size) {
4933                 status = MGMT_STATUS_INVALID_PARAMS;
4934                 goto done;
4935         }
4936
4937         m = kzalloc(sizeof(*m), GFP_KERNEL);
4938         if (!m) {
4939                 status = MGMT_STATUS_NO_RESOURCES;
4940                 goto done;
4941         }
4942
4943         INIT_LIST_HEAD(&m->patterns);
4944
4945         parse_adv_monitor_rssi(m, NULL);
4946         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4947
4948 done:
4949         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4950                                           MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4951 }
4952
4953 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4954                                          void *data, u16 len)
4955 {
4956         struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4957         struct adv_monitor *m = NULL;
4958         u8 status = MGMT_STATUS_SUCCESS;
4959         size_t expected_size = sizeof(*cp);
4960
4961         BT_DBG("request for %s", hdev->name);
4962
4963         if (len <= sizeof(*cp)) {
4964                 status = MGMT_STATUS_INVALID_PARAMS;
4965                 goto done;
4966         }
4967
4968         expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4969         if (len != expected_size) {
4970                 status = MGMT_STATUS_INVALID_PARAMS;
4971                 goto done;
4972         }
4973
4974         m = kzalloc(sizeof(*m), GFP_KERNEL);
4975         if (!m) {
4976                 status = MGMT_STATUS_NO_RESOURCES;
4977                 goto done;
4978         }
4979
4980         INIT_LIST_HEAD(&m->patterns);
4981
4982         parse_adv_monitor_rssi(m, &cp->rssi);
4983         status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4984
4985 done:
4986         return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4987                                          MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4988 }
4989
4990 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
4991                                              void *data, int status)
4992 {
4993         struct mgmt_rp_remove_adv_monitor rp;
4994         struct mgmt_pending_cmd *cmd = data;
4995         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
4996
4997         hci_dev_lock(hdev);
4998
4999         rp.monitor_handle = cp->monitor_handle;
5000
5001         if (!status)
5002                 hci_update_passive_scan(hdev);
5003
5004         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5005                           mgmt_status(status), &rp, sizeof(rp));
5006         mgmt_pending_remove(cmd);
5007
5008         hci_dev_unlock(hdev);
5009         bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5010                    rp.monitor_handle, status);
5011 }
5012
5013 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5014 {
5015         struct mgmt_pending_cmd *cmd = data;
5016         struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5017         u16 handle = __le16_to_cpu(cp->monitor_handle);
5018
5019         if (!handle)
5020                 return hci_remove_all_adv_monitor(hdev);
5021
5022         return hci_remove_single_adv_monitor(hdev, handle);
5023 }
5024
5025 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5026                               void *data, u16 len)
5027 {
5028         struct mgmt_pending_cmd *cmd;
5029         int err, status;
5030
5031         hci_dev_lock(hdev);
5032
5033         if (pending_find(MGMT_OP_SET_LE, hdev) ||
5034             pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5035             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5036             pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5037                 status = MGMT_STATUS_BUSY;
5038                 goto unlock;
5039         }
5040
5041         cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5042         if (!cmd) {
5043                 status = MGMT_STATUS_NO_RESOURCES;
5044                 goto unlock;
5045         }
5046
5047         err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5048                                  mgmt_remove_adv_monitor_complete);
5049
5050         if (err) {
5051                 mgmt_pending_remove(cmd);
5052
5053                 if (err == -ENOMEM)
5054                         status = MGMT_STATUS_NO_RESOURCES;
5055                 else
5056                         status = MGMT_STATUS_FAILED;
5057
5059                 goto unlock;
5060         }
5061
5062         hci_dev_unlock(hdev);
5063
5064         return 0;
5065
5066 unlock:
5067         hci_dev_unlock(hdev);
5068         return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5069                                status);
5070 }
5071
5072 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5073 {
5074         struct mgmt_rp_read_local_oob_data mgmt_rp;
5075         size_t rp_size = sizeof(mgmt_rp);
5076         struct mgmt_pending_cmd *cmd = data;
5077         struct sk_buff *skb = cmd->skb;
5078         u8 status = mgmt_status(err);
5079
5080         if (!status) {
5081                 if (!skb)
5082                         status = MGMT_STATUS_FAILED;
5083                 else if (IS_ERR(skb))
5084                         status = mgmt_status(PTR_ERR(skb));
5085                 else
5086                         status = mgmt_status(skb->data[0]);
5087         }
5088
5089         bt_dev_dbg(hdev, "status %d", status);
5090
5091         if (status) {
5092                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5093                 goto remove;
5094         }
5095
5096         memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5097
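             /* Controllers without BR/EDR Secure Connections only return the
              * P-192 hash and randomizer, so the reply is truncated; with SC
              * enabled the extended reply also carries the P-256 values.
              */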
5098         if (!bredr_sc_enabled(hdev)) {
5099                 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5100
5101                 if (skb->len < sizeof(*rp)) {
5102                         mgmt_cmd_status(cmd->sk, hdev->id,
5103                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5104                                         MGMT_STATUS_FAILED);
5105                         goto remove;
5106                 }
5107
5108                 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5109                 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5110
5111                 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5112         } else {
5113                 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5114
5115                 if (skb->len < sizeof(*rp)) {
5116                         mgmt_cmd_status(cmd->sk, hdev->id,
5117                                         MGMT_OP_READ_LOCAL_OOB_DATA,
5118                                         MGMT_STATUS_FAILED);
5119                         goto remove;
5120                 }
5121
5122                 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5123                 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5124
5125                 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5126                 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5127         }
5128
5129         mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5130                           MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5131
5132 remove:
5133         if (skb && !IS_ERR(skb))
5134                 kfree_skb(skb);
5135
5136         mgmt_pending_free(cmd);
5137 }
5138
5139 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5140 {
5141         struct mgmt_pending_cmd *cmd = data;
5142
5143         if (bredr_sc_enabled(hdev))
5144                 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5145         else
5146                 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5147
5148         if (IS_ERR(cmd->skb))
5149                 return PTR_ERR(cmd->skb);
5150         else
5151                 return 0;
5152 }
5153
5154 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5155                                void *data, u16 data_len)
5156 {
5157         struct mgmt_pending_cmd *cmd;
5158         int err;
5159
5160         bt_dev_dbg(hdev, "sock %p", sk);
5161
5162         hci_dev_lock(hdev);
5163
5164         if (!hdev_is_powered(hdev)) {
5165                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5166                                       MGMT_STATUS_NOT_POWERED);
5167                 goto unlock;
5168         }
5169
5170         if (!lmp_ssp_capable(hdev)) {
5171                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5172                                       MGMT_STATUS_NOT_SUPPORTED);
5173                 goto unlock;
5174         }
5175
5176         cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5177         if (!cmd)
5178                 err = -ENOMEM;
5179         else
5180                 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5181                                          read_local_oob_data_complete);
5182
5183         if (err < 0) {
5184                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5185                                       MGMT_STATUS_FAILED);
5186
5187                 if (cmd)
5188                         mgmt_pending_free(cmd);
5189         }
5190
5191 unlock:
5192         hci_dev_unlock(hdev);
5193         return err;
5194 }
5195
5196 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5197                                void *data, u16 len)
5198 {
5199         struct mgmt_addr_info *addr = data;
5200         int err;
5201
5202         bt_dev_dbg(hdev, "sock %p", sk);
5203
5204         if (!bdaddr_type_is_valid(addr->type))
5205                 return mgmt_cmd_complete(sk, hdev->id,
5206                                          MGMT_OP_ADD_REMOTE_OOB_DATA,
5207                                          MGMT_STATUS_INVALID_PARAMS,
5208                                          addr, sizeof(*addr));
5209
5210         hci_dev_lock(hdev);
5211
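             /* Two command layouts are accepted: the legacy one carrying only
              * the P-192 hash/randomizer and the extended one that also
              * carries the P-256 values.
              */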
5212         if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5213                 struct mgmt_cp_add_remote_oob_data *cp = data;
5214                 u8 status;
5215
5216                 if (cp->addr.type != BDADDR_BREDR) {
5217                         err = mgmt_cmd_complete(sk, hdev->id,
5218                                                 MGMT_OP_ADD_REMOTE_OOB_DATA,
5219                                                 MGMT_STATUS_INVALID_PARAMS,
5220                                                 &cp->addr, sizeof(cp->addr));
5221                         goto unlock;
5222                 }
5223
5224                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5225                                               cp->addr.type, cp->hash,
5226                                               cp->rand, NULL, NULL);
5227                 if (err < 0)
5228                         status = MGMT_STATUS_FAILED;
5229                 else
5230                         status = MGMT_STATUS_SUCCESS;
5231
5232                 err = mgmt_cmd_complete(sk, hdev->id,
5233                                         MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5234                                         &cp->addr, sizeof(cp->addr));
5235         } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5236                 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5237                 u8 *rand192, *hash192, *rand256, *hash256;
5238                 u8 status;
5239
5240                 if (bdaddr_type_is_le(cp->addr.type)) {
5241                         /* Enforce zero-valued 192-bit parameters as
5242                          * long as legacy SMP OOB isn't implemented.
5243                          */
5244                         if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5245                             memcmp(cp->hash192, ZERO_KEY, 16)) {
5246                                 err = mgmt_cmd_complete(sk, hdev->id,
5247                                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5248                                                         MGMT_STATUS_INVALID_PARAMS,
5249                                                         addr, sizeof(*addr));
5250                                 goto unlock;
5251                         }
5252
5253                         rand192 = NULL;
5254                         hash192 = NULL;
5255                 } else {
5256                         /* If either of the P-192 values is set to zero,
5257                          * just disable OOB data for P-192.
5258                          */
5259                         if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5260                             !memcmp(cp->hash192, ZERO_KEY, 16)) {
5261                                 rand192 = NULL;
5262                                 hash192 = NULL;
5263                         } else {
5264                                 rand192 = cp->rand192;
5265                                 hash192 = cp->hash192;
5266                         }
5267                 }
5268
5269                 /* If either of the P-256 values is set to zero, just
5270                  * disable OOB data for P-256.
5271                  */
5272                 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5273                     !memcmp(cp->hash256, ZERO_KEY, 16)) {
5274                         rand256 = NULL;
5275                         hash256 = NULL;
5276                 } else {
5277                         rand256 = cp->rand256;
5278                         hash256 = cp->hash256;
5279                 }
5280
5281                 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5282                                               cp->addr.type, hash192, rand192,
5283                                               hash256, rand256);
5284                 if (err < 0)
5285                         status = MGMT_STATUS_FAILED;
5286                 else
5287                         status = MGMT_STATUS_SUCCESS;
5288
5289                 err = mgmt_cmd_complete(sk, hdev->id,
5290                                         MGMT_OP_ADD_REMOTE_OOB_DATA,
5291                                         status, &cp->addr, sizeof(cp->addr));
5292         } else {
5293                 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5294                            len);
5295                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5296                                       MGMT_STATUS_INVALID_PARAMS);
5297         }
5298
5299 unlock:
5300         hci_dev_unlock(hdev);
5301         return err;
5302 }
5303
5304 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5305                                   void *data, u16 len)
5306 {
5307         struct mgmt_cp_remove_remote_oob_data *cp = data;
5308         u8 status;
5309         int err;
5310
5311         bt_dev_dbg(hdev, "sock %p", sk);
5312
5313         if (cp->addr.type != BDADDR_BREDR)
5314                 return mgmt_cmd_complete(sk, hdev->id,
5315                                          MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5316                                          MGMT_STATUS_INVALID_PARAMS,
5317                                          &cp->addr, sizeof(cp->addr));
5318
5319         hci_dev_lock(hdev);
5320
5321         if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5322                 hci_remote_oob_data_clear(hdev);
5323                 status = MGMT_STATUS_SUCCESS;
5324                 goto done;
5325         }
5326
5327         err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5328         if (err < 0)
5329                 status = MGMT_STATUS_INVALID_PARAMS;
5330         else
5331                 status = MGMT_STATUS_SUCCESS;
5332
5333 done:
5334         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5335                                 status, &cp->addr, sizeof(cp->addr));
5336
5337         hci_dev_unlock(hdev);
5338         return err;
5339 }
5340
5341 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5342 {
5343         struct mgmt_pending_cmd *cmd;
5344
5345         bt_dev_dbg(hdev, "status %u", status);
5346
5347         hci_dev_lock(hdev);
5348
5349         cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5350         if (!cmd)
5351                 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5352
5353         if (!cmd)
5354                 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5355
5356         if (cmd) {
5357                 cmd->cmd_complete(cmd, mgmt_status(status));
5358                 mgmt_pending_remove(cmd);
5359         }
5360
5361         hci_dev_unlock(hdev);
5362 }
5363
5364 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5365                                     uint8_t *mgmt_status)
5366 {
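             /* Interleaved discovery needs both LE and BR/EDR support, hence
              * the fallthrough into the BR/EDR check.
              */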
5367         switch (type) {
5368         case DISCOV_TYPE_LE:
5369                 *mgmt_status = mgmt_le_support(hdev);
5370                 if (*mgmt_status)
5371                         return false;
5372                 break;
5373         case DISCOV_TYPE_INTERLEAVED:
5374                 *mgmt_status = mgmt_le_support(hdev);
5375                 if (*mgmt_status)
5376                         return false;
5377                 fallthrough;
5378         case DISCOV_TYPE_BREDR:
5379                 *mgmt_status = mgmt_bredr_support(hdev);
5380                 if (*mgmt_status)
5381                         return false;
5382                 break;
5383         default:
5384                 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5385                 return false;
5386         }
5387
5388         return true;
5389 }
5390
5391 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5392 {
5393         struct mgmt_pending_cmd *cmd = data;
5394
5395         if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5396             cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5397             cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5398                 return;
5399
5400         bt_dev_dbg(hdev, "err %d", err);
5401
5402         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5403                           cmd->param, 1);
5404         mgmt_pending_remove(cmd);
5405
5406         hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5407                                 DISCOVERY_FINDING);
5408 }
5409
5410 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5411 {
5412         return hci_start_discovery_sync(hdev);
5413 }
5414
5415 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5416                                     u16 op, void *data, u16 len)
5417 {
5418         struct mgmt_cp_start_discovery *cp = data;
5419         struct mgmt_pending_cmd *cmd;
5420         u8 status;
5421         int err;
5422
5423         bt_dev_dbg(hdev, "sock %p", sk);
5424
5425         hci_dev_lock(hdev);
5426
5427         if (!hdev_is_powered(hdev)) {
5428                 err = mgmt_cmd_complete(sk, hdev->id, op,
5429                                         MGMT_STATUS_NOT_POWERED,
5430                                         &cp->type, sizeof(cp->type));
5431                 goto failed;
5432         }
5433
5434         if (hdev->discovery.state != DISCOVERY_STOPPED ||
5435             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5436                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5437                                         &cp->type, sizeof(cp->type));
5438                 goto failed;
5439         }
5440
5441         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5442                 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5443                                         &cp->type, sizeof(cp->type));
5444                 goto failed;
5445         }
5446
5447         /* Can't start discovery when it is paused */
5448         if (hdev->discovery_paused) {
5449                 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5450                                         &cp->type, sizeof(cp->type));
5451                 goto failed;
5452         }
5453
5454         /* Clear the discovery filter first to free any previously
5455          * allocated memory for the UUID list.
5456          */
5457         hci_discovery_filter_clear(hdev);
5458
5459         hdev->discovery.type = cp->type;
5460         hdev->discovery.report_invalid_rssi = false;
5461         if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5462                 hdev->discovery.limited = true;
5463         else
5464                 hdev->discovery.limited = false;
5465
5466         cmd = mgmt_pending_add(sk, op, hdev, data, len);
5467         if (!cmd) {
5468                 err = -ENOMEM;
5469                 goto failed;
5470         }
5471
5472         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5473                                  start_discovery_complete);
5474         if (err < 0) {
5475                 mgmt_pending_remove(cmd);
5476                 goto failed;
5477         }
5478
5479         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5480
5481 failed:
5482         hci_dev_unlock(hdev);
5483         return err;
5484 }
5485
5486 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5487                            void *data, u16 len)
5488 {
5489         return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5490                                         data, len);
5491 }
5492
5493 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5494                                    void *data, u16 len)
5495 {
5496         return start_discovery_internal(sk, hdev,
5497                                         MGMT_OP_START_LIMITED_DISCOVERY,
5498                                         data, len);
5499 }
5500
5501 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5502                                    void *data, u16 len)
5503 {
5504         struct mgmt_cp_start_service_discovery *cp = data;
5505         struct mgmt_pending_cmd *cmd;
5506         const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5507         u16 uuid_count, expected_len;
5508         u8 status;
5509         int err;
5510
5511         bt_dev_dbg(hdev, "sock %p", sk);
5512
5513         hci_dev_lock(hdev);
5514
5515         if (!hdev_is_powered(hdev)) {
5516                 err = mgmt_cmd_complete(sk, hdev->id,
5517                                         MGMT_OP_START_SERVICE_DISCOVERY,
5518                                         MGMT_STATUS_NOT_POWERED,
5519                                         &cp->type, sizeof(cp->type));
5520                 goto failed;
5521         }
5522
5523         if (hdev->discovery.state != DISCOVERY_STOPPED ||
5524             hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5525                 err = mgmt_cmd_complete(sk, hdev->id,
5526                                         MGMT_OP_START_SERVICE_DISCOVERY,
5527                                         MGMT_STATUS_BUSY, &cp->type,
5528                                         sizeof(cp->type));
5529                 goto failed;
5530         }
5531
5532         if (hdev->discovery_paused) {
5533                 err = mgmt_cmd_complete(sk, hdev->id,
5534                                         MGMT_OP_START_SERVICE_DISCOVERY,
5535                                         MGMT_STATUS_BUSY, &cp->type,
5536                                         sizeof(cp->type));
5537                 goto failed;
5538         }
5539
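             /* max_uuid_count bounds the list so that expected_len below
              * cannot overflow its u16 range.
              */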
5540         uuid_count = __le16_to_cpu(cp->uuid_count);
5541         if (uuid_count > max_uuid_count) {
5542                 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5543                            uuid_count);
5544                 err = mgmt_cmd_complete(sk, hdev->id,
5545                                         MGMT_OP_START_SERVICE_DISCOVERY,
5546                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
5547                                         sizeof(cp->type));
5548                 goto failed;
5549         }
5550
5551         expected_len = sizeof(*cp) + uuid_count * 16;
5552         if (expected_len != len) {
5553                 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5554                            expected_len, len);
5555                 err = mgmt_cmd_complete(sk, hdev->id,
5556                                         MGMT_OP_START_SERVICE_DISCOVERY,
5557                                         MGMT_STATUS_INVALID_PARAMS, &cp->type,
5558                                         sizeof(cp->type));
5559                 goto failed;
5560         }
5561
5562         if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5563                 err = mgmt_cmd_complete(sk, hdev->id,
5564                                         MGMT_OP_START_SERVICE_DISCOVERY,
5565                                         status, &cp->type, sizeof(cp->type));
5566                 goto failed;
5567         }
5568
5569         cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5570                                hdev, data, len);
5571         if (!cmd) {
5572                 err = -ENOMEM;
5573                 goto failed;
5574         }
5575
5576         /* Clear the discovery filter first to free any previously
5577          * allocated memory for the UUID list.
5578          */
5579         hci_discovery_filter_clear(hdev);
5580
5581         hdev->discovery.result_filtering = true;
5582         hdev->discovery.type = cp->type;
5583         hdev->discovery.rssi = cp->rssi;
5584         hdev->discovery.uuid_count = uuid_count;
5585
5586         if (uuid_count > 0) {
5587                 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5588                                                 GFP_KERNEL);
5589                 if (!hdev->discovery.uuids) {
5590                         err = mgmt_cmd_complete(sk, hdev->id,
5591                                                 MGMT_OP_START_SERVICE_DISCOVERY,
5592                                                 MGMT_STATUS_FAILED,
5593                                                 &cp->type, sizeof(cp->type));
5594                         mgmt_pending_remove(cmd);
5595                         goto failed;
5596                 }
5597         }
5598
5599         err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5600                                  start_discovery_complete);
5601         if (err < 0) {
5602                 mgmt_pending_remove(cmd);
5603                 goto failed;
5604         }
5605
5606         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5607
5608 failed:
5609         hci_dev_unlock(hdev);
5610         return err;
5611 }
5612
5613 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5614 {
5615         struct mgmt_pending_cmd *cmd;
5616
5617         bt_dev_dbg(hdev, "status %u", status);
5618
5619         hci_dev_lock(hdev);
5620
5621         cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5622         if (cmd) {
5623                 cmd->cmd_complete(cmd, mgmt_status(status));
5624                 mgmt_pending_remove(cmd);
5625         }
5626
5627         hci_dev_unlock(hdev);
5628 }
5629
5630 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
5631 {
5632         struct mgmt_pending_cmd *cmd = data;
5633
5634         if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
5635                 return;
5636
5637         bt_dev_dbg(hdev, "err %d", err);
5638
5639         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5640                           cmd->param, 1);
5641         mgmt_pending_remove(cmd);
5642
5643         if (!err)
5644                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5645 }
5646
5647 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
5648 {
5649         return hci_stop_discovery_sync(hdev);
5650 }
5651
5652 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5653                           u16 len)
5654 {
5655         struct mgmt_cp_stop_discovery *mgmt_cp = data;
5656         struct mgmt_pending_cmd *cmd;
5657         int err;
5658
5659         bt_dev_dbg(hdev, "sock %p", sk);
5660
5661         hci_dev_lock(hdev);
5662
5663         if (!hci_discovery_active(hdev)) {
5664                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5665                                         MGMT_STATUS_REJECTED, &mgmt_cp->type,
5666                                         sizeof(mgmt_cp->type));
5667                 goto unlock;
5668         }
5669
5670         if (hdev->discovery.type != mgmt_cp->type) {
5671                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5672                                         MGMT_STATUS_INVALID_PARAMS,
5673                                         &mgmt_cp->type, sizeof(mgmt_cp->type));
5674                 goto unlock;
5675         }
5676
5677         cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5678         if (!cmd) {
5679                 err = -ENOMEM;
5680                 goto unlock;
5681         }
5682
5683         err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
5684                                  stop_discovery_complete);
5685         if (err < 0) {
5686                 mgmt_pending_remove(cmd);
5687                 goto unlock;
5688         }
5689
5690         hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5691
5692 unlock:
5693         hci_dev_unlock(hdev);
5694         return err;
5695 }
5696
5697 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5698                         u16 len)
5699 {
5700         struct mgmt_cp_confirm_name *cp = data;
5701         struct inquiry_entry *e;
5702         int err;
5703
5704         bt_dev_dbg(hdev, "sock %p", sk);
5705
5706         hci_dev_lock(hdev);
5707
5708         if (!hci_discovery_active(hdev)) {
5709                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5710                                         MGMT_STATUS_FAILED, &cp->addr,
5711                                         sizeof(cp->addr));
5712                 goto failed;
5713         }
5714
5715         e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5716         if (!e) {
5717                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5718                                         MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5719                                         sizeof(cp->addr));
5720                 goto failed;
5721         }
5722
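             /* A confirmed name removes the entry from the resolve list;
              * otherwise it stays queued for name resolution.
              */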
5723         if (cp->name_known) {
5724                 e->name_state = NAME_KNOWN;
5725                 list_del(&e->list);
5726         } else {
5727                 e->name_state = NAME_NEEDED;
5728                 hci_inquiry_cache_update_resolve(hdev, e);
5729         }
5730
5731         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5732                                 &cp->addr, sizeof(cp->addr));
5733
5734 failed:
5735         hci_dev_unlock(hdev);
5736         return err;
5737 }
5738
5739 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5740                         u16 len)
5741 {
5742         struct mgmt_cp_block_device *cp = data;
5743         u8 status;
5744         int err;
5745
5746         bt_dev_dbg(hdev, "sock %p", sk);
5747
5748         if (!bdaddr_type_is_valid(cp->addr.type))
5749                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5750                                          MGMT_STATUS_INVALID_PARAMS,
5751                                          &cp->addr, sizeof(cp->addr));
5752
5753         hci_dev_lock(hdev);
5754
5755         err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5756                                   cp->addr.type);
5757         if (err < 0) {
5758                 status = MGMT_STATUS_FAILED;
5759                 goto done;
5760         }
5761
5762         mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5763                    sk);
5764         status = MGMT_STATUS_SUCCESS;
5765
5766 done:
5767         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5768                                 &cp->addr, sizeof(cp->addr));
5769
5770         hci_dev_unlock(hdev);
5771
5772         return err;
5773 }
5774
5775 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5776                           u16 len)
5777 {
5778         struct mgmt_cp_unblock_device *cp = data;
5779         u8 status;
5780         int err;
5781
5782         bt_dev_dbg(hdev, "sock %p", sk);
5783
5784         if (!bdaddr_type_is_valid(cp->addr.type))
5785                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5786                                          MGMT_STATUS_INVALID_PARAMS,
5787                                          &cp->addr, sizeof(cp->addr));
5788
5789         hci_dev_lock(hdev);
5790
5791         err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5792                                   cp->addr.type);
5793         if (err < 0) {
5794                 status = MGMT_STATUS_INVALID_PARAMS;
5795                 goto done;
5796         }
5797
5798         mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5799                    sk);
5800         status = MGMT_STATUS_SUCCESS;
5801
5802 done:
5803         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5804                                 &cp->addr, sizeof(cp->addr));
5805
5806         hci_dev_unlock(hdev);
5807
5808         return err;
5809 }
5810
5811 static int set_device_id_sync(struct hci_dev *hdev, void *data)
5812 {
5813         return hci_update_eir_sync(hdev);
5814 }
5815
5816 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5817                          u16 len)
5818 {
5819         struct mgmt_cp_set_device_id *cp = data;
5820         int err;
5821         __u16 source;
5822
5823         bt_dev_dbg(hdev, "sock %p", sk);
5824
5825         source = __le16_to_cpu(cp->source);
5826
5827         if (source > 0x0002)
5828                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5829                                        MGMT_STATUS_INVALID_PARAMS);
5830
5831         hci_dev_lock(hdev);
5832
5833         hdev->devid_source = source;
5834         hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5835         hdev->devid_product = __le16_to_cpu(cp->product);
5836         hdev->devid_version = __le16_to_cpu(cp->version);
5837
5838         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5839                                 NULL, 0);
5840
5841         hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
5842
5843         hci_dev_unlock(hdev);
5844
5845         return err;
5846 }
5847
5848 static void enable_advertising_instance(struct hci_dev *hdev, int err)
5849 {
5850         if (err)
5851                 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
5852         else
5853                 bt_dev_dbg(hdev, "status %d", err);
5854 }
5855
5856 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
5857 {
5858         struct cmd_lookup match = { NULL, hdev };
5859         u8 instance;
5860         struct adv_info *adv_instance;
5861         u8 status = mgmt_status(err);
5862
5863         if (status) {
5864                 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5865                                      cmd_status_rsp, &status);
5866                 return;
5867         }
5868
5869         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5870                 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5871         else
5872                 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5873
5874         mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5875                              &match);
5876
5877         new_settings(hdev, match.sk);
5878
5879         if (match.sk)
5880                 sock_put(match.sk);
5881
5882         /* If "Set Advertising" was just disabled and instance advertising was
5883          * set up earlier, then re-enable multi-instance advertising.
5884          */
5885         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5886             list_empty(&hdev->adv_instances))
5887                 return;
5888
5889         instance = hdev->cur_adv_instance;
5890         if (!instance) {
5891                 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5892                                                         struct adv_info, list);
5893                 if (!adv_instance)
5894                         return;
5895
5896                 instance = adv_instance->instance;
5897         }
5898
5899         err = hci_schedule_adv_instance_sync(hdev, instance, true);
5900
5901         enable_advertising_instance(hdev, err);
5902 }
5903
5904 static int set_adv_sync(struct hci_dev *hdev, void *data)
5905 {
5906         struct mgmt_pending_cmd *cmd = data;
5907         struct mgmt_mode *cp = cmd->param;
5908         u8 val = !!cp->val;
5909
5910         if (cp->val == 0x02)
5911                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5912         else
5913                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5914
5915         cancel_adv_timeout(hdev);
5916
5917         if (val) {
5918                 /* Switch to instance "0" for the Set Advertising setting.
5919                  * We cannot use update_[adv|scan_rsp]_data() here as the
5920                  * HCI_ADVERTISING flag is not yet set.
5921                  */
5922                 hdev->cur_adv_instance = 0x00;
5923
5924                 if (ext_adv_capable(hdev)) {
5925                         hci_start_ext_adv_sync(hdev, 0x00);
5926                 } else {
5927                         hci_update_adv_data_sync(hdev, 0x00);
5928                         hci_update_scan_rsp_data_sync(hdev, 0x00);
5929                         hci_enable_advertising_sync(hdev);
5930                 }
5931         } else {
5932                 hci_disable_advertising_sync(hdev);
5933         }
5934
5935         return 0;
5936 }
5937
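/* Set Advertising: 0x00 disables, 0x01 enables and 0x02 enables connectable
 * advertising. While advertising is paused the command is rejected as Busy.
 * When the controller is powered off, the requested state is already active,
 * an LE connection exists or an active LE scan is running, only the flags
 * are toggled and a settings response is sent; otherwise the change is
 * queued as set_adv_sync() and finished by set_advertising_complete().
 */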
5938 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5939                            u16 len)
5940 {
5941         struct mgmt_mode *cp = data;
5942         struct mgmt_pending_cmd *cmd;
5943         u8 val, status;
5944         int err;
5945
5946         bt_dev_dbg(hdev, "sock %p", sk);
5947
5948         status = mgmt_le_support(hdev);
5949         if (status)
5950                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5951                                        status);
5952
5953         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5954                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5955                                        MGMT_STATUS_INVALID_PARAMS);
5956
5957         if (hdev->advertising_paused)
5958                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5959                                        MGMT_STATUS_BUSY);
5960
5961         hci_dev_lock(hdev);
5962
5963         val = !!cp->val;
5964
5965         /* The following conditions mean that we should not do any
5966          * HCI communication but instead directly send a mgmt
5967          * response to user space (after toggling the flag if
5968          * necessary).
5969          */
5970         if (!hdev_is_powered(hdev) ||
5971             (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5972              (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5973             hci_conn_num(hdev, LE_LINK) > 0 ||
5974             (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5975              hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5976                 bool changed;
5977
5978                 if (cp->val) {
5979                         hdev->cur_adv_instance = 0x00;
5980                         changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5981                         if (cp->val == 0x02)
5982                                 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5983                         else
5984                                 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5985                 } else {
5986                         changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5987                         hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5988                 }
5989
5990                 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5991                 if (err < 0)
5992                         goto unlock;
5993
5994                 if (changed)
5995                         err = new_settings(hdev, sk);
5996
5997                 goto unlock;
5998         }
5999
6000         if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6001             pending_find(MGMT_OP_SET_LE, hdev)) {
6002                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6003                                       MGMT_STATUS_BUSY);
6004                 goto unlock;
6005         }
6006
6007         cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6008         if (!cmd)
6009                 err = -ENOMEM;
6010         else
6011                 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6012                                          set_advertising_complete);
6013
6014         if (err < 0 && cmd)
6015                 mgmt_pending_remove(cmd);
6016
6017 unlock:
6018         hci_dev_unlock(hdev);
6019         return err;
6020 }
6021
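/* Set Static Address: only allowed while the controller is powered off. The
 * address must either be BDADDR_ANY (to clear it) or a valid static random
 * address with the two most significant bits set.
 */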
6022 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6023                               void *data, u16 len)
6024 {
6025         struct mgmt_cp_set_static_address *cp = data;
6026         int err;
6027
6028         bt_dev_dbg(hdev, "sock %p", sk);
6029
6030         if (!lmp_le_capable(hdev))
6031                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6032                                        MGMT_STATUS_NOT_SUPPORTED);
6033
6034         if (hdev_is_powered(hdev))
6035                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6036                                        MGMT_STATUS_REJECTED);
6037
6038         if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6039                 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6040                         return mgmt_cmd_status(sk, hdev->id,
6041                                                MGMT_OP_SET_STATIC_ADDRESS,
6042                                                MGMT_STATUS_INVALID_PARAMS);
6043
6044                 /* Two most significant bits shall be set */
6045                 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6046                         return mgmt_cmd_status(sk, hdev->id,
6047                                                MGMT_OP_SET_STATIC_ADDRESS,
6048                                                MGMT_STATUS_INVALID_PARAMS);
6049         }
6050
6051         hci_dev_lock(hdev);
6052
6053         bacpy(&hdev->static_addr, &cp->bdaddr);
6054
6055         err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6056         if (err < 0)
6057                 goto unlock;
6058
6059         err = new_settings(hdev, sk);
6060
6061 unlock:
6062         hci_dev_unlock(hdev);
6063         return err;
6064 }
6065
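/* Set Scan Parameters: interval and window must each be in the range
 * 0x0004-0x4000 and the window may not be larger than the interval. If a
 * background scan is running outside of discovery, it is restarted so the
 * new parameters take effect.
 */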
6066 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6067                            void *data, u16 len)
6068 {
6069         struct mgmt_cp_set_scan_params *cp = data;
6070         __u16 interval, window;
6071         int err;
6072
6073         bt_dev_dbg(hdev, "sock %p", sk);
6074
6075         if (!lmp_le_capable(hdev))
6076                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6077                                        MGMT_STATUS_NOT_SUPPORTED);
6078
6079         interval = __le16_to_cpu(cp->interval);
6080
6081         if (interval < 0x0004 || interval > 0x4000)
6082                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6083                                        MGMT_STATUS_INVALID_PARAMS);
6084
6085         window = __le16_to_cpu(cp->window);
6086
6087         if (window < 0x0004 || window > 0x4000)
6088                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6089                                        MGMT_STATUS_INVALID_PARAMS);
6090
6091         if (window > interval)
6092                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6093                                        MGMT_STATUS_INVALID_PARAMS);
6094
6095         hci_dev_lock(hdev);
6096
6097         hdev->le_scan_interval = interval;
6098         hdev->le_scan_window = window;
6099
6100         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6101                                 NULL, 0);
6102
6103         /* If the background scan is running, restart it so the new
6104          * parameters are loaded.
6105          */
6106         if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6107             hdev->discovery.state == DISCOVERY_STOPPED)
6108                 hci_update_passive_scan(hdev);
6109
6110         hci_dev_unlock(hdev);
6111
6112         return err;
6113 }
6114
6115 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6116 {
6117         struct mgmt_pending_cmd *cmd = data;
6118
6119         bt_dev_dbg(hdev, "err %d", err);
6120
6121         if (err) {
6122                 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6123                                 mgmt_status(err));
6124         } else {
6125                 struct mgmt_mode *cp = cmd->param;
6126
6127                 if (cp->val)
6128                         hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6129                 else
6130                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6131
6132                 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6133                 new_settings(hdev, cmd->sk);
6134         }
6135
6136         mgmt_pending_free(cmd);
6137 }
6138
6139 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6140 {
6141         struct mgmt_pending_cmd *cmd = data;
6142         struct mgmt_mode *cp = cmd->param;
6143
6144         return hci_write_fast_connectable_sync(hdev, cp->val);
6145 }
6146
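/* Set Fast Connectable: requires BR/EDR to be enabled on a controller with
 * Bluetooth 1.2 or later. When powered off only the HCI_FAST_CONNECTABLE
 * flag is toggled; otherwise the page scan change is queued as
 * write_fast_connectable_sync().
 */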
6147 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6148                                 void *data, u16 len)
6149 {
6150         struct mgmt_mode *cp = data;
6151         struct mgmt_pending_cmd *cmd;
6152         int err;
6153
6154         bt_dev_dbg(hdev, "sock %p", sk);
6155
6156         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6157             hdev->hci_ver < BLUETOOTH_VER_1_2)
6158                 return mgmt_cmd_status(sk, hdev->id,
6159                                        MGMT_OP_SET_FAST_CONNECTABLE,
6160                                        MGMT_STATUS_NOT_SUPPORTED);
6161
6162         if (cp->val != 0x00 && cp->val != 0x01)
6163                 return mgmt_cmd_status(sk, hdev->id,
6164                                        MGMT_OP_SET_FAST_CONNECTABLE,
6165                                        MGMT_STATUS_INVALID_PARAMS);
6166
6167         hci_dev_lock(hdev);
6168
6169         if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6170                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6171                 goto unlock;
6172         }
6173
6174         if (!hdev_is_powered(hdev)) {
6175                 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6176                 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6177                 new_settings(hdev, sk);
6178                 goto unlock;
6179         }
6180
6181         cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6182                                len);
6183         if (!cmd)
6184                 err = -ENOMEM;
6185         else
6186                 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6187                                          fast_connectable_complete);
6188
6189         if (err < 0) {
6190                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6191                                 MGMT_STATUS_FAILED);
6192
6193                 if (cmd)
6194                         mgmt_pending_free(cmd);
6195         }
6196
6197 unlock:
6198         hci_dev_unlock(hdev);
6199
6200         return err;
6201 }
6202
6203 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6204 {
6205         struct mgmt_pending_cmd *cmd = data;
6206
6207         bt_dev_dbg(hdev, "err %d", err);
6208
6209         if (err) {
6210                 u8 mgmt_err = mgmt_status(err);
6211
6212                 /* We need to restore the HCI_BREDR_ENABLED flag (clear
6213                  * it again) if the related HCI commands failed.
6214                  */
6215                 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6216
6217                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6218         } else {
6219                 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6220                 new_settings(hdev, cmd->sk);
6221         }
6222
6223         mgmt_pending_free(cmd);
6224 }
6225
6226 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6227 {
6228         int status;
6229
6230         status = hci_write_fast_connectable_sync(hdev, false);
6231
6232         if (!status)
6233                 status = hci_update_scan_sync(hdev);
6234
6235         /* Since only the advertising data flags will change, there
6236          * is no need to update the scan response data.
6237          */
6238         if (!status)
6239                 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6240
6241         return status;
6242 }
6243
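/* Set BR/EDR: toggles BR/EDR support on a dual-mode (BR/EDR/LE) controller.
 * Disabling BR/EDR while powered on is rejected, and re-enabling it is
 * rejected when the controller is configured for LE only with a static
 * address or with secure connections enabled (see the comment below).
 */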
6244 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6245 {
6246         struct mgmt_mode *cp = data;
6247         struct mgmt_pending_cmd *cmd;
6248         int err;
6249
6250         bt_dev_dbg(hdev, "sock %p", sk);
6251
6252         if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6253                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6254                                        MGMT_STATUS_NOT_SUPPORTED);
6255
6256         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6257                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6258                                        MGMT_STATUS_REJECTED);
6259
6260         if (cp->val != 0x00 && cp->val != 0x01)
6261                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6262                                        MGMT_STATUS_INVALID_PARAMS);
6263
6264         hci_dev_lock(hdev);
6265
6266         if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6267                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6268                 goto unlock;
6269         }
6270
6271         if (!hdev_is_powered(hdev)) {
6272                 if (!cp->val) {
6273                         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6274                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6275                         hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6276                         hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6277                         hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6278                 }
6279
6280                 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6281
6282                 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6283                 if (err < 0)
6284                         goto unlock;
6285
6286                 err = new_settings(hdev, sk);
6287                 goto unlock;
6288         }
6289
6290         /* Reject disabling when powered on */
6291         if (!cp->val) {
6292                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6293                                       MGMT_STATUS_REJECTED);
6294                 goto unlock;
6295         } else {
6296                 /* When a dual-mode controller has been configured to
6297                  * operate with LE only and a static address, then
6298                  * switching BR/EDR back on is not allowed.
6299                  *
6300                  * Dual-mode controllers shall operate with the public
6301                  * address as their identity address for both BR/EDR and
6302                  * LE, so reject any attempt to create an invalid
6303                  * configuration.
6304                  *
6305                  * The same restriction applies when secure connections
6306                  * have been enabled: for BR/EDR this is a controller
6307                  * feature, while for LE it is a host stack feature. This
6308                  * means that switching BR/EDR back on while secure
6309                  * connections are enabled is not a supported transaction.
6310                  */
6310                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6311                     (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6312                      hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6313                         err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6314                                               MGMT_STATUS_REJECTED);
6315                         goto unlock;
6316                 }
6317         }
6318
6319         cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6320         if (!cmd)
6321                 err = -ENOMEM;
6322         else
6323                 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6324                                          set_bredr_complete);
6325
6326         if (err < 0) {
6327                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6328                                 MGMT_STATUS_FAILED);
6329                 if (cmd)
6330                         mgmt_pending_free(cmd);
6331
6332                 goto unlock;
6333         }
6334
6335         /* We need to set the flag here already so that
6336          * hci_update_adv_data_sync() generates the correct flags.
6337          */
6338         hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6339
6340 unlock:
6341         hci_dev_unlock(hdev);
6342         return err;
6343 }
6344
6345 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6346 {
6347         struct mgmt_pending_cmd *cmd = data;
6348         struct mgmt_mode *cp;
6349
6350         bt_dev_dbg(hdev, "err %d", err);
6351
6352         if (err) {
6353                 u8 mgmt_err = mgmt_status(err);
6354
6355                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6356                 goto done;
6357         }
6358
6359         cp = cmd->param;
6360
6361         switch (cp->val) {
6362         case 0x00:
6363                 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6364                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6365                 break;
6366         case 0x01:
6367                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6368                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6369                 break;
6370         case 0x02:
6371                 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6372                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6373                 break;
6374         }
6375
6376         send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6377         new_settings(hdev, cmd->sk);
6378
6379 done:
6380         mgmt_pending_free(cmd);
6381 }
6382
6383 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6384 {
6385         struct mgmt_pending_cmd *cmd = data;
6386         struct mgmt_mode *cp = cmd->param;
6387         u8 val = !!cp->val;
6388
6389         /* Force write of val */
6390         hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6391
6392         return hci_write_sc_support_sync(hdev, val);
6393 }
6394
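/* Set Secure Connections: 0x00 disables SC, 0x01 enables it and 0x02 enables
 * SC-only mode, tracked via the HCI_SC_ENABLED and HCI_SC_ONLY flags. When
 * the controller is powered on with BR/EDR and SC support, the change is
 * queued as set_secure_conn_sync(); otherwise only the flags are updated.
 */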
6395 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6396                            void *data, u16 len)
6397 {
6398         struct mgmt_mode *cp = data;
6399         struct mgmt_pending_cmd *cmd;
6400         u8 val;
6401         int err;
6402
6403         bt_dev_dbg(hdev, "sock %p", sk);
6404
6405         if (!lmp_sc_capable(hdev) &&
6406             !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6407                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6408                                        MGMT_STATUS_NOT_SUPPORTED);
6409
6410         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6411             lmp_sc_capable(hdev) &&
6412             !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6413                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6414                                        MGMT_STATUS_REJECTED);
6415
6416         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6417                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6418                                        MGMT_STATUS_INVALID_PARAMS);
6419
6420         hci_dev_lock(hdev);
6421
6422         if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6423             !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6424                 bool changed;
6425
6426                 if (cp->val) {
6427                         changed = !hci_dev_test_and_set_flag(hdev,
6428                                                              HCI_SC_ENABLED);
6429                         if (cp->val == 0x02)
6430                                 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6431                         else
6432                                 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6433                 } else {
6434                         changed = hci_dev_test_and_clear_flag(hdev,
6435                                                               HCI_SC_ENABLED);
6436                         hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6437                 }
6438
6439                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6440                 if (err < 0)
6441                         goto failed;
6442
6443                 if (changed)
6444                         err = new_settings(hdev, sk);
6445
6446                 goto failed;
6447         }
6448
6449         val = !!cp->val;
6450
6451         if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6452             (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6453                 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6454                 goto failed;
6455         }
6456
6457         cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6458         if (!cmd)
6459                 err = -ENOMEM;
6460         else
6461                 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6462                                          set_secure_conn_complete);
6463
6464         if (err < 0) {
6465                 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6466                                 MGMT_STATUS_FAILED);
6467                 if (cmd)
6468                         mgmt_pending_free(cmd);
6469         }
6470
6471 failed:
6472         hci_dev_unlock(hdev);
6473         return err;
6474 }
6475
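/* Set Debug Keys: 0x00 discards debug keys, 0x01 keeps them and 0x02 also
 * uses them for pairing. The HCI_KEEP_DEBUG_KEYS and HCI_USE_DEBUG_KEYS
 * flags are updated accordingly, and the SSP debug mode is written to a
 * powered controller when the usage setting changes.
 */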
6476 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6477                           void *data, u16 len)
6478 {
6479         struct mgmt_mode *cp = data;
6480         bool changed, use_changed;
6481         int err;
6482
6483         bt_dev_dbg(hdev, "sock %p", sk);
6484
6485         if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6486                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6487                                        MGMT_STATUS_INVALID_PARAMS);
6488
6489         hci_dev_lock(hdev);
6490
6491         if (cp->val)
6492                 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6493         else
6494                 changed = hci_dev_test_and_clear_flag(hdev,
6495                                                       HCI_KEEP_DEBUG_KEYS);
6496
6497         if (cp->val == 0x02)
6498                 use_changed = !hci_dev_test_and_set_flag(hdev,
6499                                                          HCI_USE_DEBUG_KEYS);
6500         else
6501                 use_changed = hci_dev_test_and_clear_flag(hdev,
6502                                                           HCI_USE_DEBUG_KEYS);
6503
6504         if (hdev_is_powered(hdev) && use_changed &&
6505             hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6506                 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6507                 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6508                              sizeof(mode), &mode);
6509         }
6510
6511         err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6512         if (err < 0)
6513                 goto unlock;
6514
6515         if (changed)
6516                 err = new_settings(hdev, sk);
6517
6518 unlock:
6519         hci_dev_unlock(hdev);
6520         return err;
6521 }
6522
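/* Set Privacy: only allowed while the controller is powered off. Stores the
 * provided IRK, sets or clears the HCI_PRIVACY and HCI_LIMITED_PRIVACY flags
 * (privacy value 0x02 selects limited privacy) and marks the RPA as expired
 * so a fresh one gets generated.
 */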
6523 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6524                        u16 len)
6525 {
6526         struct mgmt_cp_set_privacy *cp = cp_data;
6527         bool changed;
6528         int err;
6529
6530         bt_dev_dbg(hdev, "sock %p", sk);
6531
6532         if (!lmp_le_capable(hdev))
6533                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6534                                        MGMT_STATUS_NOT_SUPPORTED);
6535
6536         if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6537                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6538                                        MGMT_STATUS_INVALID_PARAMS);
6539
6540         if (hdev_is_powered(hdev))
6541                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6542                                        MGMT_STATUS_REJECTED);
6543
6544         hci_dev_lock(hdev);
6545
6546         /* If user space supports this command it is also expected to
6547          * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6548          */
6549         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6550
6551         if (cp->privacy) {
6552                 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6553                 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6554                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6555                 hci_adv_instances_set_rpa_expired(hdev, true);
6556                 if (cp->privacy == 0x02)
6557                         hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6558                 else
6559                         hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6560         } else {
6561                 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6562                 memset(hdev->irk, 0, sizeof(hdev->irk));
6563                 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6564                 hci_adv_instances_set_rpa_expired(hdev, false);
6565                 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6566         }
6567
6568         err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6569         if (err < 0)
6570                 goto unlock;
6571
6572         if (changed)
6573                 err = new_settings(hdev, sk);
6574
6575 unlock:
6576         hci_dev_unlock(hdev);
6577         return err;
6578 }
6579
6580 static bool irk_is_valid(struct mgmt_irk_info *irk)
6581 {
6582         switch (irk->addr.type) {
6583         case BDADDR_LE_PUBLIC:
6584                 return true;
6585
6586         case BDADDR_LE_RANDOM:
6587                 /* Two most significant bits shall be set */
6588                 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6589                         return false;
6590                 return true;
6591         }
6592
6593         return false;
6594 }
6595
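/* Load IRKs: replaces the current list of Identity Resolving Keys. The
 * command length must match the declared key count and every entry must use
 * an identity address; keys on the blocked-key list are skipped.
 */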
6596 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6597                      u16 len)
6598 {
6599         struct mgmt_cp_load_irks *cp = cp_data;
6600         const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6601                                    sizeof(struct mgmt_irk_info));
6602         u16 irk_count, expected_len;
6603         int i, err;
6604
6605         bt_dev_dbg(hdev, "sock %p", sk);
6606
6607         if (!lmp_le_capable(hdev))
6608                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6609                                        MGMT_STATUS_NOT_SUPPORTED);
6610
6611         irk_count = __le16_to_cpu(cp->irk_count);
6612         if (irk_count > max_irk_count) {
6613                 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6614                            irk_count);
6615                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6616                                        MGMT_STATUS_INVALID_PARAMS);
6617         }
6618
6619         expected_len = struct_size(cp, irks, irk_count);
6620         if (expected_len != len) {
6621                 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6622                            expected_len, len);
6623                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6624                                        MGMT_STATUS_INVALID_PARAMS);
6625         }
6626
6627         bt_dev_dbg(hdev, "irk_count %u", irk_count);
6628
6629         for (i = 0; i < irk_count; i++) {
6630                 struct mgmt_irk_info *key = &cp->irks[i];
6631
6632                 if (!irk_is_valid(key))
6633                         return mgmt_cmd_status(sk, hdev->id,
6634                                                MGMT_OP_LOAD_IRKS,
6635                                                MGMT_STATUS_INVALID_PARAMS);
6636         }
6637
6638         hci_dev_lock(hdev);
6639
6640         hci_smp_irks_clear(hdev);
6641
6642         for (i = 0; i < irk_count; i++) {
6643                 struct mgmt_irk_info *irk = &cp->irks[i];
6644
6645                 if (hci_is_blocked_key(hdev,
6646                                        HCI_BLOCKED_KEY_TYPE_IRK,
6647                                        irk->val)) {
6648                         bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6649                                     &irk->addr.bdaddr);
6650                         continue;
6651                 }
6652
6653                 hci_add_irk(hdev, &irk->addr.bdaddr,
6654                             le_addr_type(irk->addr.type), irk->val,
6655                             BDADDR_ANY);
6656         }
6657
6658         hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6659
6660         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6661
6662         hci_dev_unlock(hdev);
6663
6664         return err;
6665 }
6666
6667 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6668 {
6669         if (key->initiator != 0x00 && key->initiator != 0x01)
6670                 return false;
6671
6672         switch (key->addr.type) {
6673         case BDADDR_LE_PUBLIC:
6674                 return true;
6675
6676         case BDADDR_LE_RANDOM:
6677                 /* Two most significant bits shall be set */
6678                 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6679                         return false;
6680                 return true;
6681         }
6682
6683         return false;
6684 }
6685
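/* Load Long Term Keys: replaces the current list of SMP LTKs. The command
 * length must match the declared key count and every entry must use an
 * identity address; blocked keys and keys with an unknown type (including
 * P-256 debug keys) are skipped.
 */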
6686 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6687                                void *cp_data, u16 len)
6688 {
6689         struct mgmt_cp_load_long_term_keys *cp = cp_data;
6690         const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6691                                    sizeof(struct mgmt_ltk_info));
6692         u16 key_count, expected_len;
6693         int i, err;
6694
6695         bt_dev_dbg(hdev, "sock %p", sk);
6696
6697         if (!lmp_le_capable(hdev))
6698                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6699                                        MGMT_STATUS_NOT_SUPPORTED);
6700
6701         key_count = __le16_to_cpu(cp->key_count);
6702         if (key_count > max_key_count) {
6703                 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6704                            key_count);
6705                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6706                                        MGMT_STATUS_INVALID_PARAMS);
6707         }
6708
6709         expected_len = struct_size(cp, keys, key_count);
6710         if (expected_len != len) {
6711                 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6712                            expected_len, len);
6713                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6714                                        MGMT_STATUS_INVALID_PARAMS);
6715         }
6716
6717         bt_dev_dbg(hdev, "key_count %u", key_count);
6718
6719         for (i = 0; i < key_count; i++) {
6720                 struct mgmt_ltk_info *key = &cp->keys[i];
6721
6722                 if (!ltk_is_valid(key))
6723                         return mgmt_cmd_status(sk, hdev->id,
6724                                                MGMT_OP_LOAD_LONG_TERM_KEYS,
6725                                                MGMT_STATUS_INVALID_PARAMS);
6726         }
6727
6728         hci_dev_lock(hdev);
6729
6730         hci_smp_ltks_clear(hdev);
6731
6732         for (i = 0; i < key_count; i++) {
6733                 struct mgmt_ltk_info *key = &cp->keys[i];
6734                 u8 type, authenticated;
6735
6736                 if (hci_is_blocked_key(hdev,
6737                                        HCI_BLOCKED_KEY_TYPE_LTK,
6738                                        key->val)) {
6739                         bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
6740                                     &key->addr.bdaddr);
6741                         continue;
6742                 }
6743
6744                 switch (key->type) {
6745                 case MGMT_LTK_UNAUTHENTICATED:
6746                         authenticated = 0x00;
6747                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6748                         break;
6749                 case MGMT_LTK_AUTHENTICATED:
6750                         authenticated = 0x01;
6751                         type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6752                         break;
6753                 case MGMT_LTK_P256_UNAUTH:
6754                         authenticated = 0x00;
6755                         type = SMP_LTK_P256;
6756                         break;
6757                 case MGMT_LTK_P256_AUTH:
6758                         authenticated = 0x01;
6759                         type = SMP_LTK_P256;
6760                         break;
6761                 case MGMT_LTK_P256_DEBUG:
6762                         authenticated = 0x00;
6763                         type = SMP_LTK_P256_DEBUG;
6764                         fallthrough;
6765                 default:
6766                         continue;
6767                 }
6768
6769                 hci_add_ltk(hdev, &key->addr.bdaddr,
6770                             le_addr_type(key->addr.type), type, authenticated,
6771                             key->val, key->enc_size, key->ediv, key->rand);
6772         }
6773
6774         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6775                            NULL, 0);
6776
6777         hci_dev_unlock(hdev);
6778
6779         return err;
6780 }
6781
6782 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
6783 {
6784         struct mgmt_pending_cmd *cmd = data;
6785         struct hci_conn *conn = cmd->user_data;
6786         struct mgmt_cp_get_conn_info *cp = cmd->param;
6787         struct mgmt_rp_get_conn_info rp;
6788         u8 status;
6789
6790         bt_dev_dbg(hdev, "err %d", err);
6791
6792         memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
6793
6794         status = mgmt_status(err);
6795         if (status == MGMT_STATUS_SUCCESS) {
6796                 rp.rssi = conn->rssi;
6797                 rp.tx_power = conn->tx_power;
6798                 rp.max_tx_power = conn->max_tx_power;
6799         } else {
6800                 rp.rssi = HCI_RSSI_INVALID;
6801                 rp.tx_power = HCI_TX_POWER_INVALID;
6802                 rp.max_tx_power = HCI_TX_POWER_INVALID;
6803         }
6804
6805         mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
6806                           &rp, sizeof(rp));
6807
6808         mgmt_pending_free(cmd);
6809 }
6810
6811 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
6812 {
6813         struct mgmt_pending_cmd *cmd = data;
6814         struct mgmt_cp_get_conn_info *cp = cmd->param;
6815         struct hci_conn *conn;
6816         int err;
6817         __le16   handle;
6818
6819         /* Make sure we are still connected */
6820         if (cp->addr.type == BDADDR_BREDR)
6821                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6822                                                &cp->addr.bdaddr);
6823         else
6824                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6825
6826         if (!conn || conn->state != BT_CONNECTED)
6827                 return MGMT_STATUS_NOT_CONNECTED;
6828
6829         cmd->user_data = conn;
6830         handle = cpu_to_le16(conn->handle);
6831
6832         /* Refresh RSSI each time */
6833         err = hci_read_rssi_sync(hdev, handle);
6834
6835         /* For LE links the TX power does not change, so we don't need
6836          * to query for it again once the value is known.
6837          */
6838         if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
6839                      conn->tx_power == HCI_TX_POWER_INVALID))
6840                 err = hci_read_tx_power_sync(hdev, handle, 0x00);
6841
6842         /* Max TX power needs to be read only once per connection */
6843         if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
6844                 err = hci_read_tx_power_sync(hdev, handle, 0x01);
6845
6846         return err;
6847 }
6848
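/* Get Connection Information: returns RSSI and TX power for an existing
 * connection. Cached values are returned as long as they are younger than a
 * randomized age between conn_info_min_age and conn_info_max_age; otherwise
 * get_conn_info_sync() is queued to refresh them from the controller.
 */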
6849 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6850                          u16 len)
6851 {
6852         struct mgmt_cp_get_conn_info *cp = data;
6853         struct mgmt_rp_get_conn_info rp;
6854         struct hci_conn *conn;
6855         unsigned long conn_info_age;
6856         int err = 0;
6857
6858         bt_dev_dbg(hdev, "sock %p", sk);
6859
6860         memset(&rp, 0, sizeof(rp));
6861         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6862         rp.addr.type = cp->addr.type;
6863
6864         if (!bdaddr_type_is_valid(cp->addr.type))
6865                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6866                                          MGMT_STATUS_INVALID_PARAMS,
6867                                          &rp, sizeof(rp));
6868
6869         hci_dev_lock(hdev);
6870
6871         if (!hdev_is_powered(hdev)) {
6872                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6873                                         MGMT_STATUS_NOT_POWERED, &rp,
6874                                         sizeof(rp));
6875                 goto unlock;
6876         }
6877
6878         if (cp->addr.type == BDADDR_BREDR)
6879                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6880                                                &cp->addr.bdaddr);
6881         else
6882                 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6883
6884         if (!conn || conn->state != BT_CONNECTED) {
6885                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6886                                         MGMT_STATUS_NOT_CONNECTED, &rp,
6887                                         sizeof(rp));
6888                 goto unlock;
6889         }
6890
6891         /* To stop the client from guessing when to poll again, calculate the
6892          * conn info age as a random value between the min/max ages set in hdev.
6893          */
6894         conn_info_age = hdev->conn_info_min_age +
6895                         prandom_u32_max(hdev->conn_info_max_age -
6896                                         hdev->conn_info_min_age);
6897
6898         /* Query controller to refresh cached values if they are too old or were
6899          * never read.
6900          */
6901         if (time_after(jiffies, conn->conn_info_timestamp +
6902                        msecs_to_jiffies(conn_info_age)) ||
6903             !conn->conn_info_timestamp) {
6904                 struct mgmt_pending_cmd *cmd;
6905
6906                 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
6907                                        len);
6908                 if (!cmd) {
6909                         err = -ENOMEM;
6910                 } else {
6911                         err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
6912                                                  cmd, get_conn_info_complete);
6913                 }
6914
6915                 if (err < 0) {
6916                         mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6917                                           MGMT_STATUS_FAILED, &rp, sizeof(rp));
6918
6919                         if (cmd)
6920                                 mgmt_pending_free(cmd);
6921
6922                         goto unlock;
6923                 }
6924
6925                 conn->conn_info_timestamp = jiffies;
6926         } else {
6927                 /* Cache is valid, just reply with values cached in hci_conn */
6928                 rp.rssi = conn->rssi;
6929                 rp.tx_power = conn->tx_power;
6930                 rp.max_tx_power = conn->max_tx_power;
6931
6932                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6933                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6934         }
6935
6936 unlock:
6937         hci_dev_unlock(hdev);
6938         return err;
6939 }
6940
6941 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
6942 {
6943         struct mgmt_pending_cmd *cmd = data;
6944         struct mgmt_cp_get_clock_info *cp = cmd->param;
6945         struct mgmt_rp_get_clock_info rp;
6946         struct hci_conn *conn = cmd->user_data;
6947         u8 status = mgmt_status(err);
6948
6949         bt_dev_dbg(hdev, "err %d", err);
6950
6951         memset(&rp, 0, sizeof(rp));
6952         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6953         rp.addr.type = cp->addr.type;
6954
6955         if (err)
6956                 goto complete;
6957
6958         rp.local_clock = cpu_to_le32(hdev->clock);
6959
6960         if (conn) {
6961                 rp.piconet_clock = cpu_to_le32(conn->clock);
6962                 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6963         }
6964
6965 complete:
6966         mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6967                           sizeof(rp));
6968
6969         mgmt_pending_free(cmd);
6970 }
6971
6972 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
6973 {
6974         struct mgmt_pending_cmd *cmd = data;
6975         struct mgmt_cp_get_clock_info *cp = cmd->param;
6976         struct hci_cp_read_clock hci_cp;
6977         struct hci_conn *conn;
6978
6979         memset(&hci_cp, 0, sizeof(hci_cp));
6980         hci_read_clock_sync(hdev, &hci_cp);
6981
6982         /* Make sure connection still exists */
6983         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
6984         if (!conn || conn->state != BT_CONNECTED)
6985                 return MGMT_STATUS_NOT_CONNECTED;
6986
6987         cmd->user_data = conn;
6988         hci_cp.handle = cpu_to_le16(conn->handle);
6989         hci_cp.which = 0x01; /* Piconet clock */
6990
6991         return hci_read_clock_sync(hdev, &hci_cp);
6992 }
6993
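/* Get Clock Information: BR/EDR only. Queues get_clock_info_sync() to read
 * the local clock and, when a connected peer address is given, the piconet
 * clock and accuracy for that connection.
 */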
6994 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
6995                                                                 u16 len)
6996 {
6997         struct mgmt_cp_get_clock_info *cp = data;
6998         struct mgmt_rp_get_clock_info rp;
6999         struct mgmt_pending_cmd *cmd;
7000         struct hci_conn *conn;
7001         int err;
7002
7003         bt_dev_dbg(hdev, "sock %p", sk);
7004
7005         memset(&rp, 0, sizeof(rp));
7006         bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7007         rp.addr.type = cp->addr.type;
7008
7009         if (cp->addr.type != BDADDR_BREDR)
7010                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7011                                          MGMT_STATUS_INVALID_PARAMS,
7012                                          &rp, sizeof(rp));
7013
7014         hci_dev_lock(hdev);
7015
7016         if (!hdev_is_powered(hdev)) {
7017                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7018                                         MGMT_STATUS_NOT_POWERED, &rp,
7019                                         sizeof(rp));
7020                 goto unlock;
7021         }
7022
7023         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7024                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7025                                                &cp->addr.bdaddr);
7026                 if (!conn || conn->state != BT_CONNECTED) {
7027                         err = mgmt_cmd_complete(sk, hdev->id,
7028                                                 MGMT_OP_GET_CLOCK_INFO,
7029                                                 MGMT_STATUS_NOT_CONNECTED,
7030                                                 &rp, sizeof(rp));
7031                         goto unlock;
7032                 }
7033         } else {
7034                 conn = NULL;
7035         }
7036
7037         cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7038         if (!cmd)
7039                 err = -ENOMEM;
7040         else
7041                 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7042                                          get_clock_info_complete);
7043
7044         if (err < 0) {
7045                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7046                                         MGMT_STATUS_FAILED, &rp, sizeof(rp));
7047
7048                 if (cmd)
7049                         mgmt_pending_free(cmd);
7050         }
7051
7052
7053 unlock:
7054         hci_dev_unlock(hdev);
7055         return err;
7056 }
7057
7058 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7059 {
7060         struct hci_conn *conn;
7061
7062         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7063         if (!conn)
7064                 return false;
7065
7066         if (conn->dst_type != type)
7067                 return false;
7068
7069         if (conn->state != BT_CONNECTED)
7070                 return false;
7071
7072         return true;
7073 }
7074
7075 /* This function requires the caller holds hdev->lock */
7076 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7077                                u8 addr_type, u8 auto_connect)
7078 {
7079         struct hci_conn_params *params;
7080
7081         params = hci_conn_params_add(hdev, addr, addr_type);
7082         if (!params)
7083                 return -EIO;
7084
7085         if (params->auto_connect == auto_connect)
7086                 return 0;
7087
7088         list_del_init(&params->action);
7089
7090         switch (auto_connect) {
7091         case HCI_AUTO_CONN_DISABLED:
7092         case HCI_AUTO_CONN_LINK_LOSS:
7093                 /* If auto connect is being disabled while we're trying to
7094                  * connect to the device, keep connecting.
7095                  */
7096                 if (params->explicit_connect)
7097                         list_add(&params->action, &hdev->pend_le_conns);
7098                 break;
7099         case HCI_AUTO_CONN_REPORT:
7100                 if (params->explicit_connect)
7101                         list_add(&params->action, &hdev->pend_le_conns);
7102                 else
7103                         list_add(&params->action, &hdev->pend_le_reports);
7104                 break;
7105         case HCI_AUTO_CONN_DIRECT:
7106         case HCI_AUTO_CONN_ALWAYS:
7107                 if (!is_connected(hdev, addr, addr_type))
7108                         list_add(&params->action, &hdev->pend_le_conns);
7109                 break;
7110         }
7111
7112         params->auto_connect = auto_connect;
7113
7114         bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7115                    addr, addr_type, auto_connect);
7116
7117         return 0;
7118 }
7119
7120 static void device_added(struct sock *sk, struct hci_dev *hdev,
7121                          bdaddr_t *bdaddr, u8 type, u8 action)
7122 {
7123         struct mgmt_ev_device_added ev;
7124
7125         bacpy(&ev.addr.bdaddr, bdaddr);
7126         ev.addr.type = type;
7127         ev.action = action;
7128
7129         mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7130 }
7131
7132 static int add_device_sync(struct hci_dev *hdev, void *data)
7133 {
7134         return hci_update_passive_scan_sync(hdev);
7135 }
7136
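/* Add Device: for BR/EDR addresses only action 0x01 (allow incoming
 * connections) is supported and the device is added to the accept list. For
 * LE identity addresses the action selects the auto-connect policy
 * (0x00 -> HCI_AUTO_CONN_REPORT, 0x01 -> HCI_AUTO_CONN_DIRECT,
 * 0x02 -> HCI_AUTO_CONN_ALWAYS) and the passive scan is updated.
 */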
7137 static int add_device(struct sock *sk, struct hci_dev *hdev,
7138                       void *data, u16 len)
7139 {
7140         struct mgmt_cp_add_device *cp = data;
7141         u8 auto_conn, addr_type;
7142         struct hci_conn_params *params;
7143         int err;
7144         u32 current_flags = 0;
7145         u32 supported_flags;
7146
7147         bt_dev_dbg(hdev, "sock %p", sk);
7148
7149         if (!bdaddr_type_is_valid(cp->addr.type) ||
7150             !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7151                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7152                                          MGMT_STATUS_INVALID_PARAMS,
7153                                          &cp->addr, sizeof(cp->addr));
7154
7155         if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7156                 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7157                                          MGMT_STATUS_INVALID_PARAMS,
7158                                          &cp->addr, sizeof(cp->addr));
7159
7160         hci_dev_lock(hdev);
7161
7162         if (cp->addr.type == BDADDR_BREDR) {
7163                 /* Only the incoming connection action is supported for now */
7164                 if (cp->action != 0x01) {
7165                         err = mgmt_cmd_complete(sk, hdev->id,
7166                                                 MGMT_OP_ADD_DEVICE,
7167                                                 MGMT_STATUS_INVALID_PARAMS,
7168                                                 &cp->addr, sizeof(cp->addr));
7169                         goto unlock;
7170                 }
7171
7172                 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7173                                                      &cp->addr.bdaddr,
7174                                                      cp->addr.type, 0);
7175                 if (err)
7176                         goto unlock;
7177
7178                 hci_update_scan(hdev);
7179
7180                 goto added;
7181         }
7182
7183         addr_type = le_addr_type(cp->addr.type);
7184
7185         if (cp->action == 0x02)
7186                 auto_conn = HCI_AUTO_CONN_ALWAYS;
7187         else if (cp->action == 0x01)
7188                 auto_conn = HCI_AUTO_CONN_DIRECT;
7189         else
7190                 auto_conn = HCI_AUTO_CONN_REPORT;
7191
7192         /* The kernel internally uses conn_params with resolvable private
7193          * addresses, but Add Device allows only identity addresses.
7194          * Make sure this is enforced before calling
7195          * hci_conn_params_lookup.
7196          */
7197         if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7198                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7199                                         MGMT_STATUS_INVALID_PARAMS,
7200                                         &cp->addr, sizeof(cp->addr));
7201                 goto unlock;
7202         }
7203
7204         /* If the connection parameters don't exist for this device,
7205          * they will be created and configured with defaults.
7206          */
7207         if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7208                                 auto_conn) < 0) {
7209                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7210                                         MGMT_STATUS_FAILED, &cp->addr,
7211                                         sizeof(cp->addr));
7212                 goto unlock;
7213         } else {
7214                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7215                                                 addr_type);
7216                 if (params)
7217                         current_flags = params->flags;
7218         }
7219
7220         err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7221         if (err < 0)
7222                 goto unlock;
7223
7224 added:
7225         device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7226         supported_flags = hdev->conn_flags;
7227         device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7228                              supported_flags, current_flags);
7229
7230         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7231                                 MGMT_STATUS_SUCCESS, &cp->addr,
7232                                 sizeof(cp->addr));
7233
7234 unlock:
7235         hci_dev_unlock(hdev);
7236         return err;
7237 }
7238
7239 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7240                            bdaddr_t *bdaddr, u8 type)
7241 {
7242         struct mgmt_ev_device_removed ev;
7243
7244         bacpy(&ev.addr.bdaddr, bdaddr);
7245         ev.addr.type = type;
7246
7247         mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7248 }
7249
7250 static int remove_device_sync(struct hci_dev *hdev, void *data)
7251 {
7252         return hci_update_passive_scan_sync(hdev);
7253 }
7254
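/* Remove Device: removes a single device from the accept list or from the
 * LE connection parameters, or, when BDADDR_ANY is given with address type
 * 0, removes all of them. Parameters with an explicit connect pending are
 * kept and reset to HCI_AUTO_CONN_EXPLICIT.
 */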
7255 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7256                          void *data, u16 len)
7257 {
7258         struct mgmt_cp_remove_device *cp = data;
7259         int err;
7260
7261         bt_dev_dbg(hdev, "sock %p", sk);
7262
7263         hci_dev_lock(hdev);
7264
7265         if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7266                 struct hci_conn_params *params;
7267                 u8 addr_type;
7268
7269                 if (!bdaddr_type_is_valid(cp->addr.type)) {
7270                         err = mgmt_cmd_complete(sk, hdev->id,
7271                                                 MGMT_OP_REMOVE_DEVICE,
7272                                                 MGMT_STATUS_INVALID_PARAMS,
7273                                                 &cp->addr, sizeof(cp->addr));
7274                         goto unlock;
7275                 }
7276
7277                 if (cp->addr.type == BDADDR_BREDR) {
7278                         err = hci_bdaddr_list_del(&hdev->accept_list,
7279                                                   &cp->addr.bdaddr,
7280                                                   cp->addr.type);
7281                         if (err) {
7282                                 err = mgmt_cmd_complete(sk, hdev->id,
7283                                                         MGMT_OP_REMOVE_DEVICE,
7284                                                         MGMT_STATUS_INVALID_PARAMS,
7285                                                         &cp->addr,
7286                                                         sizeof(cp->addr));
7287                                 goto unlock;
7288                         }
7289
7290                         hci_update_scan(hdev);
7291
7292                         device_removed(sk, hdev, &cp->addr.bdaddr,
7293                                        cp->addr.type);
7294                         goto complete;
7295                 }
7296
7297                 addr_type = le_addr_type(cp->addr.type);
7298
7299                 /* The kernel internally uses conn_params with resolvable private
7300                  * addresses, but Remove Device allows only identity addresses.
7301                  * Make sure this is enforced before calling
7302                  * hci_conn_params_lookup.
7303                  */
7304                 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7305                         err = mgmt_cmd_complete(sk, hdev->id,
7306                                                 MGMT_OP_REMOVE_DEVICE,
7307                                                 MGMT_STATUS_INVALID_PARAMS,
7308                                                 &cp->addr, sizeof(cp->addr));
7309                         goto unlock;
7310                 }
7311
7312                 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7313                                                 addr_type);
7314                 if (!params) {
7315                         err = mgmt_cmd_complete(sk, hdev->id,
7316                                                 MGMT_OP_REMOVE_DEVICE,
7317                                                 MGMT_STATUS_INVALID_PARAMS,
7318                                                 &cp->addr, sizeof(cp->addr));
7319                         goto unlock;
7320                 }
7321
7322                 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7323                     params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7324                         err = mgmt_cmd_complete(sk, hdev->id,
7325                                                 MGMT_OP_REMOVE_DEVICE,
7326                                                 MGMT_STATUS_INVALID_PARAMS,
7327                                                 &cp->addr, sizeof(cp->addr));
7328                         goto unlock;
7329                 }
7330
7331                 list_del(&params->action);
7332                 list_del(&params->list);
7333                 kfree(params);
7334
7335                 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7336         } else {
7337                 struct hci_conn_params *p, *tmp;
7338                 struct bdaddr_list *b, *btmp;
7339
7340                 if (cp->addr.type) {
7341                         err = mgmt_cmd_complete(sk, hdev->id,
7342                                                 MGMT_OP_REMOVE_DEVICE,
7343                                                 MGMT_STATUS_INVALID_PARAMS,
7344                                                 &cp->addr, sizeof(cp->addr));
7345                         goto unlock;
7346                 }
7347
7348                 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7349                         device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7350                         list_del(&b->list);
7351                         kfree(b);
7352                 }
7353
7354                 hci_update_scan(hdev);
7355
7356                 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7357                         if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7358                                 continue;
7359                         device_removed(sk, hdev, &p->addr, p->addr_type);
7360                         if (p->explicit_connect) {
7361                                 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7362                                 continue;
7363                         }
7364                         list_del(&p->action);
7365                         list_del(&p->list);
7366                         kfree(p);
7367                 }
7368
7369                 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7370         }
7371
7372         hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7373
7374 complete:
7375         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7376                                 MGMT_STATUS_SUCCESS, &cp->addr,
7377                                 sizeof(cp->addr));
7378 unlock:
7379         hci_dev_unlock(hdev);
7380         return err;
7381 }
7382
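     /* Handler for MGMT_OP_LOAD_CONN_PARAM: validates the parameter count
      * against the message length, clears previously disabled connection
      * parameters and stores each valid LE entry (connection interval,
      * latency and supervision timeout) for the given address and type.
      */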
7383 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7384                            u16 len)
7385 {
7386         struct mgmt_cp_load_conn_param *cp = data;
7387         const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7388                                      sizeof(struct mgmt_conn_param));
7389         u16 param_count, expected_len;
7390         int i;
7391
7392         if (!lmp_le_capable(hdev))
7393                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7394                                        MGMT_STATUS_NOT_SUPPORTED);
7395
7396         param_count = __le16_to_cpu(cp->param_count);
7397         if (param_count > max_param_count) {
7398                 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7399                            param_count);
7400                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7401                                        MGMT_STATUS_INVALID_PARAMS);
7402         }
7403
7404         expected_len = struct_size(cp, params, param_count);
7405         if (expected_len != len) {
7406                 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7407                            expected_len, len);
7408                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7409                                        MGMT_STATUS_INVALID_PARAMS);
7410         }
7411
7412         bt_dev_dbg(hdev, "param_count %u", param_count);
7413
7414         hci_dev_lock(hdev);
7415
7416         hci_conn_params_clear_disabled(hdev);
7417
7418         for (i = 0; i < param_count; i++) {
7419                 struct mgmt_conn_param *param = &cp->params[i];
7420                 struct hci_conn_params *hci_param;
7421                 u16 min, max, latency, timeout;
7422                 u8 addr_type;
7423
7424                 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7425                            param->addr.type);
7426
7427                 if (param->addr.type == BDADDR_LE_PUBLIC) {
7428                         addr_type = ADDR_LE_DEV_PUBLIC;
7429                 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7430                         addr_type = ADDR_LE_DEV_RANDOM;
7431                 } else {
7432                         bt_dev_err(hdev, "ignoring invalid connection parameters");
7433                         continue;
7434                 }
7435
7436                 min = le16_to_cpu(param->min_interval);
7437                 max = le16_to_cpu(param->max_interval);
7438                 latency = le16_to_cpu(param->latency);
7439                 timeout = le16_to_cpu(param->timeout);
7440
7441                 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7442                            min, max, latency, timeout);
7443
7444                 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7445                         bt_dev_err(hdev, "ignoring invalid connection parameters");
7446                         continue;
7447                 }
7448
7449                 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7450                                                 addr_type);
7451                 if (!hci_param) {
7452                         bt_dev_err(hdev, "failed to add connection parameters");
7453                         continue;
7454                 }
7455
7456                 hci_param->conn_min_interval = min;
7457                 hci_param->conn_max_interval = max;
7458                 hci_param->conn_latency = latency;
7459                 hci_param->supervision_timeout = timeout;
7460         }
7461
7462         hci_dev_unlock(hdev);
7463
7464         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7465                                  NULL, 0);
7466 }
7467
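     /* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: only accepted while the
      * controller is powered off and has HCI_QUIRK_EXTERNAL_CONFIG set.
      * Toggling HCI_EXT_CONFIGURED may move the controller between the
      * unconfigured and configured index lists.
      */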
7468 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7469                                void *data, u16 len)
7470 {
7471         struct mgmt_cp_set_external_config *cp = data;
7472         bool changed;
7473         int err;
7474
7475         bt_dev_dbg(hdev, "sock %p", sk);
7476
7477         if (hdev_is_powered(hdev))
7478                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7479                                        MGMT_STATUS_REJECTED);
7480
7481         if (cp->config != 0x00 && cp->config != 0x01)
7482                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7483                                        MGMT_STATUS_INVALID_PARAMS);
7484
7485         if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7486                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7487                                        MGMT_STATUS_NOT_SUPPORTED);
7488
7489         hci_dev_lock(hdev);
7490
7491         if (cp->config)
7492                 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7493         else
7494                 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7495
7496         err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7497         if (err < 0)
7498                 goto unlock;
7499
7500         if (!changed)
7501                 goto unlock;
7502
7503         err = new_options(hdev, sk);
7504
7505         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7506                 mgmt_index_removed(hdev);
7507
7508                 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7509                         hci_dev_set_flag(hdev, HCI_CONFIG);
7510                         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7511
7512                         queue_work(hdev->req_workqueue, &hdev->power_on);
7513                 } else {
7514                         set_bit(HCI_RAW, &hdev->flags);
7515                         mgmt_index_added(hdev);
7516                 }
7517         }
7518
7519 unlock:
7520         hci_dev_unlock(hdev);
7521         return err;
7522 }
7523
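     /* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: stores the requested public
      * address while the controller is powered off and the driver provides
      * a set_bdaddr hook. If the new address completes the configuration,
      * power-on is queued so that it can be applied.
      */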
7524 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7525                               void *data, u16 len)
7526 {
7527         struct mgmt_cp_set_public_address *cp = data;
7528         bool changed;
7529         int err;
7530
7531         bt_dev_dbg(hdev, "sock %p", sk);
7532
7533         if (hdev_is_powered(hdev))
7534                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7535                                        MGMT_STATUS_REJECTED);
7536
7537         if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7538                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7539                                        MGMT_STATUS_INVALID_PARAMS);
7540
7541         if (!hdev->set_bdaddr)
7542                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7543                                        MGMT_STATUS_NOT_SUPPORTED);
7544
7545         hci_dev_lock(hdev);
7546
7547         changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7548         bacpy(&hdev->public_addr, &cp->bdaddr);
7549
7550         err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7551         if (err < 0)
7552                 goto unlock;
7553
7554         if (!changed)
7555                 goto unlock;
7556
7557         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7558                 err = new_options(hdev, sk);
7559
7560         if (is_configured(hdev)) {
7561                 mgmt_index_removed(hdev);
7562
7563                 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7564
7565                 hci_dev_set_flag(hdev, HCI_CONFIG);
7566                 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7567
7568                 queue_work(hdev->req_workqueue, &hdev->power_on);
7569         }
7570
7571 unlock:
7572         hci_dev_unlock(hdev);
7573         return err;
7574 }
7575
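     /* Completion handler for the HCI request behind Read Local OOB
      * Extended Data: converts the controller reply (legacy or Secure
      * Connections variant) into EIR encoded hash and randomizer values,
      * sends the command response and, on success, the Local OOB Data
      * Updated event.
      */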
7576 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
7577                                              int err)
7578 {
7579         const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7580         struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7581         u8 *h192, *r192, *h256, *r256;
7582         struct mgmt_pending_cmd *cmd = data;
7583         struct sk_buff *skb = cmd->skb;
7584         u8 status = mgmt_status(err);
7585         u16 eir_len;
7586
7587         if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
7588                 return;
7589
7590         if (!status) {
7591                 if (!skb)
7592                         status = MGMT_STATUS_FAILED;
7593                 else if (IS_ERR(skb))
7594                         status = mgmt_status(PTR_ERR(skb));
7595                 else
7596                         status = mgmt_status(skb->data[0]);
7597         }
7598
7599         bt_dev_dbg(hdev, "status %u", status);
7600
7601         mgmt_cp = cmd->param;
7602
7603         if (status) {
7604                 status = mgmt_status(status);
7605                 eir_len = 0;
7606
7607                 h192 = NULL;
7608                 r192 = NULL;
7609                 h256 = NULL;
7610                 r256 = NULL;
7611         } else if (!bredr_sc_enabled(hdev)) {
7612                 struct hci_rp_read_local_oob_data *rp;
7613
7614                 if (skb->len != sizeof(*rp)) {
7615                         status = MGMT_STATUS_FAILED;
7616                         eir_len = 0;
7617                 } else {
7618                         status = MGMT_STATUS_SUCCESS;
7619                         rp = (void *)skb->data;
7620
7621                         eir_len = 5 + 18 + 18;
7622                         h192 = rp->hash;
7623                         r192 = rp->rand;
7624                         h256 = NULL;
7625                         r256 = NULL;
7626                 }
7627         } else {
7628                 struct hci_rp_read_local_oob_ext_data *rp;
7629
7630                 if (skb->len != sizeof(*rp)) {
7631                         status = MGMT_STATUS_FAILED;
7632                         eir_len = 0;
7633                 } else {
7634                         status = MGMT_STATUS_SUCCESS;
7635                         rp = (void *)skb->data;
7636
7637                         if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7638                                 eir_len = 5 + 18 + 18;
7639                                 h192 = NULL;
7640                                 r192 = NULL;
7641                         } else {
7642                                 eir_len = 5 + 18 + 18 + 18 + 18;
7643                                 h192 = rp->hash192;
7644                                 r192 = rp->rand192;
7645                         }
7646
7647                         h256 = rp->hash256;
7648                         r256 = rp->rand256;
7649                 }
7650         }
7651
7652         mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7653         if (!mgmt_rp)
7654                 goto done;
7655
7656         if (eir_len == 0)
7657                 goto send_rsp;
7658
7659         eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7660                                   hdev->dev_class, 3);
7661
7662         if (h192 && r192) {
7663                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7664                                           EIR_SSP_HASH_C192, h192, 16);
7665                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7666                                           EIR_SSP_RAND_R192, r192, 16);
7667         }
7668
7669         if (h256 && r256) {
7670                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7671                                           EIR_SSP_HASH_C256, h256, 16);
7672                 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7673                                           EIR_SSP_RAND_R256, r256, 16);
7674         }
7675
7676 send_rsp:
7677         mgmt_rp->type = mgmt_cp->type;
7678         mgmt_rp->eir_len = cpu_to_le16(eir_len);
7679
7680         err = mgmt_cmd_complete(cmd->sk, hdev->id,
7681                                 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7682                                 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7683         if (err < 0 || status)
7684                 goto done;
7685
7686         hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7687
7688         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7689                                  mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7690                                  HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7691 done:
7692         if (skb && !IS_ERR(skb))
7693                 kfree_skb(skb);
7694
7695         kfree(mgmt_rp);
7696         mgmt_pending_remove(cmd);
7697 }
7698
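     /* Queue the HCI Read Local OOB Data request used by the BR/EDR branch
      * of Read Local OOB Extended Data and track it as a pending command.
      */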
7699 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7700                                   struct mgmt_cp_read_local_oob_ext_data *cp)
7701 {
7702         struct mgmt_pending_cmd *cmd;
7703         int err;
7704
7705         cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7706                                cp, sizeof(*cp));
7707         if (!cmd)
7708                 return -ENOMEM;
7709
7710         err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7711                                  read_local_oob_ext_data_complete);
7712
7713         if (err < 0) {
7714                 mgmt_pending_remove(cmd);
7715                 return err;
7716         }
7717
7718         return 0;
7719 }
7720
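     /* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA: for BR/EDR with SSP
      * enabled the OOB data is read from the controller asynchronously,
      * while the LE response (address, role, optional SC confirm/random
      * values and flags) is assembled directly from the local state.
      */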
7721 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7722                                    void *data, u16 data_len)
7723 {
7724         struct mgmt_cp_read_local_oob_ext_data *cp = data;
7725         struct mgmt_rp_read_local_oob_ext_data *rp;
7726         size_t rp_len;
7727         u16 eir_len;
7728         u8 status, flags, role, addr[7], hash[16], rand[16];
7729         int err;
7730
7731         bt_dev_dbg(hdev, "sock %p", sk);
7732
7733         if (hdev_is_powered(hdev)) {
7734                 switch (cp->type) {
7735                 case BIT(BDADDR_BREDR):
7736                         status = mgmt_bredr_support(hdev);
7737                         if (status)
7738                                 eir_len = 0;
7739                         else
7740                                 eir_len = 5;
7741                         break;
7742                 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7743                         status = mgmt_le_support(hdev);
7744                         if (status)
7745                                 eir_len = 0;
7746                         else
7747                                 eir_len = 9 + 3 + 18 + 18 + 3;
7748                         break;
7749                 default:
7750                         status = MGMT_STATUS_INVALID_PARAMS;
7751                         eir_len = 0;
7752                         break;
7753                 }
7754         } else {
7755                 status = MGMT_STATUS_NOT_POWERED;
7756                 eir_len = 0;
7757         }
7758
7759         rp_len = sizeof(*rp) + eir_len;
7760         rp = kmalloc(rp_len, GFP_ATOMIC);
7761         if (!rp)
7762                 return -ENOMEM;
7763
7764         if (!status && !lmp_ssp_capable(hdev)) {
7765                 status = MGMT_STATUS_NOT_SUPPORTED;
7766                 eir_len = 0;
7767         }
7768
7769         if (status)
7770                 goto complete;
7771
7772         hci_dev_lock(hdev);
7773
7774         eir_len = 0;
7775         switch (cp->type) {
7776         case BIT(BDADDR_BREDR):
7777                 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7778                         err = read_local_ssp_oob_req(hdev, sk, cp);
7779                         hci_dev_unlock(hdev);
7780                         if (!err)
7781                                 goto done;
7782
7783                         status = MGMT_STATUS_FAILED;
7784                         goto complete;
7785                 } else {
7786                         eir_len = eir_append_data(rp->eir, eir_len,
7787                                                   EIR_CLASS_OF_DEV,
7788                                                   hdev->dev_class, 3);
7789                 }
7790                 break;
7791         case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7792                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7793                     smp_generate_oob(hdev, hash, rand) < 0) {
7794                         hci_dev_unlock(hdev);
7795                         status = MGMT_STATUS_FAILED;
7796                         goto complete;
7797                 }
7798
7799                 /* This should return the active RPA, but since the RPA
7800                  * is only programmed on demand, it is not readily
7801                  * available here. For now, disallow retrieving local
7802                  * out-of-band data when privacy is in use.
7803                  *
7804                  * Returning the identity address does not help either,
7805                  * since pairing happens before the identity resolving key
7806                  * is known, and connection establishment therefore happens
7807                  * based on the RPA rather than the identity address.
7808                  */
7809                 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7810                         hci_dev_unlock(hdev);
7811                         status = MGMT_STATUS_REJECTED;
7812                         goto complete;
7813                 }
7814
7815                 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7816                    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7817                    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7818                     bacmp(&hdev->static_addr, BDADDR_ANY))) {
7819                         memcpy(addr, &hdev->static_addr, 6);
7820                         addr[6] = 0x01;
7821                 } else {
7822                         memcpy(addr, &hdev->bdaddr, 6);
7823                         addr[6] = 0x00;
7824                 }
7825
7826                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7827                                           addr, sizeof(addr));
7828
7829                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7830                         role = 0x02;
7831                 else
7832                         role = 0x01;
7833
7834                 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
7835                                           &role, sizeof(role));
7836
7837                 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
7838                         eir_len = eir_append_data(rp->eir, eir_len,
7839                                                   EIR_LE_SC_CONFIRM,
7840                                                   hash, sizeof(hash));
7841
7842                         eir_len = eir_append_data(rp->eir, eir_len,
7843                                                   EIR_LE_SC_RANDOM,
7844                                                   rand, sizeof(rand));
7845                 }
7846
7847                 flags = mgmt_get_adv_discov_flags(hdev);
7848
7849                 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
7850                         flags |= LE_AD_NO_BREDR;
7851
7852                 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
7853                                           &flags, sizeof(flags));
7854                 break;
7855         }
7856
7857         hci_dev_unlock(hdev);
7858
7859         hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
7860
7861         status = MGMT_STATUS_SUCCESS;
7862
7863 complete:
7864         rp->type = cp->type;
7865         rp->eir_len = cpu_to_le16(eir_len);
7866
7867         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
7868                                 status, rp, sizeof(*rp) + eir_len);
7869         if (err < 0 || status)
7870                 goto done;
7871
7872         err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7873                                  rp, sizeof(*rp) + eir_len,
7874                                  HCI_MGMT_OOB_DATA_EVENTS, sk);
7875
7876 done:
7877         kfree(rp);
7878
7879         return err;
7880 }
7881
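     /* Build the bitmask of advertising flags this controller supports.
      * The secondary PHY bits and the ability to set the TX power are only
      * reported for controllers capable of extended advertising.
      */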
7882 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7883 {
7884         u32 flags = 0;
7885
7886         flags |= MGMT_ADV_FLAG_CONNECTABLE;
7887         flags |= MGMT_ADV_FLAG_DISCOV;
7888         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7889         flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7890         flags |= MGMT_ADV_FLAG_APPEARANCE;
7891         flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7892         flags |= MGMT_ADV_PARAM_DURATION;
7893         flags |= MGMT_ADV_PARAM_TIMEOUT;
7894         flags |= MGMT_ADV_PARAM_INTERVALS;
7895         flags |= MGMT_ADV_PARAM_TX_POWER;
7896         flags |= MGMT_ADV_PARAM_SCAN_RSP;
7897
7898         /* With extended advertising, the TX_POWER returned from
7899          * Set Adv Param will always be valid.
7900          */
7901         if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7902             ext_adv_capable(hdev))
7903                 flags |= MGMT_ADV_FLAG_TX_POWER;
7904
7905         if (ext_adv_capable(hdev)) {
7906                 flags |= MGMT_ADV_FLAG_SEC_1M;
7907                 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7908                 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7909
7910                 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7911                         flags |= MGMT_ADV_FLAG_SEC_2M;
7912
7913                 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7914                         flags |= MGMT_ADV_FLAG_SEC_CODED;
7915         }
7916
7917         return flags;
7918 }
7919
7920 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7921                              void *data, u16 data_len)
7922 {
7923         struct mgmt_rp_read_adv_features *rp;
7924         size_t rp_len;
7925         int err;
7926         struct adv_info *adv_instance;
7927         u32 supported_flags;
7928         u8 *instance;
7929
7930         bt_dev_dbg(hdev, "sock %p", sk);
7931
7932         if (!lmp_le_capable(hdev))
7933                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7934                                        MGMT_STATUS_REJECTED);
7935
7936         hci_dev_lock(hdev);
7937
7938         rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7939         rp = kmalloc(rp_len, GFP_ATOMIC);
7940         if (!rp) {
7941                 hci_dev_unlock(hdev);
7942                 return -ENOMEM;
7943         }
7944
7945         supported_flags = get_supported_adv_flags(hdev);
7946
7947         rp->supported_flags = cpu_to_le32(supported_flags);
7948         rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7949         rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7950         rp->max_instances = hdev->le_num_of_adv_sets;
7951         rp->num_instances = hdev->adv_instance_cnt;
7952
7953         instance = rp->instance;
7954         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7955                 *instance = adv_instance->instance;
7956                 instance++;
7957         }
7958
7959         hci_dev_unlock(hdev);
7960
7961         err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7962                                 MGMT_STATUS_SUCCESS, rp, rp_len);
7963
7964         kfree(rp);
7965
7966         return err;
7967 }
7968
7969 static u8 calculate_name_len(struct hci_dev *hdev)
7970 {
7971         u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7972
7973         return eir_append_local_name(hdev, buf, 0);
7974 }
7975
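     /* Compute how much caller supplied data fits into a single advertising
      * or scan response payload once room has been reserved for the fields
      * the kernel manages itself: flags and TX power for advertising data,
      * local name and appearance for scan response data.
      */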
7976 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7977                            bool is_adv_data)
7978 {
7979         u8 max_len = HCI_MAX_AD_LENGTH;
7980
7981         if (is_adv_data) {
7982                 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7983                                  MGMT_ADV_FLAG_LIMITED_DISCOV |
7984                                  MGMT_ADV_FLAG_MANAGED_FLAGS))
7985                         max_len -= 3;
7986
7987                 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7988                         max_len -= 3;
7989         } else {
7990                 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7991                         max_len -= calculate_name_len(hdev);
7992
7993                 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7994                         max_len -= 4;
7995         }
7996
7997         return max_len;
7998 }
7999
8000 static bool flags_managed(u32 adv_flags)
8001 {
8002         return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8003                             MGMT_ADV_FLAG_LIMITED_DISCOV |
8004                             MGMT_ADV_FLAG_MANAGED_FLAGS);
8005 }
8006
8007 static bool tx_power_managed(u32 adv_flags)
8008 {
8009         return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8010 }
8011
8012 static bool name_managed(u32 adv_flags)
8013 {
8014         return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8015 }
8016
8017 static bool appearance_managed(u32 adv_flags)
8018 {
8019         return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8020 }
8021
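     /* Validate caller supplied advertising or scan response data. The data
      * is a sequence of AD structures (a length octet followed by a type
      * octet and the payload); each entry must fit within the overall
      * length and must not carry a field that the kernel manages itself.
      */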
8022 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8023                               u8 len, bool is_adv_data)
8024 {
8025         int i, cur_len;
8026         u8 max_len;
8027
8028         max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8029
8030         if (len > max_len)
8031                 return false;
8032
8033         /* Make sure that the data is correctly formatted. */
8034         for (i = 0; i < len; i += (cur_len + 1)) {
8035                 cur_len = data[i];
8036
8037                 if (!cur_len)
8038                         continue;
8039
8040                 if (data[i + 1] == EIR_FLAGS &&
8041                     (!is_adv_data || flags_managed(adv_flags)))
8042                         return false;
8043
8044                 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8045                         return false;
8046
8047                 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8048                         return false;
8049
8050                 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8051                         return false;
8052
8053                 if (data[i + 1] == EIR_APPEARANCE &&
8054                     appearance_managed(adv_flags))
8055                         return false;
8056
8057                 /* If the current field length would exceed the total data
8058                  * length, then it's invalid.
8059                  */
8060                 if (i + cur_len >= len)
8061                         return false;
8062         }
8063
8064         return true;
8065 }
8066
8067 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8068 {
8069         u32 supported_flags, phy_flags;
8070
8071         /* The current implementation only supports a subset of the specified
8072          * flags. Also check that the SEC flags are mutually exclusive.
8073          */
8074         supported_flags = get_supported_adv_flags(hdev);
8075         phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8076         if (adv_flags & ~supported_flags ||
8077             ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8078                 return false;
8079
8080         return true;
8081 }
8082
8083 static bool adv_busy(struct hci_dev *hdev)
8084 {
8085         return pending_find(MGMT_OP_SET_LE, hdev);
8086 }
8087
8088 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8089                              int err)
8090 {
8091         struct adv_info *adv, *n;
8092
8093         bt_dev_dbg(hdev, "err %d", err);
8094
8095         hci_dev_lock(hdev);
8096
8097         list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8098                 u8 instance;
8099
8100                 if (!adv->pending)
8101                         continue;
8102
8103                 if (!err) {
8104                         adv->pending = false;
8105                         continue;
8106                 }
8107
8108                 instance = adv->instance;
8109
8110                 if (hdev->cur_adv_instance == instance)
8111                         cancel_adv_timeout(hdev);
8112
8113                 hci_remove_adv_instance(hdev, instance);
8114                 mgmt_advertising_removed(sk, hdev, instance);
8115         }
8116
8117         hci_dev_unlock(hdev);
8118 }
8119
8120 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8121 {
8122         struct mgmt_pending_cmd *cmd = data;
8123         struct mgmt_cp_add_advertising *cp = cmd->param;
8124         struct mgmt_rp_add_advertising rp;
8125
8126         memset(&rp, 0, sizeof(rp));
8127
8128         rp.instance = cp->instance;
8129
8130         if (err)
8131                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8132                                 mgmt_status(err));
8133         else
8134                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8135                                   mgmt_status(err), &rp, sizeof(rp));
8136
8137         add_adv_complete(hdev, cmd->sk, cp->instance, err);
8138
8139         mgmt_pending_free(cmd);
8140 }
8141
8142 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8143 {
8144         struct mgmt_pending_cmd *cmd = data;
8145         struct mgmt_cp_add_advertising *cp = cmd->param;
8146
8147         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8148 }
8149
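     /* Handler for MGMT_OP_ADD_ADVERTISING: validates the requested flags
      * and TLV data, registers or updates the advertising instance and,
      * when the controller is powered and not in HCI_ADVERTISING mode,
      * queues the work that schedules the instance.
      */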
8150 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8151                            void *data, u16 data_len)
8152 {
8153         struct mgmt_cp_add_advertising *cp = data;
8154         struct mgmt_rp_add_advertising rp;
8155         u32 flags;
8156         u8 status;
8157         u16 timeout, duration;
8158         unsigned int prev_instance_cnt;
8159         u8 schedule_instance = 0;
8160         struct adv_info *adv, *next_instance;
8161         int err;
8162         struct mgmt_pending_cmd *cmd;
8163
8164         bt_dev_dbg(hdev, "sock %p", sk);
8165
8166         status = mgmt_le_support(hdev);
8167         if (status)
8168                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8169                                        status);
8170
8171         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8172                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8173                                        MGMT_STATUS_INVALID_PARAMS);
8174
8175         if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8176                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8177                                        MGMT_STATUS_INVALID_PARAMS);
8178
8179         flags = __le32_to_cpu(cp->flags);
8180         timeout = __le16_to_cpu(cp->timeout);
8181         duration = __le16_to_cpu(cp->duration);
8182
8183         if (!requested_adv_flags_are_valid(hdev, flags))
8184                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8185                                        MGMT_STATUS_INVALID_PARAMS);
8186
8187         hci_dev_lock(hdev);
8188
8189         if (timeout && !hdev_is_powered(hdev)) {
8190                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8191                                       MGMT_STATUS_REJECTED);
8192                 goto unlock;
8193         }
8194
8195         if (adv_busy(hdev)) {
8196                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8197                                       MGMT_STATUS_BUSY);
8198                 goto unlock;
8199         }
8200
8201         if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8202             !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8203                                cp->scan_rsp_len, false)) {
8204                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8205                                       MGMT_STATUS_INVALID_PARAMS);
8206                 goto unlock;
8207         }
8208
8209         prev_instance_cnt = hdev->adv_instance_cnt;
8210
8211         adv = hci_add_adv_instance(hdev, cp->instance, flags,
8212                                    cp->adv_data_len, cp->data,
8213                                    cp->scan_rsp_len,
8214                                    cp->data + cp->adv_data_len,
8215                                    timeout, duration,
8216                                    HCI_ADV_TX_POWER_NO_PREFERENCE,
8217                                    hdev->le_adv_min_interval,
8218                                    hdev->le_adv_max_interval);
8219         if (IS_ERR(adv)) {
8220                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8221                                       MGMT_STATUS_FAILED);
8222                 goto unlock;
8223         }
8224
8225         /* Only trigger an advertising added event if a new instance was
8226          * actually added.
8227          */
8228         if (hdev->adv_instance_cnt > prev_instance_cnt)
8229                 mgmt_advertising_added(sk, hdev, cp->instance);
8230
8231         if (hdev->cur_adv_instance == cp->instance) {
8232                 /* If the currently advertised instance is being changed then
8233                  * cancel the current advertising and schedule the next
8234                  * instance. If there is only one instance then the overridden
8235                  * advertising data will be visible right away.
8236                  */
8237                 cancel_adv_timeout(hdev);
8238
8239                 next_instance = hci_get_next_instance(hdev, cp->instance);
8240                 if (next_instance)
8241                         schedule_instance = next_instance->instance;
8242         } else if (!hdev->adv_instance_timeout) {
8243                 /* Immediately advertise the new instance if no other
8244                  * instance is currently being advertised.
8245                  */
8246                 schedule_instance = cp->instance;
8247         }
8248
8249         /* If the HCI_ADVERTISING flag is set, the device isn't powered, or
8250          * there is no instance to be advertised, then we have no HCI
8251          * communication to make. Simply return.
8252          */
8253         if (!hdev_is_powered(hdev) ||
8254             hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8255             !schedule_instance) {
8256                 rp.instance = cp->instance;
8257                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8258                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8259                 goto unlock;
8260         }
8261
8262         /* We're good to go, update advertising data, parameters, and start
8263          * advertising.
8264          */
8265         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8266                                data_len);
8267         if (!cmd) {
8268                 err = -ENOMEM;
8269                 goto unlock;
8270         }
8271
8272         cp->instance = schedule_instance;
8273
8274         err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8275                                  add_advertising_complete);
8276         if (err < 0)
8277                 mgmt_pending_free(cmd);
8278
8279 unlock:
8280         hci_dev_unlock(hdev);
8281
8282         return err;
8283 }
8284
8285 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8286                                         int err)
8287 {
8288         struct mgmt_pending_cmd *cmd = data;
8289         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8290         struct mgmt_rp_add_ext_adv_params rp;
8291         struct adv_info *adv;
8292         u32 flags;
8293
8294         BT_DBG("%s", hdev->name);
8295
8296         hci_dev_lock(hdev);
8297
8298         adv = hci_find_adv_instance(hdev, cp->instance);
8299         if (!adv)
8300                 goto unlock;
8301
8302         rp.instance = cp->instance;
8303         rp.tx_power = adv->tx_power;
8304
8305         /* While we're at it, inform userspace of the available space for this
8306          * advertisement, given the flags that will be used.
8307          */
8308         flags = __le32_to_cpu(cp->flags);
8309         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8310         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8311
8312         if (err) {
8313                 /* If this advertisement was previously advertising and we
8314                  * failed to update it, we signal that it has been removed and
8315                  * delete its structure.
8316                  */
8317                 if (!adv->pending)
8318                         mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8319
8320                 hci_remove_adv_instance(hdev, cp->instance);
8321
8322                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8323                                 mgmt_status(err));
8324         } else {
8325                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8326                                   mgmt_status(err), &rp, sizeof(rp));
8327         }
8328
8329 unlock:
8330         if (cmd)
8331                 mgmt_pending_free(cmd);
8332
8333         hci_dev_unlock(hdev);
8334 }
8335
8336 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8337 {
8338         struct mgmt_pending_cmd *cmd = data;
8339         struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8340
8341         return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8342 }
8343
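     /* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS: first half of the split
      * extended advertising interface. Creates the instance with the
      * requested parameters but without any data; the data is provided
      * later through MGMT_OP_ADD_EXT_ADV_DATA.
      */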
8344 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8345                               void *data, u16 data_len)
8346 {
8347         struct mgmt_cp_add_ext_adv_params *cp = data;
8348         struct mgmt_rp_add_ext_adv_params rp;
8349         struct mgmt_pending_cmd *cmd = NULL;
8350         struct adv_info *adv;
8351         u32 flags, min_interval, max_interval;
8352         u16 timeout, duration;
8353         u8 status;
8354         s8 tx_power;
8355         int err;
8356
8357         BT_DBG("%s", hdev->name);
8358
8359         status = mgmt_le_support(hdev);
8360         if (status)
8361                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8362                                        status);
8363
8364         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8365                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8366                                        MGMT_STATUS_INVALID_PARAMS);
8367
8368         /* The purpose of breaking add_advertising into two separate MGMT calls
8369          * for params and data is to allow more parameters to be added to this
8370          * structure in the future. For this reason, we verify that we have the
8371          * bare minimum structure we know of when the interface was defined. Any
8372          * extra parameters we don't know about will be ignored in this request.
8373          */
8374         if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8375                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8376                                        MGMT_STATUS_INVALID_PARAMS);
8377
8378         flags = __le32_to_cpu(cp->flags);
8379
8380         if (!requested_adv_flags_are_valid(hdev, flags))
8381                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8382                                        MGMT_STATUS_INVALID_PARAMS);
8383
8384         hci_dev_lock(hdev);
8385
8386         /* In the new interface, we require that we are powered to register */
8387         if (!hdev_is_powered(hdev)) {
8388                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8389                                       MGMT_STATUS_REJECTED);
8390                 goto unlock;
8391         }
8392
8393         if (adv_busy(hdev)) {
8394                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8395                                       MGMT_STATUS_BUSY);
8396                 goto unlock;
8397         }
8398
8399         /* Parse defined parameters from request, use defaults otherwise */
8400         timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8401                   __le16_to_cpu(cp->timeout) : 0;
8402
8403         duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8404                    __le16_to_cpu(cp->duration) :
8405                    hdev->def_multi_adv_rotation_duration;
8406
8407         min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8408                        __le32_to_cpu(cp->min_interval) :
8409                        hdev->le_adv_min_interval;
8410
8411         max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8412                        __le32_to_cpu(cp->max_interval) :
8413                        hdev->le_adv_max_interval;
8414
8415         tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8416                    cp->tx_power :
8417                    HCI_ADV_TX_POWER_NO_PREFERENCE;
8418
8419         /* Create advertising instance with no advertising or scan response data */
8420         adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8421                                    timeout, duration, tx_power, min_interval,
8422                                    max_interval);
8423
8424         if (IS_ERR(adv)) {
8425                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8426                                       MGMT_STATUS_FAILED);
8427                 goto unlock;
8428         }
8429
8430         /* Submit request for advertising params if ext adv available */
8431         if (ext_adv_capable(hdev)) {
8432                 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8433                                        data, data_len);
8434                 if (!cmd) {
8435                         err = -ENOMEM;
8436                         hci_remove_adv_instance(hdev, cp->instance);
8437                         goto unlock;
8438                 }
8439
8440                 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8441                                          add_ext_adv_params_complete);
8442                 if (err < 0)
8443                         mgmt_pending_free(cmd);
8444         } else {
8445                 rp.instance = cp->instance;
8446                 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8447                 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8448                 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8449                 err = mgmt_cmd_complete(sk, hdev->id,
8450                                         MGMT_OP_ADD_EXT_ADV_PARAMS,
8451                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8452         }
8453
8454 unlock:
8455         hci_dev_unlock(hdev);
8456
8457         return err;
8458 }
8459
8460 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8461 {
8462         struct mgmt_pending_cmd *cmd = data;
8463         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8464         struct mgmt_rp_add_advertising rp;
8465
8466         add_adv_complete(hdev, cmd->sk, cp->instance, err);
8467
8468         memset(&rp, 0, sizeof(rp));
8469
8470         rp.instance = cp->instance;
8471
8472         if (err)
8473                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8474                                 mgmt_status(err));
8475         else
8476                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8477                                   mgmt_status(err), &rp, sizeof(rp));
8478
8479         mgmt_pending_free(cmd);
8480 }
8481
8482 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8483 {
8484         struct mgmt_pending_cmd *cmd = data;
8485         struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8486         int err;
8487
8488         if (ext_adv_capable(hdev)) {
8489                 err = hci_update_adv_data_sync(hdev, cp->instance);
8490                 if (err)
8491                         return err;
8492
8493                 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8494                 if (err)
8495                         return err;
8496
8497                 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8498         }
8499
8500         return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8501 }
8502
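     /* Handler for MGMT_OP_ADD_EXT_ADV_DATA: second half of the split
      * extended advertising interface. Attaches advertising and scan
      * response data to an instance created by Add Extended Advertising
      * Parameters and schedules it on the controller when required.
      */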
8503 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8504                             u16 data_len)
8505 {
8506         struct mgmt_cp_add_ext_adv_data *cp = data;
8507         struct mgmt_rp_add_ext_adv_data rp;
8508         u8 schedule_instance = 0;
8509         struct adv_info *next_instance;
8510         struct adv_info *adv_instance;
8511         int err = 0;
8512         struct mgmt_pending_cmd *cmd;
8513
8514         BT_DBG("%s", hdev->name);
8515
8516         hci_dev_lock(hdev);
8517
8518         adv_instance = hci_find_adv_instance(hdev, cp->instance);
8519
8520         if (!adv_instance) {
8521                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8522                                       MGMT_STATUS_INVALID_PARAMS);
8523                 goto unlock;
8524         }
8525
8526         /* In new interface, we require that we are powered to register */
8527         /* In the new interface, we require that we are powered to register */
8528                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8529                                       MGMT_STATUS_REJECTED);
8530                 goto clear_new_instance;
8531         }
8532
8533         if (adv_busy(hdev)) {
8534                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8535                                       MGMT_STATUS_BUSY);
8536                 goto clear_new_instance;
8537         }
8538
8539         /* Validate new data */
8540         if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8541                                cp->adv_data_len, true) ||
8542             !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8543                                cp->adv_data_len, cp->scan_rsp_len, false)) {
8544                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8545                                       MGMT_STATUS_INVALID_PARAMS);
8546                 goto clear_new_instance;
8547         }
8548
8549         /* Set the data in the advertising instance */
8550         hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8551                                   cp->data, cp->scan_rsp_len,
8552                                   cp->data + cp->adv_data_len);
8553
8554         /* If using software rotation, determine next instance to use */
8555         if (hdev->cur_adv_instance == cp->instance) {
8556                 /* If the currently advertised instance is being changed
8557                  * then cancel the current advertising and schedule the
8558                  * next instance. If there is only one instance then the
8559                  * overridden advertising data will be visible right
8560                  * away.
8561                  */
8562                 cancel_adv_timeout(hdev);
8563
8564                 next_instance = hci_get_next_instance(hdev, cp->instance);
8565                 if (next_instance)
8566                         schedule_instance = next_instance->instance;
8567         } else if (!hdev->adv_instance_timeout) {
8568                 /* Immediately advertise the new instance if no other
8569                  * instance is currently being advertised.
8570                  */
8571                 schedule_instance = cp->instance;
8572         }
8573
8574         /* If the HCI_ADVERTISING flag is set or there is no instance to
8575          * be advertised, then we have no HCI communication to make.
8576          * Simply return.
8577          */
8578         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
8579                 if (adv_instance->pending) {
8580                         mgmt_advertising_added(sk, hdev, cp->instance);
8581                         adv_instance->pending = false;
8582                 }
8583                 rp.instance = cp->instance;
8584                 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8585                                         MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8586                 goto unlock;
8587         }
8588
8589         cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
8590                                data_len);
8591         if (!cmd) {
8592                 err = -ENOMEM;
8593                 goto clear_new_instance;
8594         }
8595
8596         err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
8597                                  add_ext_adv_data_complete);
8598         if (err < 0) {
8599                 mgmt_pending_free(cmd);
8600                 goto clear_new_instance;
8601         }
8602
8603         /* We were successful in updating the data, so trigger the
8604          * advertising_added event if this instance wasn't previously
8605          * advertising. If a failure occurs in the requests we initiated,
8606          * the instance will be removed again in add_adv_complete.
8607          */
8608         if (adv_instance->pending)
8609                 mgmt_advertising_added(sk, hdev, cp->instance);
8610
8611         goto unlock;
8612
8613 clear_new_instance:
8614         hci_remove_adv_instance(hdev, cp->instance);
8615
8616 unlock:
8617         hci_dev_unlock(hdev);
8618
8619         return err;
8620 }
8621
8622 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8623                                         int err)
8624 {
8625         struct mgmt_pending_cmd *cmd = data;
8626         struct mgmt_cp_remove_advertising *cp = cmd->param;
8627         struct mgmt_rp_remove_advertising rp;
8628
8629         bt_dev_dbg(hdev, "err %d", err);
8630
8631         memset(&rp, 0, sizeof(rp));
8632         rp.instance = cp->instance;
8633
8634         if (err)
8635                 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8636                                 mgmt_status(err));
8637         else
8638                 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8639                                   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8640
8641         mgmt_pending_free(cmd);
8642 }
8643
8644 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8645 {
8646         struct mgmt_pending_cmd *cmd = data;
8647         struct mgmt_cp_remove_advertising *cp = cmd->param;
8648         int err;
8649
8650         err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8651         if (err)
8652                 return err;
8653
8654         if (list_empty(&hdev->adv_instances))
8655                 err = hci_disable_advertising_sync(hdev);
8656
8657         return err;
8658 }
8659
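     /* Handler for MGMT_OP_REMOVE_ADVERTISING: removes the given instance
      * (instance 0 selects all of them) and disables advertising once no
      * instances are left.
      */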
8660 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8661                               void *data, u16 data_len)
8662 {
8663         struct mgmt_cp_remove_advertising *cp = data;
8664         struct mgmt_pending_cmd *cmd;
8665         int err;
8666
8667         bt_dev_dbg(hdev, "sock %p", sk);
8668
8669         hci_dev_lock(hdev);
8670
8671         if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8672                 err = mgmt_cmd_status(sk, hdev->id,
8673                                       MGMT_OP_REMOVE_ADVERTISING,
8674                                       MGMT_STATUS_INVALID_PARAMS);
8675                 goto unlock;
8676         }
8677
8678         if (pending_find(MGMT_OP_SET_LE, hdev)) {
8679                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8680                                       MGMT_STATUS_BUSY);
8681                 goto unlock;
8682         }
8683
8684         if (list_empty(&hdev->adv_instances)) {
8685                 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8686                                       MGMT_STATUS_INVALID_PARAMS);
8687                 goto unlock;
8688         }
8689
8690         cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8691                                data_len);
8692         if (!cmd) {
8693                 err = -ENOMEM;
8694                 goto unlock;
8695         }
8696
8697         err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
8698                                  remove_advertising_complete);
8699         if (err < 0)
8700                 mgmt_pending_free(cmd);
8701
8702 unlock:
8703         hci_dev_unlock(hdev);
8704
8705         return err;
8706 }
8707
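     /* Handler for MGMT_OP_GET_ADV_SIZE_INFO: reports how much advertising
      * and scan response data would be available for an instance using the
      * given set of flags.
      */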
8708 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8709                              void *data, u16 data_len)
8710 {
8711         struct mgmt_cp_get_adv_size_info *cp = data;
8712         struct mgmt_rp_get_adv_size_info rp;
8713         u32 flags, supported_flags;
8714
8715         bt_dev_dbg(hdev, "sock %p", sk);
8716
8717         if (!lmp_le_capable(hdev))
8718                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8719                                        MGMT_STATUS_REJECTED);
8720
8721         if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8722                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8723                                        MGMT_STATUS_INVALID_PARAMS);
8724
8725         flags = __le32_to_cpu(cp->flags);
8726
8727         /* The current implementation only supports a subset of the specified
8728          * flags.
8729          */
8730         supported_flags = get_supported_adv_flags(hdev);
8731         if (flags & ~supported_flags)
8732                 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8733                                        MGMT_STATUS_INVALID_PARAMS);
8734
8735         rp.instance = cp->instance;
8736         rp.flags = cp->flags;
8737         rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8738         rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8739
8740         return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8741                                  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8742 }
8743
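/* Command dispatch table, indexed by MGMT opcode. The second field is the
 * expected parameter length (the minimum length when HCI_MGMT_VAR_LEN is
 * set). HCI_MGMT_NO_HDEV marks commands that do not operate on a controller
 * index, HCI_MGMT_UNTRUSTED allows the command on untrusted control sockets,
 * and HCI_MGMT_UNCONFIGURED permits it while the controller is still
 * unconfigured. Dispatch in the HCI socket layer is roughly (simplified,
 * for illustration only):
 *
 *	handler = &chan->handlers[opcode];
 *	if (len < handler->data_len)
 *		return MGMT_STATUS_INVALID_PARAMS;
 *	err = handler->func(sk, hdev, data, len);
 */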
8744 static const struct hci_mgmt_handler mgmt_handlers[] = {
8745         { NULL }, /* 0x0000 (no command) */
8746         { read_version,            MGMT_READ_VERSION_SIZE,
8747                                                 HCI_MGMT_NO_HDEV |
8748                                                 HCI_MGMT_UNTRUSTED },
8749         { read_commands,           MGMT_READ_COMMANDS_SIZE,
8750                                                 HCI_MGMT_NO_HDEV |
8751                                                 HCI_MGMT_UNTRUSTED },
8752         { read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
8753                                                 HCI_MGMT_NO_HDEV |
8754                                                 HCI_MGMT_UNTRUSTED },
8755         { read_controller_info,    MGMT_READ_INFO_SIZE,
8756                                                 HCI_MGMT_UNTRUSTED },
8757         { set_powered,             MGMT_SETTING_SIZE },
8758         { set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
8759         { set_connectable,         MGMT_SETTING_SIZE },
8760         { set_fast_connectable,    MGMT_SETTING_SIZE },
8761         { set_bondable,            MGMT_SETTING_SIZE },
8762         { set_link_security,       MGMT_SETTING_SIZE },
8763         { set_ssp,                 MGMT_SETTING_SIZE },
8764         { set_hs,                  MGMT_SETTING_SIZE },
8765         { set_le,                  MGMT_SETTING_SIZE },
8766         { set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
8767         { set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
8768         { add_uuid,                MGMT_ADD_UUID_SIZE },
8769         { remove_uuid,             MGMT_REMOVE_UUID_SIZE },
8770         { load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
8771                                                 HCI_MGMT_VAR_LEN },
8772         { load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8773                                                 HCI_MGMT_VAR_LEN },
8774         { disconnect,              MGMT_DISCONNECT_SIZE },
8775         { get_connections,         MGMT_GET_CONNECTIONS_SIZE },
8776         { pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
8777         { pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
8778         { set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
8779         { pair_device,             MGMT_PAIR_DEVICE_SIZE },
8780         { cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
8781         { unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
8782         { user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
8783         { user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8784         { user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
8785         { user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8786         { read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
8787         { add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8788                                                 HCI_MGMT_VAR_LEN },
8789         { remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8790         { start_discovery,         MGMT_START_DISCOVERY_SIZE },
8791         { stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
8792         { confirm_name,            MGMT_CONFIRM_NAME_SIZE },
8793         { block_device,            MGMT_BLOCK_DEVICE_SIZE },
8794         { unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
8795         { set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
8796         { set_advertising,         MGMT_SETTING_SIZE },
8797         { set_bredr,               MGMT_SETTING_SIZE },
8798         { set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
8799         { set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
8800         { set_secure_conn,         MGMT_SETTING_SIZE },
8801         { set_debug_keys,          MGMT_SETTING_SIZE },
8802         { set_privacy,             MGMT_SET_PRIVACY_SIZE },
8803         { load_irks,               MGMT_LOAD_IRKS_SIZE,
8804                                                 HCI_MGMT_VAR_LEN },
8805         { get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
8806         { get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
8807         { add_device,              MGMT_ADD_DEVICE_SIZE },
8808         { remove_device,           MGMT_REMOVE_DEVICE_SIZE },
8809         { load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
8810                                                 HCI_MGMT_VAR_LEN },
8811         { read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8812                                                 HCI_MGMT_NO_HDEV |
8813                                                 HCI_MGMT_UNTRUSTED },
8814         { read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
8815                                                 HCI_MGMT_UNCONFIGURED |
8816                                                 HCI_MGMT_UNTRUSTED },
8817         { set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
8818                                                 HCI_MGMT_UNCONFIGURED },
8819         { set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
8820                                                 HCI_MGMT_UNCONFIGURED },
8821         { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8822                                                 HCI_MGMT_VAR_LEN },
8823         { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8824         { read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
8825                                                 HCI_MGMT_NO_HDEV |
8826                                                 HCI_MGMT_UNTRUSTED },
8827         { read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
8828         { add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
8829                                                 HCI_MGMT_VAR_LEN },
8830         { remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
8831         { get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
8832         { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8833         { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8834                                                 HCI_MGMT_UNTRUSTED },
8835         { set_appearance,          MGMT_SET_APPEARANCE_SIZE },
8836         { get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
8837         { set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
8838         { set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
8839                                                 HCI_MGMT_VAR_LEN },
8840         { set_wideband_speech,     MGMT_SETTING_SIZE },
8841         { read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
8842                                                 HCI_MGMT_UNTRUSTED },
8843         { read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
8844                                                 HCI_MGMT_UNTRUSTED |
8845                                                 HCI_MGMT_HDEV_OPTIONAL },
8846         { set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
8847                                                 HCI_MGMT_VAR_LEN |
8848                                                 HCI_MGMT_HDEV_OPTIONAL },
8849         { read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
8850                                                 HCI_MGMT_UNTRUSTED },
8851         { set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
8852                                                 HCI_MGMT_VAR_LEN },
8853         { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
8854                                                 HCI_MGMT_UNTRUSTED },
8855         { set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
8856                                                 HCI_MGMT_VAR_LEN },
8857         { get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
8858         { set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
8859         { read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
8860         { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
8861                                                 HCI_MGMT_VAR_LEN },
8862         { remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
8863         { add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
8864                                                 HCI_MGMT_VAR_LEN },
8865         { add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
8866                                                 HCI_MGMT_VAR_LEN },
8867         { add_adv_patterns_monitor_rssi,
8868                                    MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
8869                                                 HCI_MGMT_VAR_LEN },
8870 };
8871
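/* Index added/removed notifications. The extended event carries a controller
 * type: 0x00 for a configured primary controller, 0x01 for an unconfigured
 * one and 0x02 for an AMP controller, which is what the ev.type assignments
 * below encode. The legacy (non-extended) index events are emitted alongside
 * for older userspace.
 */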
8872 void mgmt_index_added(struct hci_dev *hdev)
8873 {
8874         struct mgmt_ev_ext_index ev;
8875
8876         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8877                 return;
8878
8879         switch (hdev->dev_type) {
8880         case HCI_PRIMARY:
8881                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8882                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8883                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8884                         ev.type = 0x01;
8885                 } else {
8886                         mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8887                                          HCI_MGMT_INDEX_EVENTS);
8888                         ev.type = 0x00;
8889                 }
8890                 break;
8891         case HCI_AMP:
8892                 ev.type = 0x02;
8893                 break;
8894         default:
8895                 return;
8896         }
8897
8898         ev.bus = hdev->bus;
8899
8900         mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8901                          HCI_MGMT_EXT_INDEX_EVENTS);
8902 }
8903
8904 void mgmt_index_removed(struct hci_dev *hdev)
8905 {
8906         struct mgmt_ev_ext_index ev;
8907         u8 status = MGMT_STATUS_INVALID_INDEX;
8908
8909         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8910                 return;
8911
8912         switch (hdev->dev_type) {
8913         case HCI_PRIMARY:
8914                 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8915
8916                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8917                         mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8918                                          NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8919                         ev.type = 0x01;
8920                 } else {
8921                         mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8922                                          HCI_MGMT_INDEX_EVENTS);
8923                         ev.type = 0x00;
8924                 }
8925                 break;
8926         case HCI_AMP:
8927                 ev.type = 0x02;
8928                 break;
8929         default:
8930                 return;
8931         }
8932
8933         ev.bus = hdev->bus;
8934
8935         mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8936                          HCI_MGMT_EXT_INDEX_EVENTS);
8937
8938         /* Cancel any remaining timed work */
8939         cancel_delayed_work_sync(&hdev->discov_off);
8940         cancel_delayed_work_sync(&hdev->service_cache);
8941         cancel_delayed_work_sync(&hdev->rpa_expired);
8942 }
8943
8944 void mgmt_power_on(struct hci_dev *hdev, int err)
8945 {
8946         struct cmd_lookup match = { NULL, hdev };
8947
8948         bt_dev_dbg(hdev, "err %d", err);
8949
8950         hci_dev_lock(hdev);
8951
8952         if (!err) {
8953                 restart_le_actions(hdev);
8954                 hci_update_passive_scan(hdev);
8955         }
8956
8957         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8958
8959         new_settings(hdev, match.sk);
8960
8961         if (match.sk)
8962                 sock_put(match.sk);
8963
8964         hci_dev_unlock(hdev);
8965 }
8966
8967 void __mgmt_power_off(struct hci_dev *hdev)
8968 {
8969         struct cmd_lookup match = { NULL, hdev };
8970         u8 status, zero_cod[] = { 0, 0, 0 };
8971
8972         mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8973
        /* If the power off is because of hdev unregistration, let us
         * use the appropriate INVALID_INDEX status. Otherwise use
8976          * NOT_POWERED. We cover both scenarios here since later in
8977          * mgmt_index_removed() any hci_conn callbacks will have already
8978          * been triggered, potentially causing misleading DISCONNECTED
8979          * status responses.
8980          */
8981         if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8982                 status = MGMT_STATUS_INVALID_INDEX;
8983         else
8984                 status = MGMT_STATUS_NOT_POWERED;
8985
8986         mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8987
8988         if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8989                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8990                                    zero_cod, sizeof(zero_cod),
8991                                    HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8992                 ext_info_changed(hdev, NULL);
8993         }
8994
8995         new_settings(hdev, match.sk);
8996
8997         if (match.sk)
8998                 sock_put(match.sk);
8999 }
9000
9001 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9002 {
9003         struct mgmt_pending_cmd *cmd;
9004         u8 status;
9005
9006         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9007         if (!cmd)
9008                 return;
9009
9010         if (err == -ERFKILL)
9011                 status = MGMT_STATUS_RFKILLED;
9012         else
9013                 status = MGMT_STATUS_FAILED;
9014
9015         mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9016
9017         mgmt_pending_remove(cmd);
9018 }
9019
9020 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9021                        bool persistent)
9022 {
9023         struct mgmt_ev_new_link_key ev;
9024
9025         memset(&ev, 0, sizeof(ev));
9026
9027         ev.store_hint = persistent;
9028         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9029         ev.key.addr.type = BDADDR_BREDR;
9030         ev.key.type = key->type;
9031         memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9032         ev.key.pin_len = key->pin_len;
9033
9034         mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9035 }
9036
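/* Map the kernel's internal SMP LTK type (and its authenticated flag) to the
 * key type values exposed to userspace in the New Long Term Key event.
 */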
9037 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9038 {
9039         switch (ltk->type) {
9040         case SMP_LTK:
9041         case SMP_LTK_RESPONDER:
9042                 if (ltk->authenticated)
9043                         return MGMT_LTK_AUTHENTICATED;
9044                 return MGMT_LTK_UNAUTHENTICATED;
9045         case SMP_LTK_P256:
9046                 if (ltk->authenticated)
9047                         return MGMT_LTK_P256_AUTH;
9048                 return MGMT_LTK_P256_UNAUTH;
9049         case SMP_LTK_P256_DEBUG:
9050                 return MGMT_LTK_P256_DEBUG;
9051         }
9052
9053         return MGMT_LTK_UNAUTHENTICATED;
9054 }
9055
9056 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9057 {
9058         struct mgmt_ev_new_long_term_key ev;
9059
9060         memset(&ev, 0, sizeof(ev));
9061
        /* Devices using resolvable or non-resolvable random addresses
         * without providing an identity resolving key don't require
         * long term keys to be stored. Their addresses will change the
         * next time around.
         *
         * Only when a remote device provides an identity address should
         * the long term key be stored. If the remote identity is known,
         * the long term keys are internally mapped to the identity
         * address. So allow static random and public addresses here.
         */
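        /* bdaddr_t stores the address little-endian, so b[5] is the most
         * significant byte. A static random address has its two top bits set
         * (0xc0); anything else here is a resolvable or non-resolvable
         * private address, so don't ask userspace to store the key.
         */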
9073         if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9074             (key->bdaddr.b[5] & 0xc0) != 0xc0)
9075                 ev.store_hint = 0x00;
9076         else
9077                 ev.store_hint = persistent;
9078
9079         bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9080         ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9081         ev.key.type = mgmt_ltk_type(key);
9082         ev.key.enc_size = key->enc_size;
9083         ev.key.ediv = key->ediv;
9084         ev.key.rand = key->rand;
9085
9086         if (key->type == SMP_LTK)
9087                 ev.key.initiator = 1;
9088
9089         /* Make sure we copy only the significant bytes based on the
9090          * encryption key size, and set the rest of the value to zeroes.
9091          */
9092         memcpy(ev.key.val, key->val, key->enc_size);
9093         memset(ev.key.val + key->enc_size, 0,
9094                sizeof(ev.key.val) - key->enc_size);
9095
9096         mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9097 }
9098
9099 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9100 {
9101         struct mgmt_ev_new_irk ev;
9102
9103         memset(&ev, 0, sizeof(ev));
9104
9105         ev.store_hint = persistent;
9106
9107         bacpy(&ev.rpa, &irk->rpa);
9108         bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9109         ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9110         memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9111
9112         mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9113 }
9114
9115 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9116                    bool persistent)
9117 {
9118         struct mgmt_ev_new_csrk ev;
9119
9120         memset(&ev, 0, sizeof(ev));
9121
        /* Devices using resolvable or non-resolvable random addresses
         * without providing an identity resolving key don't require
         * signature resolving keys to be stored. Their addresses will
         * change the next time around.
         *
         * Only when a remote device provides an identity address should
         * the signature resolving key be stored. So allow static random
         * and public addresses here.
         */
9131         if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9132             (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9133                 ev.store_hint = 0x00;
9134         else
9135                 ev.store_hint = persistent;
9136
9137         bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9138         ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9139         ev.key.type = csrk->type;
9140         memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9141
9142         mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9143 }
9144
9145 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9146                          u8 bdaddr_type, u8 store_hint, u16 min_interval,
9147                          u16 max_interval, u16 latency, u16 timeout)
9148 {
9149         struct mgmt_ev_new_conn_param ev;
9150
9151         if (!hci_is_identity_address(bdaddr, bdaddr_type))
9152                 return;
9153
9154         memset(&ev, 0, sizeof(ev));
9155         bacpy(&ev.addr.bdaddr, bdaddr);
9156         ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9157         ev.store_hint = store_hint;
9158         ev.min_interval = cpu_to_le16(min_interval);
9159         ev.max_interval = cpu_to_le16(max_interval);
9160         ev.latency = cpu_to_le16(latency);
9161         ev.timeout = cpu_to_le16(timeout);
9162
9163         mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9164 }
9165
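/* Emit MGMT_EV_DEVICE_CONNECTED. For LE links the advertising data received
 * while connecting is forwarded as the event's EIR payload; for BR/EDR the
 * remote name and class of device are packed into EIR structures instead.
 */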
9166 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9167                            u8 *name, u8 name_len)
9168 {
9169         struct sk_buff *skb;
9170         struct mgmt_ev_device_connected *ev;
9171         u16 eir_len = 0;
9172         u32 flags = 0;
9173
        /* Allocate a buffer for the LE advertising data or BR/EDR EIR fields */
9175         if (conn->le_adv_data_len > 0)
9176                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9177                                      sizeof(*ev) + conn->le_adv_data_len);
9178         else
9179                 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9180                                      sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9181                                      eir_precalc_len(sizeof(conn->dev_class)));
9182
        /* Bail out if the allocation above failed */
        if (!skb)
                return;

        ev = skb_put(skb, sizeof(*ev));
9184         bacpy(&ev->addr.bdaddr, &conn->dst);
9185         ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9186
9187         if (conn->out)
9188                 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9189
9190         ev->flags = __cpu_to_le32(flags);
9191
9192         /* We must ensure that the EIR Data fields are ordered and
9193          * unique. Keep it simple for now and avoid the problem by not
9194          * adding any BR/EDR data to the LE adv.
9195          */
9196         if (conn->le_adv_data_len > 0) {
9197                 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9198                 eir_len = conn->le_adv_data_len;
9199         } else {
9200                 if (name)
9201                         eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9202
9203                 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9204                         eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9205                                                     conn->dev_class, sizeof(conn->dev_class));
9206         }
9207
9208         ev->eir_len = cpu_to_le16(eir_len);
9209
9210         mgmt_event_skb(skb, NULL);
9211 }
9212
9213 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9214 {
9215         struct sock **sk = data;
9216
9217         cmd->cmd_complete(cmd, 0);
9218
9219         *sk = cmd->sk;
9220         sock_hold(*sk);
9221
9222         mgmt_pending_remove(cmd);
9223 }
9224
9225 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9226 {
9227         struct hci_dev *hdev = data;
9228         struct mgmt_cp_unpair_device *cp = cmd->param;
9229
9230         device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9231
9232         cmd->cmd_complete(cmd, 0);
9233         mgmt_pending_remove(cmd);
9234 }
9235
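/* True when a Set Powered (off) command is still pending for this controller,
 * i.e. the adapter is in the middle of being powered down via mgmt.
 */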
9236 bool mgmt_powering_down(struct hci_dev *hdev)
9237 {
9238         struct mgmt_pending_cmd *cmd;
9239         struct mgmt_mode *cp;
9240
9241         cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9242         if (!cmd)
9243                 return false;
9244
9245         cp = cmd->param;
9246         if (!cp->val)
9247                 return true;
9248
9249         return false;
9250 }
9251
9252 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9253                               u8 link_type, u8 addr_type, u8 reason,
9254                               bool mgmt_connected)
9255 {
9256         struct mgmt_ev_device_disconnected ev;
9257         struct sock *sk = NULL;
9258
9259         /* The connection is still in hci_conn_hash so test for 1
9260          * instead of 0 to know if this is the last one.
9261          */
9262         if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9263                 cancel_delayed_work(&hdev->power_off);
9264                 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9265         }
9266
9267         if (!mgmt_connected)
9268                 return;
9269
9270         if (link_type != ACL_LINK && link_type != LE_LINK)
9271                 return;
9272
9273         mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9274
9275         bacpy(&ev.addr.bdaddr, bdaddr);
9276         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9277         ev.reason = reason;
9278
9279         /* Report disconnects due to suspend */
9280         if (hdev->suspended)
9281                 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9282
9283         mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9284
9285         if (sk)
9286                 sock_put(sk);
9287
9288         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9289                              hdev);
9290 }
9291
9292 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9293                             u8 link_type, u8 addr_type, u8 status)
9294 {
9295         u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9296         struct mgmt_cp_disconnect *cp;
9297         struct mgmt_pending_cmd *cmd;
9298
9299         mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9300                              hdev);
9301
9302         cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9303         if (!cmd)
9304                 return;
9305
9306         cp = cmd->param;
9307
9308         if (bacmp(bdaddr, &cp->addr.bdaddr))
9309                 return;
9310
9311         if (cp->addr.type != bdaddr_type)
9312                 return;
9313
9314         cmd->cmd_complete(cmd, mgmt_status(status));
9315         mgmt_pending_remove(cmd);
9316 }
9317
9318 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9319                          u8 addr_type, u8 status)
9320 {
9321         struct mgmt_ev_connect_failed ev;
9322
9323         /* The connection is still in hci_conn_hash so test for 1
9324          * instead of 0 to know if this is the last one.
9325          */
9326         if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9327                 cancel_delayed_work(&hdev->power_off);
9328                 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9329         }
9330
9331         bacpy(&ev.addr.bdaddr, bdaddr);
9332         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9333         ev.status = mgmt_status(status);
9334
9335         mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9336 }
9337
9338 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9339 {
9340         struct mgmt_ev_pin_code_request ev;
9341
9342         bacpy(&ev.addr.bdaddr, bdaddr);
9343         ev.addr.type = BDADDR_BREDR;
9344         ev.secure = secure;
9345
9346         mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9347 }
9348
9349 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9350                                   u8 status)
9351 {
9352         struct mgmt_pending_cmd *cmd;
9353
9354         cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9355         if (!cmd)
9356                 return;
9357
9358         cmd->cmd_complete(cmd, mgmt_status(status));
9359         mgmt_pending_remove(cmd);
9360 }
9361
9362 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9363                                       u8 status)
9364 {
9365         struct mgmt_pending_cmd *cmd;
9366
9367         cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9368         if (!cmd)
9369                 return;
9370
9371         cmd->cmd_complete(cmd, mgmt_status(status));
9372         mgmt_pending_remove(cmd);
9373 }
9374
9375 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9376                               u8 link_type, u8 addr_type, u32 value,
9377                               u8 confirm_hint)
9378 {
9379         struct mgmt_ev_user_confirm_request ev;
9380
9381         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9382
9383         bacpy(&ev.addr.bdaddr, bdaddr);
9384         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9385         ev.confirm_hint = confirm_hint;
9386         ev.value = cpu_to_le32(value);
9387
9388         return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9389                           NULL);
9390 }
9391
9392 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9393                               u8 link_type, u8 addr_type)
9394 {
9395         struct mgmt_ev_user_passkey_request ev;
9396
9397         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9398
9399         bacpy(&ev.addr.bdaddr, bdaddr);
9400         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9401
9402         return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9403                           NULL);
9404 }
9405
9406 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9407                                       u8 link_type, u8 addr_type, u8 status,
9408                                       u8 opcode)
9409 {
9410         struct mgmt_pending_cmd *cmd;
9411
9412         cmd = pending_find(opcode, hdev);
9413         if (!cmd)
9414                 return -ENOENT;
9415
9416         cmd->cmd_complete(cmd, mgmt_status(status));
9417         mgmt_pending_remove(cmd);
9418
9419         return 0;
9420 }
9421
9422 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9423                                      u8 link_type, u8 addr_type, u8 status)
9424 {
9425         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9426                                           status, MGMT_OP_USER_CONFIRM_REPLY);
9427 }
9428
9429 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9430                                          u8 link_type, u8 addr_type, u8 status)
9431 {
9432         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9433                                           status,
9434                                           MGMT_OP_USER_CONFIRM_NEG_REPLY);
9435 }
9436
9437 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9438                                      u8 link_type, u8 addr_type, u8 status)
9439 {
9440         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9441                                           status, MGMT_OP_USER_PASSKEY_REPLY);
9442 }
9443
9444 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9445                                          u8 link_type, u8 addr_type, u8 status)
9446 {
9447         return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9448                                           status,
9449                                           MGMT_OP_USER_PASSKEY_NEG_REPLY);
9450 }
9451
9452 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9453                              u8 link_type, u8 addr_type, u32 passkey,
9454                              u8 entered)
9455 {
9456         struct mgmt_ev_passkey_notify ev;
9457
9458         bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9459
9460         bacpy(&ev.addr.bdaddr, bdaddr);
9461         ev.addr.type = link_to_bdaddr(link_type, addr_type);
9462         ev.passkey = __cpu_to_le32(passkey);
9463         ev.entered = entered;
9464
9465         return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9466 }
9467
9468 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9469 {
9470         struct mgmt_ev_auth_failed ev;
9471         struct mgmt_pending_cmd *cmd;
9472         u8 status = mgmt_status(hci_status);
9473
9474         bacpy(&ev.addr.bdaddr, &conn->dst);
9475         ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9476         ev.status = status;
9477
9478         cmd = find_pairing(conn);
9479
9480         mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9481                     cmd ? cmd->sk : NULL);
9482
9483         if (cmd) {
9484                 cmd->cmd_complete(cmd, status);
9485                 mgmt_pending_remove(cmd);
9486         }
9487 }
9488
9489 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9490 {
9491         struct cmd_lookup match = { NULL, hdev };
9492         bool changed;
9493
9494         if (status) {
9495                 u8 mgmt_err = mgmt_status(status);
9496                 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9497                                      cmd_status_rsp, &mgmt_err);
9498                 return;
9499         }
9500
9501         if (test_bit(HCI_AUTH, &hdev->flags))
9502                 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9503         else
9504                 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9505
9506         mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9507                              &match);
9508
9509         if (changed)
9510                 new_settings(hdev, match.sk);
9511
9512         if (match.sk)
9513                 sock_put(match.sk);
9514 }
9515
9516 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9517 {
9518         struct cmd_lookup *match = data;
9519
9520         if (match->sk == NULL) {
9521                 match->sk = cmd->sk;
9522                 sock_hold(match->sk);
9523         }
9524 }
9525
9526 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9527                                     u8 status)
9528 {
9529         struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9530
9531         mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9532         mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9533         mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9534
9535         if (!status) {
9536                 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9537                                    3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9538                 ext_info_changed(hdev, NULL);
9539         }
9540
9541         if (match.sk)
9542                 sock_put(match.sk);
9543 }
9544
9545 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9546 {
9547         struct mgmt_cp_set_local_name ev;
9548         struct mgmt_pending_cmd *cmd;
9549
9550         if (status)
9551                 return;
9552
9553         memset(&ev, 0, sizeof(ev));
9554         memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9555         memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9556
9557         cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9558         if (!cmd) {
9559                 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9560
                /* If this is an HCI command related to powering on the
                 * HCI dev, don't send any mgmt signals.
                 */
9564                 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9565                         return;
9566         }
9567
9568         mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9569                            HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9570         ext_info_changed(hdev, cmd ? cmd->sk : NULL);
9571 }
9572
9573 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9574 {
9575         int i;
9576
9577         for (i = 0; i < uuid_count; i++) {
9578                 if (!memcmp(uuid, uuids[i], 16))
9579                         return true;
9580         }
9581
9582         return false;
9583 }
9584
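/* Walk the EIR/advertising data structures (each one is a length byte
 * followed by a type byte and the payload) and check whether any advertised
 * UUID matches the discovery filter. 16-bit and 32-bit UUIDs are expanded to
 * 128 bits by writing them over the Bluetooth Base UUID (kept little-endian
 * here, so the short value lands in bytes 12..15) before comparing.
 */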
9585 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9586 {
9587         u16 parsed = 0;
9588
9589         while (parsed < eir_len) {
9590                 u8 field_len = eir[0];
9591                 u8 uuid[16];
9592                 int i;
9593
9594                 if (field_len == 0)
9595                         break;
9596
9597                 if (eir_len - parsed < field_len + 1)
9598                         break;
9599
9600                 switch (eir[1]) {
9601                 case EIR_UUID16_ALL:
9602                 case EIR_UUID16_SOME:
9603                         for (i = 0; i + 3 <= field_len; i += 2) {
9604                                 memcpy(uuid, bluetooth_base_uuid, 16);
9605                                 uuid[13] = eir[i + 3];
9606                                 uuid[12] = eir[i + 2];
9607                                 if (has_uuid(uuid, uuid_count, uuids))
9608                                         return true;
9609                         }
9610                         break;
9611                 case EIR_UUID32_ALL:
9612                 case EIR_UUID32_SOME:
9613                         for (i = 0; i + 5 <= field_len; i += 4) {
9614                                 memcpy(uuid, bluetooth_base_uuid, 16);
9615                                 uuid[15] = eir[i + 5];
9616                                 uuid[14] = eir[i + 4];
9617                                 uuid[13] = eir[i + 3];
9618                                 uuid[12] = eir[i + 2];
9619                                 if (has_uuid(uuid, uuid_count, uuids))
9620                                         return true;
9621                         }
9622                         break;
9623                 case EIR_UUID128_ALL:
9624                 case EIR_UUID128_SOME:
9625                         for (i = 0; i + 17 <= field_len; i += 16) {
9626                                 memcpy(uuid, eir + i + 2, 16);
9627                                 if (has_uuid(uuid, uuid_count, uuids))
9628                                         return true;
9629                         }
9630                         break;
9631                 }
9632
9633                 parsed += field_len + 1;
9634                 eir += field_len + 1;
9635         }
9636
9637         return false;
9638 }
9639
9640 static void restart_le_scan(struct hci_dev *hdev)
9641 {
        /* If the controller is not scanning, we are done. */
9643         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9644                 return;
9645
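        /* If the restart delay would only expire after the scan is due to end
         * anyway, there is nothing to gain from restarting, so skip it.
         */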
9646         if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9647                        hdev->discovery.scan_start +
9648                        hdev->discovery.scan_duration))
9649                 return;
9650
9651         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9652                            DISCOV_LE_RESTART_DELAY);
9653 }
9654
9655 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9656                             u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9657 {
        /* If an RSSI threshold has been specified, and
         * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
         * an RSSI smaller than the RSSI threshold will be dropped. If the
         * quirk is set, let it through for further processing, as we might
         * need to restart the scan.
         *
         * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
         * the results are also dropped.
         */
9667         if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9668             (rssi == HCI_RSSI_INVALID ||
9669             (rssi < hdev->discovery.rssi &&
9670              !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
                return false;
9672
9673         if (hdev->discovery.uuid_count != 0) {
9674                 /* If a list of UUIDs is provided in filter, results with no
9675                  * matching UUID should be dropped.
9676                  */
9677                 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9678                                    hdev->discovery.uuids) &&
9679                     !eir_has_uuids(scan_rsp, scan_rsp_len,
9680                                    hdev->discovery.uuid_count,
9681                                    hdev->discovery.uuids))
9682                         return false;
9683         }
9684
        /* If duplicate filtering does not report RSSI changes, then restart
         * scanning to ensure updated results with updated RSSI values.
         */
9688         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9689                 restart_le_scan(hdev);
9690
9691                 /* Validate RSSI value against the RSSI threshold once more. */
9692                 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9693                     rssi < hdev->discovery.rssi)
9694                         return false;
9695         }
9696
9697         return true;
9698 }
9699
9700 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
9701                                   bdaddr_t *bdaddr, u8 addr_type)
9702 {
9703         struct mgmt_ev_adv_monitor_device_lost ev;
9704
9705         ev.monitor_handle = cpu_to_le16(handle);
9706         bacpy(&ev.addr.bdaddr, bdaddr);
9707         ev.addr.type = addr_type;
9708
9709         mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
9710                    NULL);
9711 }
9712
9713 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
9714                                                struct sk_buff *skb,
9715                                                struct sock *skip_sk,
9716                                                u16 handle)
9717 {
9718         struct sk_buff *advmon_skb;
9719         size_t advmon_skb_len;
9720         __le16 *monitor_handle;
9721
9722         if (!skb)
9723                 return;
9724
9725         advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
9726                           sizeof(struct mgmt_ev_device_found)) + skb->len;
9727         advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
9728                                     advmon_skb_len);
9729         if (!advmon_skb)
9730                 return;
9731
        /* ADV_MONITOR_DEVICE_FOUND is similar to the DEVICE_FOUND event except
         * that it also carries 'monitor_handle'. Make a copy of DEVICE_FOUND
         * and store the monitor_handle of the matched monitor.
         */
9736         monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
9737         *monitor_handle = cpu_to_le16(handle);
9738         skb_put_data(advmon_skb, skb->data, skb->len);
9739
9740         mgmt_event_skb(advmon_skb, skip_sk);
9741 }
9742
9743 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
9744                                           bdaddr_t *bdaddr, bool report_device,
9745                                           struct sk_buff *skb,
9746                                           struct sock *skip_sk)
9747 {
9748         struct monitored_device *dev, *tmp;
9749         bool matched = false;
9750         bool notified = false;
9751
        /* We have received the Advertisement Report because:
         * 1. the kernel has initiated active discovery
         * 2. if not, we have pend_le_reports > 0, in which case we are doing
         *    passive scanning
         * 3. if none of the above is true, we have one or more active
         *    Advertisement Monitors
         *
         * For cases 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
         * and report ONLY one advertisement per device for the matched Monitor
         * via the MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
         *
         * For case 3, since we are not actively scanning and all advertisements
         * received are due to a matched Advertisement Monitor, report all
         * advertisements ONLY via the MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
         */
9767         if (report_device && !hdev->advmon_pend_notify) {
9768                 mgmt_event_skb(skb, skip_sk);
9769                 return;
9770         }
9771
9772         hdev->advmon_pend_notify = false;
9773
9774         list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
9775                 if (!bacmp(&dev->bdaddr, bdaddr)) {
9776                         matched = true;
9777
9778                         if (!dev->notified) {
9779                                 mgmt_send_adv_monitor_device_found(hdev, skb,
9780                                                                    skip_sk,
9781                                                                    dev->handle);
9782                                 notified = true;
9783                                 dev->notified = true;
9784                         }
9785                 }
9786
9787                 if (!dev->notified)
9788                         hdev->advmon_pend_notify = true;
9789         }
9790
9791         if (!report_device &&
9792             ((matched && !notified) || !msft_monitor_supported(hdev))) {
9793                 /* Handle 0 indicates that we are not active scanning and this
9794                  * is a subsequent advertisement report for an already matched
9795                  * Advertisement Monitor or the controller offloading support
9796                  * is not available.
9797                  */
9798                 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
9799         }
9800
9801         if (report_device)
9802                 mgmt_event_skb(skb, skip_sk);
9803         else
9804                 kfree_skb(skb);
9805 }
9806
9807 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9808                        u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9809                        u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9810 {
9811         struct sk_buff *skb;
9812         struct mgmt_ev_device_found *ev;
9813         bool report_device = hci_discovery_active(hdev);
9814
        /* Don't send events for a non-kernel-initiated discovery. With
         * LE, one exception is if we have pend_le_reports > 0, in which
         * case we're doing passive scanning and want these events.
         */
9819         if (!hci_discovery_active(hdev)) {
9820                 if (link_type == ACL_LINK)
9821                         return;
9822                 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
9823                         report_device = true;
9824                 else if (!hci_is_adv_monitoring(hdev))
9825                         return;
9826         }
9827
9828         if (hdev->discovery.result_filtering) {
9829                 /* We are using service discovery */
9830                 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
9831                                      scan_rsp_len))
9832                         return;
9833         }
9834
9835         if (hdev->discovery.limited) {
9836                 /* Check for limited discoverable bit */
9837                 if (dev_class) {
9838                         if (!(dev_class[1] & 0x20))
9839                                 return;
9840                 } else {
9841                         u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
9842                         if (!flags || !(flags[0] & LE_AD_LIMITED))
9843                                 return;
9844                 }
9845         }
9846
        /* Allocate skb. The 5 extra bytes are for the potential CoD field
         * (2 byte EIR header plus 3 bytes of class data).
         */
9848         skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
9849                              sizeof(*ev) + eir_len + scan_rsp_len + 5);
9850         if (!skb)
9851                 return;
9852
9853         ev = skb_put(skb, sizeof(*ev));
9854
        /* In case of device discovery with BR/EDR devices (pre 1.2), the
         * RSSI value was reported as 0 when not available. This behavior
         * is kept when using device discovery. This is required for full
         * backwards compatibility with the API.
         *
         * However, when using service discovery, the value 127 will be
         * returned when the RSSI is not available.
         */
9863         if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9864             link_type == ACL_LINK)
9865                 rssi = 0;
9866
9867         bacpy(&ev->addr.bdaddr, bdaddr);
9868         ev->addr.type = link_to_bdaddr(link_type, addr_type);
9869         ev->rssi = rssi;
9870         ev->flags = cpu_to_le32(flags);
9871
9872         if (eir_len > 0)
9873                 /* Copy EIR or advertising data into event */
9874                 skb_put_data(skb, eir, eir_len);
9875
9876         if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
9877                 u8 eir_cod[5];
9878
9879                 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
9880                                            dev_class, 3);
9881                 skb_put_data(skb, eir_cod, sizeof(eir_cod));
9882         }
9883
9884         if (scan_rsp_len > 0)
9885                 /* Append scan response data to event */
9886                 skb_put_data(skb, scan_rsp, scan_rsp_len);
9887
9888         ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9889
9890         mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
9891 }
9892
9893 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9894                       u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9895 {
9896         struct sk_buff *skb;
9897         struct mgmt_ev_device_found *ev;
9898         u16 eir_len = 0;
9899         u32 flags = 0;
9900
9901         skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
9902                              sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
9903
        /* Bail out if the allocation above failed */
        if (!skb)
                return;

        ev = skb_put(skb, sizeof(*ev));
9905         bacpy(&ev->addr.bdaddr, bdaddr);
9906         ev->addr.type = link_to_bdaddr(link_type, addr_type);
9907         ev->rssi = rssi;
9908
9909         if (name)
9910                 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9911         else
9912                 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9913
9914         ev->eir_len = cpu_to_le16(eir_len);
9915         ev->flags = cpu_to_le32(flags);
9916
9917         mgmt_event_skb(skb, NULL);
9918 }
9919
9920 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9921 {
9922         struct mgmt_ev_discovering ev;
9923
9924         bt_dev_dbg(hdev, "discovering %u", discovering);
9925
9926         memset(&ev, 0, sizeof(ev));
9927         ev.type = hdev->discovery.type;
9928         ev.discovering = discovering;
9929
9930         mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9931 }
9932
9933 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9934 {
9935         struct mgmt_ev_controller_suspend ev;
9936
9937         ev.suspend_state = state;
9938         mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9939 }
9940
9941 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9942                    u8 addr_type)
9943 {
9944         struct mgmt_ev_controller_resume ev;
9945
9946         ev.wake_reason = reason;
9947         if (bdaddr) {
9948                 bacpy(&ev.addr.bdaddr, bdaddr);
9949                 ev.addr.type = addr_type;
9950         } else {
9951                 memset(&ev.addr, 0, sizeof(ev.addr));
9952         }
9953
9954         mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9955 }
9956
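/* Registration of the Management interface as the HCI control channel: once
 * hci_mgmt_chan_register() succeeds, commands arriving on HCI_CHANNEL_CONTROL
 * sockets are dispatched through the mgmt_handlers table above.
 */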
9957 static struct hci_mgmt_chan chan = {
9958         .channel        = HCI_CHANNEL_CONTROL,
9959         .handler_count  = ARRAY_SIZE(mgmt_handlers),
9960         .handlers       = mgmt_handlers,
9961         .hdev_init      = mgmt_init_hdev,
9962 };
9963
9964 int mgmt_init(void)
9965 {
9966         return hci_mgmt_chan_register(&chan);
9967 }
9968
9969 void mgmt_exit(void)
9970 {
9971         hci_mgmt_chan_unregister(&chan);
9972 }