2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
44 #define MGMT_VERSION 1
45 #define MGMT_REVISION 22
47 static const u16 mgmt_commands[] = {
48 MGMT_OP_READ_INDEX_LIST,
51 MGMT_OP_SET_DISCOVERABLE,
52 MGMT_OP_SET_CONNECTABLE,
53 MGMT_OP_SET_FAST_CONNECTABLE,
55 MGMT_OP_SET_LINK_SECURITY,
59 MGMT_OP_SET_DEV_CLASS,
60 MGMT_OP_SET_LOCAL_NAME,
63 MGMT_OP_LOAD_LINK_KEYS,
64 MGMT_OP_LOAD_LONG_TERM_KEYS,
66 MGMT_OP_GET_CONNECTIONS,
67 MGMT_OP_PIN_CODE_REPLY,
68 MGMT_OP_PIN_CODE_NEG_REPLY,
69 MGMT_OP_SET_IO_CAPABILITY,
71 MGMT_OP_CANCEL_PAIR_DEVICE,
72 MGMT_OP_UNPAIR_DEVICE,
73 MGMT_OP_USER_CONFIRM_REPLY,
74 MGMT_OP_USER_CONFIRM_NEG_REPLY,
75 MGMT_OP_USER_PASSKEY_REPLY,
76 MGMT_OP_USER_PASSKEY_NEG_REPLY,
77 MGMT_OP_READ_LOCAL_OOB_DATA,
78 MGMT_OP_ADD_REMOTE_OOB_DATA,
79 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80 MGMT_OP_START_DISCOVERY,
81 MGMT_OP_STOP_DISCOVERY,
84 MGMT_OP_UNBLOCK_DEVICE,
85 MGMT_OP_SET_DEVICE_ID,
86 MGMT_OP_SET_ADVERTISING,
88 MGMT_OP_SET_STATIC_ADDRESS,
89 MGMT_OP_SET_SCAN_PARAMS,
90 MGMT_OP_SET_SECURE_CONN,
91 MGMT_OP_SET_DEBUG_KEYS,
94 MGMT_OP_GET_CONN_INFO,
95 MGMT_OP_GET_CLOCK_INFO,
97 MGMT_OP_REMOVE_DEVICE,
98 MGMT_OP_LOAD_CONN_PARAM,
99 MGMT_OP_READ_UNCONF_INDEX_LIST,
100 MGMT_OP_READ_CONFIG_INFO,
101 MGMT_OP_SET_EXTERNAL_CONFIG,
102 MGMT_OP_SET_PUBLIC_ADDRESS,
103 MGMT_OP_START_SERVICE_DISCOVERY,
104 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105 MGMT_OP_READ_EXT_INDEX_LIST,
106 MGMT_OP_READ_ADV_FEATURES,
107 MGMT_OP_ADD_ADVERTISING,
108 MGMT_OP_REMOVE_ADVERTISING,
109 MGMT_OP_GET_ADV_SIZE_INFO,
110 MGMT_OP_START_LIMITED_DISCOVERY,
111 MGMT_OP_READ_EXT_INFO,
112 MGMT_OP_SET_APPEARANCE,
113 MGMT_OP_GET_PHY_CONFIGURATION,
114 MGMT_OP_SET_PHY_CONFIGURATION,
115 MGMT_OP_SET_BLOCKED_KEYS,
116 MGMT_OP_SET_WIDEBAND_SPEECH,
117 MGMT_OP_READ_CONTROLLER_CAP,
118 MGMT_OP_READ_EXP_FEATURES_INFO,
119 MGMT_OP_SET_EXP_FEATURE,
120 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124 MGMT_OP_GET_DEVICE_FLAGS,
125 MGMT_OP_SET_DEVICE_FLAGS,
126 MGMT_OP_READ_ADV_MONITOR_FEATURES,
127 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128 MGMT_OP_REMOVE_ADV_MONITOR,
129 MGMT_OP_ADD_EXT_ADV_PARAMS,
130 MGMT_OP_ADD_EXT_ADV_DATA,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
134 static const u16 mgmt_events[] = {
135 MGMT_EV_CONTROLLER_ERROR,
137 MGMT_EV_INDEX_REMOVED,
138 MGMT_EV_NEW_SETTINGS,
139 MGMT_EV_CLASS_OF_DEV_CHANGED,
140 MGMT_EV_LOCAL_NAME_CHANGED,
141 MGMT_EV_NEW_LINK_KEY,
142 MGMT_EV_NEW_LONG_TERM_KEY,
143 MGMT_EV_DEVICE_CONNECTED,
144 MGMT_EV_DEVICE_DISCONNECTED,
145 MGMT_EV_CONNECT_FAILED,
146 MGMT_EV_PIN_CODE_REQUEST,
147 MGMT_EV_USER_CONFIRM_REQUEST,
148 MGMT_EV_USER_PASSKEY_REQUEST,
150 MGMT_EV_DEVICE_FOUND,
152 MGMT_EV_DEVICE_BLOCKED,
153 MGMT_EV_DEVICE_UNBLOCKED,
154 MGMT_EV_DEVICE_UNPAIRED,
155 MGMT_EV_PASSKEY_NOTIFY,
158 MGMT_EV_DEVICE_ADDED,
159 MGMT_EV_DEVICE_REMOVED,
160 MGMT_EV_NEW_CONN_PARAM,
161 MGMT_EV_UNCONF_INDEX_ADDED,
162 MGMT_EV_UNCONF_INDEX_REMOVED,
163 MGMT_EV_NEW_CONFIG_OPTIONS,
164 MGMT_EV_EXT_INDEX_ADDED,
165 MGMT_EV_EXT_INDEX_REMOVED,
166 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
167 MGMT_EV_ADVERTISING_ADDED,
168 MGMT_EV_ADVERTISING_REMOVED,
169 MGMT_EV_EXT_INFO_CHANGED,
170 MGMT_EV_PHY_CONFIGURATION_CHANGED,
171 MGMT_EV_EXP_FEATURE_CHANGED,
172 MGMT_EV_DEVICE_FLAGS_CHANGED,
173 MGMT_EV_ADV_MONITOR_ADDED,
174 MGMT_EV_ADV_MONITOR_REMOVED,
175 MGMT_EV_CONTROLLER_SUSPEND,
176 MGMT_EV_CONTROLLER_RESUME,
177 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
178 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
181 static const u16 mgmt_untrusted_commands[] = {
182 MGMT_OP_READ_INDEX_LIST,
184 MGMT_OP_READ_UNCONF_INDEX_LIST,
185 MGMT_OP_READ_CONFIG_INFO,
186 MGMT_OP_READ_EXT_INDEX_LIST,
187 MGMT_OP_READ_EXT_INFO,
188 MGMT_OP_READ_CONTROLLER_CAP,
189 MGMT_OP_READ_EXP_FEATURES_INFO,
190 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
191 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
194 static const u16 mgmt_untrusted_events[] = {
196 MGMT_EV_INDEX_REMOVED,
197 MGMT_EV_NEW_SETTINGS,
198 MGMT_EV_CLASS_OF_DEV_CHANGED,
199 MGMT_EV_LOCAL_NAME_CHANGED,
200 MGMT_EV_UNCONF_INDEX_ADDED,
201 MGMT_EV_UNCONF_INDEX_REMOVED,
202 MGMT_EV_NEW_CONFIG_OPTIONS,
203 MGMT_EV_EXT_INDEX_ADDED,
204 MGMT_EV_EXT_INDEX_REMOVED,
205 MGMT_EV_EXT_INFO_CHANGED,
206 MGMT_EV_EXP_FEATURE_CHANGED,
209 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 "\x00\x00\x00\x00\x00\x00\x00\x00"
214 /* HCI to MGMT error code conversion table */
215 static const u8 mgmt_status_table[] = {
217 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
218 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
219 MGMT_STATUS_FAILED, /* Hardware Failure */
220 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
221 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
222 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
223 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
224 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
225 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
226 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
227 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
228 MGMT_STATUS_BUSY, /* Command Disallowed */
229 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
230 MGMT_STATUS_REJECTED, /* Rejected Security */
231 MGMT_STATUS_REJECTED, /* Rejected Personal */
232 MGMT_STATUS_TIMEOUT, /* Host Timeout */
233 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
234 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
235 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
236 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
237 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
238 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
239 MGMT_STATUS_BUSY, /* Repeated Attempts */
240 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
241 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
242 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
243 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
244 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
245 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
246 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
247 MGMT_STATUS_FAILED, /* Unspecified Error */
248 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
249 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
250 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
251 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
252 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
253 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
254 MGMT_STATUS_FAILED, /* Unit Link Key Used */
255 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
256 MGMT_STATUS_TIMEOUT, /* Instant Passed */
257 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
258 MGMT_STATUS_FAILED, /* Transaction Collision */
259 MGMT_STATUS_FAILED, /* Reserved for future use */
260 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
261 MGMT_STATUS_REJECTED, /* QoS Rejected */
262 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
263 MGMT_STATUS_REJECTED, /* Insufficient Security */
264 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
265 MGMT_STATUS_FAILED, /* Reserved for future use */
266 MGMT_STATUS_BUSY, /* Role Switch Pending */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_FAILED, /* Slot Violation */
269 MGMT_STATUS_FAILED, /* Role Switch Failed */
270 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
271 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
272 MGMT_STATUS_BUSY, /* Host Busy Pairing */
273 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
274 MGMT_STATUS_BUSY, /* Controller Busy */
275 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
276 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
277 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
278 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
279 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
282 static u8 mgmt_errno_status(int err)
286 return MGMT_STATUS_SUCCESS;
288 return MGMT_STATUS_REJECTED;
290 return MGMT_STATUS_INVALID_PARAMS;
292 return MGMT_STATUS_NOT_SUPPORTED;
294 return MGMT_STATUS_BUSY;
296 return MGMT_STATUS_AUTH_FAILED;
298 return MGMT_STATUS_NO_RESOURCES;
300 return MGMT_STATUS_ALREADY_CONNECTED;
302 return MGMT_STATUS_DISCONNECTED;
305 return MGMT_STATUS_FAILED;
308 static u8 mgmt_status(int err)
311 return mgmt_errno_status(err);
313 if (err < ARRAY_SIZE(mgmt_status_table))
314 return mgmt_status_table[err];
316 return MGMT_STATUS_FAILED;
319 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
322 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
326 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
327 u16 len, int flag, struct sock *skip_sk)
329 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
333 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
334 struct sock *skip_sk)
336 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
337 HCI_SOCK_TRUSTED, skip_sk);
340 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
342 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
346 static u8 le_addr_type(u8 mgmt_addr_type)
348 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
349 return ADDR_LE_DEV_PUBLIC;
351 return ADDR_LE_DEV_RANDOM;
354 void mgmt_fill_version_info(void *ver)
356 struct mgmt_rp_read_version *rp = ver;
358 rp->version = MGMT_VERSION;
359 rp->revision = cpu_to_le16(MGMT_REVISION);
362 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
365 struct mgmt_rp_read_version rp;
367 bt_dev_dbg(hdev, "sock %p", sk);
369 mgmt_fill_version_info(&rp);
371 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
375 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
378 struct mgmt_rp_read_commands *rp;
379 u16 num_commands, num_events;
383 bt_dev_dbg(hdev, "sock %p", sk);
385 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
386 num_commands = ARRAY_SIZE(mgmt_commands);
387 num_events = ARRAY_SIZE(mgmt_events);
389 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
390 num_events = ARRAY_SIZE(mgmt_untrusted_events);
393 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
395 rp = kmalloc(rp_size, GFP_KERNEL);
399 rp->num_commands = cpu_to_le16(num_commands);
400 rp->num_events = cpu_to_le16(num_events);
402 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
403 __le16 *opcode = rp->opcodes;
405 for (i = 0; i < num_commands; i++, opcode++)
406 put_unaligned_le16(mgmt_commands[i], opcode);
408 for (i = 0; i < num_events; i++, opcode++)
409 put_unaligned_le16(mgmt_events[i], opcode);
411 __le16 *opcode = rp->opcodes;
413 for (i = 0; i < num_commands; i++, opcode++)
414 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
416 for (i = 0; i < num_events; i++, opcode++)
417 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
420 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
427 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
430 struct mgmt_rp_read_index_list *rp;
436 bt_dev_dbg(hdev, "sock %p", sk);
438 read_lock(&hci_dev_list_lock);
441 list_for_each_entry(d, &hci_dev_list, list) {
442 if (d->dev_type == HCI_PRIMARY &&
443 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
447 rp_len = sizeof(*rp) + (2 * count);
448 rp = kmalloc(rp_len, GFP_ATOMIC);
450 read_unlock(&hci_dev_list_lock);
455 list_for_each_entry(d, &hci_dev_list, list) {
456 if (hci_dev_test_flag(d, HCI_SETUP) ||
457 hci_dev_test_flag(d, HCI_CONFIG) ||
458 hci_dev_test_flag(d, HCI_USER_CHANNEL))
461 /* Devices marked as raw-only are neither configured
462 * nor unconfigured controllers.
464 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
467 if (d->dev_type == HCI_PRIMARY &&
468 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
469 rp->index[count++] = cpu_to_le16(d->id);
470 bt_dev_dbg(hdev, "Added hci%u", d->id);
474 rp->num_controllers = cpu_to_le16(count);
475 rp_len = sizeof(*rp) + (2 * count);
477 read_unlock(&hci_dev_list_lock);
479 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
487 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
488 void *data, u16 data_len)
490 struct mgmt_rp_read_unconf_index_list *rp;
496 bt_dev_dbg(hdev, "sock %p", sk);
498 read_lock(&hci_dev_list_lock);
501 list_for_each_entry(d, &hci_dev_list, list) {
502 if (d->dev_type == HCI_PRIMARY &&
503 hci_dev_test_flag(d, HCI_UNCONFIGURED))
507 rp_len = sizeof(*rp) + (2 * count);
508 rp = kmalloc(rp_len, GFP_ATOMIC);
510 read_unlock(&hci_dev_list_lock);
515 list_for_each_entry(d, &hci_dev_list, list) {
516 if (hci_dev_test_flag(d, HCI_SETUP) ||
517 hci_dev_test_flag(d, HCI_CONFIG) ||
518 hci_dev_test_flag(d, HCI_USER_CHANNEL))
521 /* Devices marked as raw-only are neither configured
522 * nor unconfigured controllers.
524 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
527 if (d->dev_type == HCI_PRIMARY &&
528 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
529 rp->index[count++] = cpu_to_le16(d->id);
530 bt_dev_dbg(hdev, "Added hci%u", d->id);
534 rp->num_controllers = cpu_to_le16(count);
535 rp_len = sizeof(*rp) + (2 * count);
537 read_unlock(&hci_dev_list_lock);
539 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
540 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
547 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
548 void *data, u16 data_len)
550 struct mgmt_rp_read_ext_index_list *rp;
555 bt_dev_dbg(hdev, "sock %p", sk);
557 read_lock(&hci_dev_list_lock);
560 list_for_each_entry(d, &hci_dev_list, list) {
561 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
565 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
567 read_unlock(&hci_dev_list_lock);
572 list_for_each_entry(d, &hci_dev_list, list) {
573 if (hci_dev_test_flag(d, HCI_SETUP) ||
574 hci_dev_test_flag(d, HCI_CONFIG) ||
575 hci_dev_test_flag(d, HCI_USER_CHANNEL))
578 /* Devices marked as raw-only are neither configured
579 * nor unconfigured controllers.
581 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
584 if (d->dev_type == HCI_PRIMARY) {
585 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
586 rp->entry[count].type = 0x01;
588 rp->entry[count].type = 0x00;
589 } else if (d->dev_type == HCI_AMP) {
590 rp->entry[count].type = 0x02;
595 rp->entry[count].bus = d->bus;
596 rp->entry[count++].index = cpu_to_le16(d->id);
597 bt_dev_dbg(hdev, "Added hci%u", d->id);
600 rp->num_controllers = cpu_to_le16(count);
602 read_unlock(&hci_dev_list_lock);
604 /* If this command is called at least once, then all the
605 * default index and unconfigured index events are disabled
606 * and from now on only extended index events are used.
608 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
609 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
610 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
612 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
613 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
614 struct_size(rp, entry, count));
621 static bool is_configured(struct hci_dev *hdev)
623 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
624 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
627 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
628 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
629 !bacmp(&hdev->public_addr, BDADDR_ANY))
635 static __le32 get_missing_options(struct hci_dev *hdev)
639 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
640 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
641 options |= MGMT_OPTION_EXTERNAL_CONFIG;
643 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
644 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
645 !bacmp(&hdev->public_addr, BDADDR_ANY))
646 options |= MGMT_OPTION_PUBLIC_ADDRESS;
648 return cpu_to_le32(options);
651 static int new_options(struct hci_dev *hdev, struct sock *skip)
653 __le32 options = get_missing_options(hdev);
655 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
656 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
659 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
661 __le32 options = get_missing_options(hdev);
663 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
667 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
668 void *data, u16 data_len)
670 struct mgmt_rp_read_config_info rp;
673 bt_dev_dbg(hdev, "sock %p", sk);
677 memset(&rp, 0, sizeof(rp));
678 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
680 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
681 options |= MGMT_OPTION_EXTERNAL_CONFIG;
683 if (hdev->set_bdaddr)
684 options |= MGMT_OPTION_PUBLIC_ADDRESS;
686 rp.supported_options = cpu_to_le32(options);
687 rp.missing_options = get_missing_options(hdev);
689 hci_dev_unlock(hdev);
691 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
695 static u32 get_supported_phys(struct hci_dev *hdev)
697 u32 supported_phys = 0;
699 if (lmp_bredr_capable(hdev)) {
700 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
702 if (hdev->features[0][0] & LMP_3SLOT)
703 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
705 if (hdev->features[0][0] & LMP_5SLOT)
706 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
708 if (lmp_edr_2m_capable(hdev)) {
709 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
711 if (lmp_edr_3slot_capable(hdev))
712 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
714 if (lmp_edr_5slot_capable(hdev))
715 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
717 if (lmp_edr_3m_capable(hdev)) {
718 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
720 if (lmp_edr_3slot_capable(hdev))
721 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
723 if (lmp_edr_5slot_capable(hdev))
724 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
729 if (lmp_le_capable(hdev)) {
730 supported_phys |= MGMT_PHY_LE_1M_TX;
731 supported_phys |= MGMT_PHY_LE_1M_RX;
733 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
734 supported_phys |= MGMT_PHY_LE_2M_TX;
735 supported_phys |= MGMT_PHY_LE_2M_RX;
738 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
739 supported_phys |= MGMT_PHY_LE_CODED_TX;
740 supported_phys |= MGMT_PHY_LE_CODED_RX;
744 return supported_phys;
747 static u32 get_selected_phys(struct hci_dev *hdev)
749 u32 selected_phys = 0;
751 if (lmp_bredr_capable(hdev)) {
752 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
754 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
755 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
757 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
758 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
760 if (lmp_edr_2m_capable(hdev)) {
761 if (!(hdev->pkt_type & HCI_2DH1))
762 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
764 if (lmp_edr_3slot_capable(hdev) &&
765 !(hdev->pkt_type & HCI_2DH3))
766 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
768 if (lmp_edr_5slot_capable(hdev) &&
769 !(hdev->pkt_type & HCI_2DH5))
770 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
772 if (lmp_edr_3m_capable(hdev)) {
773 if (!(hdev->pkt_type & HCI_3DH1))
774 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
776 if (lmp_edr_3slot_capable(hdev) &&
777 !(hdev->pkt_type & HCI_3DH3))
778 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
780 if (lmp_edr_5slot_capable(hdev) &&
781 !(hdev->pkt_type & HCI_3DH5))
782 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
787 if (lmp_le_capable(hdev)) {
788 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
789 selected_phys |= MGMT_PHY_LE_1M_TX;
791 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
792 selected_phys |= MGMT_PHY_LE_1M_RX;
794 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
795 selected_phys |= MGMT_PHY_LE_2M_TX;
797 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
798 selected_phys |= MGMT_PHY_LE_2M_RX;
800 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
801 selected_phys |= MGMT_PHY_LE_CODED_TX;
803 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
804 selected_phys |= MGMT_PHY_LE_CODED_RX;
807 return selected_phys;
810 static u32 get_configurable_phys(struct hci_dev *hdev)
812 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
813 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
816 static u32 get_supported_settings(struct hci_dev *hdev)
820 settings |= MGMT_SETTING_POWERED;
821 settings |= MGMT_SETTING_BONDABLE;
822 settings |= MGMT_SETTING_DEBUG_KEYS;
823 settings |= MGMT_SETTING_CONNECTABLE;
824 settings |= MGMT_SETTING_DISCOVERABLE;
826 if (lmp_bredr_capable(hdev)) {
827 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
828 settings |= MGMT_SETTING_FAST_CONNECTABLE;
829 settings |= MGMT_SETTING_BREDR;
830 settings |= MGMT_SETTING_LINK_SECURITY;
832 if (lmp_ssp_capable(hdev)) {
833 settings |= MGMT_SETTING_SSP;
834 if (IS_ENABLED(CONFIG_BT_HS))
835 settings |= MGMT_SETTING_HS;
838 if (lmp_sc_capable(hdev))
839 settings |= MGMT_SETTING_SECURE_CONN;
841 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
843 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
846 if (lmp_le_capable(hdev)) {
847 settings |= MGMT_SETTING_LE;
848 settings |= MGMT_SETTING_SECURE_CONN;
849 settings |= MGMT_SETTING_PRIVACY;
850 settings |= MGMT_SETTING_STATIC_ADDRESS;
851 settings |= MGMT_SETTING_ADVERTISING;
854 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
856 settings |= MGMT_SETTING_CONFIGURATION;
858 settings |= MGMT_SETTING_PHY_CONFIGURATION;
863 static u32 get_current_settings(struct hci_dev *hdev)
867 if (hdev_is_powered(hdev))
868 settings |= MGMT_SETTING_POWERED;
870 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
871 settings |= MGMT_SETTING_CONNECTABLE;
873 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
874 settings |= MGMT_SETTING_FAST_CONNECTABLE;
876 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
877 settings |= MGMT_SETTING_DISCOVERABLE;
879 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
880 settings |= MGMT_SETTING_BONDABLE;
882 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
883 settings |= MGMT_SETTING_BREDR;
885 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
886 settings |= MGMT_SETTING_LE;
888 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
889 settings |= MGMT_SETTING_LINK_SECURITY;
891 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
892 settings |= MGMT_SETTING_SSP;
894 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
895 settings |= MGMT_SETTING_HS;
897 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
898 settings |= MGMT_SETTING_ADVERTISING;
900 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
901 settings |= MGMT_SETTING_SECURE_CONN;
903 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
904 settings |= MGMT_SETTING_DEBUG_KEYS;
906 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
907 settings |= MGMT_SETTING_PRIVACY;
909 /* The current setting for static address has two purposes. The
910 * first is to indicate if the static address will be used and
911 * the second is to indicate if it is actually set.
913 * This means if the static address is not configured, this flag
914 * will never be set. If the address is configured, then if the
915 * address is actually used decides if the flag is set or not.
917 * For single mode LE only controllers and dual-mode controllers
918 * with BR/EDR disabled, the existence of the static address will
921 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
922 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
923 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
924 if (bacmp(&hdev->static_addr, BDADDR_ANY))
925 settings |= MGMT_SETTING_STATIC_ADDRESS;
928 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
929 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
934 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
936 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
939 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
941 struct mgmt_pending_cmd *cmd;
943 /* If there's a pending mgmt command the flags will not yet have
944 * their final values, so check for this first.
946 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
948 struct mgmt_mode *cp = cmd->param;
950 return LE_AD_GENERAL;
951 else if (cp->val == 0x02)
952 return LE_AD_LIMITED;
954 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
955 return LE_AD_LIMITED;
956 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
957 return LE_AD_GENERAL;
963 bool mgmt_get_connectable(struct hci_dev *hdev)
965 struct mgmt_pending_cmd *cmd;
967 /* If there's a pending mgmt command the flag will not yet have
968 * it's final value, so check for this first.
970 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
972 struct mgmt_mode *cp = cmd->param;
977 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
980 static int service_cache_sync(struct hci_dev *hdev, void *data)
982 hci_update_eir_sync(hdev);
983 hci_update_class_sync(hdev);
988 static void service_cache_off(struct work_struct *work)
990 struct hci_dev *hdev = container_of(work, struct hci_dev,
993 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
996 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
999 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1001 /* The generation of a new RPA and programming it into the
1002 * controller happens in the hci_req_enable_advertising()
1005 if (ext_adv_capable(hdev))
1006 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1008 return hci_enable_advertising_sync(hdev);
1011 static void rpa_expired(struct work_struct *work)
1013 struct hci_dev *hdev = container_of(work, struct hci_dev,
1016 bt_dev_dbg(hdev, "");
1018 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1020 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1023 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1026 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1028 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1031 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1032 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1034 /* Non-mgmt controlled devices get this bit set
1035 * implicitly so that pairing works for them, however
1036 * for mgmt we require user-space to explicitly enable
1039 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1042 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1043 void *data, u16 data_len)
1045 struct mgmt_rp_read_info rp;
1047 bt_dev_dbg(hdev, "sock %p", sk);
1051 memset(&rp, 0, sizeof(rp));
1053 bacpy(&rp.bdaddr, &hdev->bdaddr);
1055 rp.version = hdev->hci_ver;
1056 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1058 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1059 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1061 memcpy(rp.dev_class, hdev->dev_class, 3);
1063 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1064 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1066 hci_dev_unlock(hdev);
1068 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1072 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1077 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1078 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1079 hdev->dev_class, 3);
1081 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1082 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1085 name_len = strlen(hdev->dev_name);
1086 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1087 hdev->dev_name, name_len);
1089 name_len = strlen(hdev->short_name);
1090 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1091 hdev->short_name, name_len);
1096 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1097 void *data, u16 data_len)
1100 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1103 bt_dev_dbg(hdev, "sock %p", sk);
1105 memset(&buf, 0, sizeof(buf));
1109 bacpy(&rp->bdaddr, &hdev->bdaddr);
1111 rp->version = hdev->hci_ver;
1112 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1114 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1115 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1118 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1119 rp->eir_len = cpu_to_le16(eir_len);
1121 hci_dev_unlock(hdev);
1123 /* If this command is called at least once, then the events
1124 * for class of device and local name changes are disabled
1125 * and only the new extended controller information event
1128 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1129 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1130 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1132 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1133 sizeof(*rp) + eir_len);
1136 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1139 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1142 memset(buf, 0, sizeof(buf));
1144 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1145 ev->eir_len = cpu_to_le16(eir_len);
1147 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1148 sizeof(*ev) + eir_len,
1149 HCI_MGMT_EXT_INFO_EVENTS, skip);
1152 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1154 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1156 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1160 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1162 struct mgmt_ev_advertising_added ev;
1164 ev.instance = instance;
1166 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1169 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1172 struct mgmt_ev_advertising_removed ev;
1174 ev.instance = instance;
1176 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1179 static void cancel_adv_timeout(struct hci_dev *hdev)
1181 if (hdev->adv_instance_timeout) {
1182 hdev->adv_instance_timeout = 0;
1183 cancel_delayed_work(&hdev->adv_instance_expire);
1187 /* This function requires the caller holds hdev->lock */
1188 static void restart_le_actions(struct hci_dev *hdev)
1190 struct hci_conn_params *p;
1192 list_for_each_entry(p, &hdev->le_conn_params, list) {
1193 /* Needed for AUTO_OFF case where might not "really"
1194 * have been powered off.
1196 list_del_init(&p->action);
1198 switch (p->auto_connect) {
1199 case HCI_AUTO_CONN_DIRECT:
1200 case HCI_AUTO_CONN_ALWAYS:
1201 list_add(&p->action, &hdev->pend_le_conns);
1203 case HCI_AUTO_CONN_REPORT:
1204 list_add(&p->action, &hdev->pend_le_reports);
1212 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1214 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1216 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1217 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1220 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1222 struct mgmt_pending_cmd *cmd = data;
1223 struct mgmt_mode *cp;
1225 /* Make sure cmd still outstanding. */
1226 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1231 bt_dev_dbg(hdev, "err %d", err);
1236 restart_le_actions(hdev);
1237 hci_update_passive_scan(hdev);
1238 hci_dev_unlock(hdev);
1241 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1243 /* Only call new_setting for power on as power off is deferred
1244 * to hdev->power_off work which does call hci_dev_do_close.
1247 new_settings(hdev, cmd->sk);
1249 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1253 mgmt_pending_remove(cmd);
1256 static int set_powered_sync(struct hci_dev *hdev, void *data)
1258 struct mgmt_pending_cmd *cmd = data;
1259 struct mgmt_mode *cp = cmd->param;
1261 BT_DBG("%s", hdev->name);
1263 return hci_set_powered_sync(hdev, cp->val);
1266 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1269 struct mgmt_mode *cp = data;
1270 struct mgmt_pending_cmd *cmd;
1273 bt_dev_dbg(hdev, "sock %p", sk);
1275 if (cp->val != 0x00 && cp->val != 0x01)
1276 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1277 MGMT_STATUS_INVALID_PARAMS);
1281 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1282 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1287 if (!!cp->val == hdev_is_powered(hdev)) {
1288 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1292 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1298 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1299 mgmt_set_powered_complete);
1302 mgmt_pending_remove(cmd);
1305 hci_dev_unlock(hdev);
1309 int mgmt_new_settings(struct hci_dev *hdev)
1311 return new_settings(hdev, NULL);
1316 struct hci_dev *hdev;
1320 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1322 struct cmd_lookup *match = data;
1324 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1326 list_del(&cmd->list);
1328 if (match->sk == NULL) {
1329 match->sk = cmd->sk;
1330 sock_hold(match->sk);
1333 mgmt_pending_free(cmd);
1336 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1340 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1341 mgmt_pending_remove(cmd);
1344 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1346 if (cmd->cmd_complete) {
1349 cmd->cmd_complete(cmd, *status);
1350 mgmt_pending_remove(cmd);
1355 cmd_status_rsp(cmd, data);
1358 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1360 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1361 cmd->param, cmd->param_len);
1364 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1366 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1367 cmd->param, sizeof(struct mgmt_addr_info));
1370 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1372 if (!lmp_bredr_capable(hdev))
1373 return MGMT_STATUS_NOT_SUPPORTED;
1374 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1375 return MGMT_STATUS_REJECTED;
1377 return MGMT_STATUS_SUCCESS;
1380 static u8 mgmt_le_support(struct hci_dev *hdev)
1382 if (!lmp_le_capable(hdev))
1383 return MGMT_STATUS_NOT_SUPPORTED;
1384 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1385 return MGMT_STATUS_REJECTED;
1387 return MGMT_STATUS_SUCCESS;
1390 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1393 struct mgmt_pending_cmd *cmd = data;
1395 bt_dev_dbg(hdev, "err %d", err);
1397 /* Make sure cmd still outstanding. */
1398 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1404 u8 mgmt_err = mgmt_status(err);
1405 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1406 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1410 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1411 hdev->discov_timeout > 0) {
1412 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1413 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1416 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1417 new_settings(hdev, cmd->sk);
1420 mgmt_pending_remove(cmd);
1421 hci_dev_unlock(hdev);
1424 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1426 BT_DBG("%s", hdev->name);
1428 return hci_update_discoverable_sync(hdev);
1431 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1434 struct mgmt_cp_set_discoverable *cp = data;
1435 struct mgmt_pending_cmd *cmd;
1439 bt_dev_dbg(hdev, "sock %p", sk);
1441 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1442 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1444 MGMT_STATUS_REJECTED);
1446 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1448 MGMT_STATUS_INVALID_PARAMS);
1450 timeout = __le16_to_cpu(cp->timeout);
1452 /* Disabling discoverable requires that no timeout is set,
1453 * and enabling limited discoverable requires a timeout.
1455 if ((cp->val == 0x00 && timeout > 0) ||
1456 (cp->val == 0x02 && timeout == 0))
1457 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1458 MGMT_STATUS_INVALID_PARAMS);
1462 if (!hdev_is_powered(hdev) && timeout > 0) {
1463 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1464 MGMT_STATUS_NOT_POWERED);
1468 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1469 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1470 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1475 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1476 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1477 MGMT_STATUS_REJECTED);
1481 if (hdev->advertising_paused) {
1482 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1487 if (!hdev_is_powered(hdev)) {
1488 bool changed = false;
1490 /* Setting limited discoverable when powered off is
1491 * not a valid operation since it requires a timeout
1492 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1494 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1495 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1499 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1504 err = new_settings(hdev, sk);
1509 /* If the current mode is the same, then just update the timeout
1510 * value with the new value. And if only the timeout gets updated,
1511 * then no need for any HCI transactions.
1513 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1514 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1515 HCI_LIMITED_DISCOVERABLE)) {
1516 cancel_delayed_work(&hdev->discov_off);
1517 hdev->discov_timeout = timeout;
1519 if (cp->val && hdev->discov_timeout > 0) {
1520 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1521 queue_delayed_work(hdev->req_workqueue,
1522 &hdev->discov_off, to);
1525 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1529 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1535 /* Cancel any potential discoverable timeout that might be
1536 * still active and store new timeout value. The arming of
1537 * the timeout happens in the complete handler.
1539 cancel_delayed_work(&hdev->discov_off);
1540 hdev->discov_timeout = timeout;
1543 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1545 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1547 /* Limited discoverable mode */
1548 if (cp->val == 0x02)
1549 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1551 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1553 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1554 mgmt_set_discoverable_complete);
1557 mgmt_pending_remove(cmd);
1560 hci_dev_unlock(hdev);
1564 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1567 struct mgmt_pending_cmd *cmd = data;
1569 bt_dev_dbg(hdev, "err %d", err);
1571 /* Make sure cmd still outstanding. */
1572 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1578 u8 mgmt_err = mgmt_status(err);
1579 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1583 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1584 new_settings(hdev, cmd->sk);
1588 mgmt_pending_remove(cmd);
1590 hci_dev_unlock(hdev);
1593 static int set_connectable_update_settings(struct hci_dev *hdev,
1594 struct sock *sk, u8 val)
1596 bool changed = false;
1599 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1603 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1605 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1606 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1609 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1614 hci_req_update_scan(hdev);
1615 hci_update_passive_scan(hdev);
1616 return new_settings(hdev, sk);
1622 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1624 BT_DBG("%s", hdev->name);
1626 return hci_update_connectable_sync(hdev);
1629 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1632 struct mgmt_mode *cp = data;
1633 struct mgmt_pending_cmd *cmd;
1636 bt_dev_dbg(hdev, "sock %p", sk);
1638 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1639 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1641 MGMT_STATUS_REJECTED);
1643 if (cp->val != 0x00 && cp->val != 0x01)
1644 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1645 MGMT_STATUS_INVALID_PARAMS);
1649 if (!hdev_is_powered(hdev)) {
1650 err = set_connectable_update_settings(hdev, sk, cp->val);
1654 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1655 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1656 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1661 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1668 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1670 if (hdev->discov_timeout > 0)
1671 cancel_delayed_work(&hdev->discov_off);
1673 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1674 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1675 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1678 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1679 mgmt_set_connectable_complete);
1682 mgmt_pending_remove(cmd);
1685 hci_dev_unlock(hdev);
1689 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1692 struct mgmt_mode *cp = data;
1696 bt_dev_dbg(hdev, "sock %p", sk);
1698 if (cp->val != 0x00 && cp->val != 0x01)
1699 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1700 MGMT_STATUS_INVALID_PARAMS);
1705 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1707 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1709 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1714 /* In limited privacy mode the change of bondable mode
1715 * may affect the local advertising address.
1717 hci_update_discoverable(hdev);
1719 err = new_settings(hdev, sk);
1723 hci_dev_unlock(hdev);
1727 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1730 struct mgmt_mode *cp = data;
1731 struct mgmt_pending_cmd *cmd;
1735 bt_dev_dbg(hdev, "sock %p", sk);
1737 status = mgmt_bredr_support(hdev);
1739 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1742 if (cp->val != 0x00 && cp->val != 0x01)
1743 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1744 MGMT_STATUS_INVALID_PARAMS);
1748 if (!hdev_is_powered(hdev)) {
1749 bool changed = false;
1751 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1752 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1756 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1761 err = new_settings(hdev, sk);
1766 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1767 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1774 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1775 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1779 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1785 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1787 mgmt_pending_remove(cmd);
1792 hci_dev_unlock(hdev);
1796 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1798 struct cmd_lookup match = { NULL, hdev };
1799 struct mgmt_pending_cmd *cmd = data;
1800 struct mgmt_mode *cp = cmd->param;
1801 u8 enable = cp->val;
1804 /* Make sure cmd still outstanding. */
1805 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1809 u8 mgmt_err = mgmt_status(err);
1811 if (enable && hci_dev_test_and_clear_flag(hdev,
1813 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1814 new_settings(hdev, NULL);
1817 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1823 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1825 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1828 changed = hci_dev_test_and_clear_flag(hdev,
1831 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1834 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1837 new_settings(hdev, match.sk);
1842 hci_update_eir_sync(hdev);
1845 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1847 struct mgmt_pending_cmd *cmd = data;
1848 struct mgmt_mode *cp = cmd->param;
1849 bool changed = false;
1853 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1855 err = hci_write_ssp_mode_sync(hdev, cp->val);
1857 if (!err && changed)
1858 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1863 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1865 struct mgmt_mode *cp = data;
1866 struct mgmt_pending_cmd *cmd;
1870 bt_dev_dbg(hdev, "sock %p", sk);
1872 status = mgmt_bredr_support(hdev);
1874 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1876 if (!lmp_ssp_capable(hdev))
1877 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1878 MGMT_STATUS_NOT_SUPPORTED);
1880 if (cp->val != 0x00 && cp->val != 0x01)
1881 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1882 MGMT_STATUS_INVALID_PARAMS);
1886 if (!hdev_is_powered(hdev)) {
1890 changed = !hci_dev_test_and_set_flag(hdev,
1893 changed = hci_dev_test_and_clear_flag(hdev,
1896 changed = hci_dev_test_and_clear_flag(hdev,
1899 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1902 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1907 err = new_settings(hdev, sk);
1912 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1913 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1918 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1919 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1923 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1927 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
1931 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1932 MGMT_STATUS_FAILED);
1935 mgmt_pending_remove(cmd);
1939 hci_dev_unlock(hdev);
1943 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1945 struct mgmt_mode *cp = data;
1950 bt_dev_dbg(hdev, "sock %p", sk);
1952 if (!IS_ENABLED(CONFIG_BT_HS))
1953 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1954 MGMT_STATUS_NOT_SUPPORTED);
1956 status = mgmt_bredr_support(hdev);
1958 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1960 if (!lmp_ssp_capable(hdev))
1961 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1962 MGMT_STATUS_NOT_SUPPORTED);
1964 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1965 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1966 MGMT_STATUS_REJECTED);
1968 if (cp->val != 0x00 && cp->val != 0x01)
1969 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1970 MGMT_STATUS_INVALID_PARAMS);
1974 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1975 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1981 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1983 if (hdev_is_powered(hdev)) {
1984 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1985 MGMT_STATUS_REJECTED);
1989 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1992 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1997 err = new_settings(hdev, sk);
2000 hci_dev_unlock(hdev);
2004 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2006 struct cmd_lookup match = { NULL, hdev };
2007 u8 status = mgmt_status(err);
2009 bt_dev_dbg(hdev, "err %d", err);
2012 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2017 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2019 new_settings(hdev, match.sk);
2025 static int set_le_sync(struct hci_dev *hdev, void *data)
2027 struct mgmt_pending_cmd *cmd = data;
2028 struct mgmt_mode *cp = cmd->param;
2033 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2034 hci_disable_advertising_sync(hdev);
2036 if (ext_adv_capable(hdev))
2037 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2039 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2042 err = hci_write_le_host_supported_sync(hdev, val, 0);
2044 /* Make sure the controller has a good default for
2045 * advertising data. Restrict the update to when LE
2046 * has actually been enabled. During power on, the
2047 * update in powered_update_hci will take care of it.
2049 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2050 if (ext_adv_capable(hdev)) {
2053 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2055 hci_update_scan_rsp_data_sync(hdev, 0x00);
2057 hci_update_adv_data_sync(hdev, 0x00);
2058 hci_update_scan_rsp_data_sync(hdev, 0x00);
2061 hci_update_passive_scan(hdev);
2067 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2069 struct mgmt_mode *cp = data;
2070 struct mgmt_pending_cmd *cmd;
2074 bt_dev_dbg(hdev, "sock %p", sk);
2076 if (!lmp_le_capable(hdev))
2077 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2078 MGMT_STATUS_NOT_SUPPORTED);
2080 if (cp->val != 0x00 && cp->val != 0x01)
2081 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2082 MGMT_STATUS_INVALID_PARAMS);
2084 /* Bluetooth single mode LE only controllers or dual-mode
2085 * controllers configured as LE only devices, do not allow
2086 * switching LE off. These have either LE enabled explicitly
2087 * or BR/EDR has been previously switched off.
2089 * When trying to enable an already enabled LE, then gracefully
2090 * send a positive response. Trying to disable it however will
2091 * result into rejection.
2093 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2094 if (cp->val == 0x01)
2095 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2097 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2098 MGMT_STATUS_REJECTED);
2104 enabled = lmp_host_le_capable(hdev);
2107 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
2109 if (!hdev_is_powered(hdev) || val == enabled) {
2110 bool changed = false;
2112 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2113 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2117 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2118 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2122 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2127 err = new_settings(hdev, sk);
2132 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2133 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2134 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2139 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2143 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2147 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2148 MGMT_STATUS_FAILED);
2151 mgmt_pending_remove(cmd);
2155 hci_dev_unlock(hdev);
2159 /* This is a helper function to test for pending mgmt commands that can
2160 * cause CoD or EIR HCI commands. We can only allow one such pending
2161 * mgmt command at a time since otherwise we cannot easily track what
2162 * the current values are, will be, and based on that calculate if a new
2163 * HCI command needs to be sent and if yes with what value.
2165 static bool pending_eir_or_class(struct hci_dev *hdev)
2167 struct mgmt_pending_cmd *cmd;
2169 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2170 switch (cmd->opcode) {
2171 case MGMT_OP_ADD_UUID:
2172 case MGMT_OP_REMOVE_UUID:
2173 case MGMT_OP_SET_DEV_CLASS:
2174 case MGMT_OP_SET_POWERED:
2182 static const u8 bluetooth_base_uuid[] = {
2183 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2184 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2187 static u8 get_uuid_size(const u8 *uuid)
2191 if (memcmp(uuid, bluetooth_base_uuid, 12))
2194 val = get_unaligned_le32(&uuid[12]);
2201 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2203 struct mgmt_pending_cmd *cmd = data;
2205 bt_dev_dbg(hdev, "err %d", err);
2207 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2208 mgmt_status(err), hdev->dev_class, 3);
2210 mgmt_pending_free(cmd);
2213 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2217 err = hci_update_class_sync(hdev);
2221 return hci_update_eir_sync(hdev);
2224 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2226 struct mgmt_cp_add_uuid *cp = data;
2227 struct mgmt_pending_cmd *cmd;
2228 struct bt_uuid *uuid;
2231 bt_dev_dbg(hdev, "sock %p", sk);
2235 if (pending_eir_or_class(hdev)) {
2236 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2241 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2247 memcpy(uuid->uuid, cp->uuid, 16);
2248 uuid->svc_hint = cp->svc_hint;
2249 uuid->size = get_uuid_size(cp->uuid);
2251 list_add_tail(&uuid->list, &hdev->uuids);
2253 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2259 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2261 mgmt_pending_free(cmd);
2266 hci_dev_unlock(hdev);
2270 static bool enable_service_cache(struct hci_dev *hdev)
2272 if (!hdev_is_powered(hdev))
2275 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2276 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2284 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2288 err = hci_update_class_sync(hdev);
2292 return hci_update_eir_sync(hdev);
2295 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2298 struct mgmt_cp_remove_uuid *cp = data;
2299 struct mgmt_pending_cmd *cmd;
2300 struct bt_uuid *match, *tmp;
2301 static const u8 bt_uuid_any[] = {
2302 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2306 bt_dev_dbg(hdev, "sock %p", sk);
2310 if (pending_eir_or_class(hdev)) {
2311 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2316 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2317 hci_uuids_clear(hdev);
2319 if (enable_service_cache(hdev)) {
2320 err = mgmt_cmd_complete(sk, hdev->id,
2321 MGMT_OP_REMOVE_UUID,
2322 0, hdev->dev_class, 3);
2331 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2332 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2335 list_del(&match->list);
2341 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2342 MGMT_STATUS_INVALID_PARAMS);
2347 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2353 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2354 mgmt_class_complete);
2356 mgmt_pending_free(cmd);
2359 hci_dev_unlock(hdev);
2363 static int set_class_sync(struct hci_dev *hdev, void *data)
2367 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2368 cancel_delayed_work_sync(&hdev->service_cache);
2369 err = hci_update_eir_sync(hdev);
2375 return hci_update_class_sync(hdev);
2378 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2381 struct mgmt_cp_set_dev_class *cp = data;
2382 struct mgmt_pending_cmd *cmd;
2385 bt_dev_dbg(hdev, "sock %p", sk);
2387 if (!lmp_bredr_capable(hdev))
2388 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2389 MGMT_STATUS_NOT_SUPPORTED);
2393 if (pending_eir_or_class(hdev)) {
2394 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2399 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2400 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2401 MGMT_STATUS_INVALID_PARAMS);
2405 hdev->major_class = cp->major;
2406 hdev->minor_class = cp->minor;
2408 if (!hdev_is_powered(hdev)) {
2409 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2410 hdev->dev_class, 3);
2414 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2420 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2421 mgmt_class_complete);
2423 mgmt_pending_free(cmd);
2426 hci_dev_unlock(hdev);
2430 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2433 struct mgmt_cp_load_link_keys *cp = data;
2434 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2435 sizeof(struct mgmt_link_key_info));
2436 u16 key_count, expected_len;
2440 bt_dev_dbg(hdev, "sock %p", sk);
2442 if (!lmp_bredr_capable(hdev))
2443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2444 MGMT_STATUS_NOT_SUPPORTED);
2446 key_count = __le16_to_cpu(cp->key_count);
2447 if (key_count > max_key_count) {
2448 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2450 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2451 MGMT_STATUS_INVALID_PARAMS);
2454 expected_len = struct_size(cp, keys, key_count);
2455 if (expected_len != len) {
2456 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2458 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2459 MGMT_STATUS_INVALID_PARAMS);
2462 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2463 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2464 MGMT_STATUS_INVALID_PARAMS);
2466 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2469 for (i = 0; i < key_count; i++) {
2470 struct mgmt_link_key_info *key = &cp->keys[i];
2472 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2473 return mgmt_cmd_status(sk, hdev->id,
2474 MGMT_OP_LOAD_LINK_KEYS,
2475 MGMT_STATUS_INVALID_PARAMS);
2480 hci_link_keys_clear(hdev);
2483 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2485 changed = hci_dev_test_and_clear_flag(hdev,
2486 HCI_KEEP_DEBUG_KEYS);
2489 new_settings(hdev, NULL);
2491 for (i = 0; i < key_count; i++) {
2492 struct mgmt_link_key_info *key = &cp->keys[i];
2494 if (hci_is_blocked_key(hdev,
2495 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2497 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2502 /* Always ignore debug keys and require a new pairing if
2503 * the user wants to use them.
2505 if (key->type == HCI_LK_DEBUG_COMBINATION)
2508 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2509 key->type, key->pin_len, NULL);
2512 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2514 hci_dev_unlock(hdev);
2519 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2520 u8 addr_type, struct sock *skip_sk)
2522 struct mgmt_ev_device_unpaired ev;
2524 bacpy(&ev.addr.bdaddr, bdaddr);
2525 ev.addr.type = addr_type;
2527 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2531 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2534 struct mgmt_cp_unpair_device *cp = data;
2535 struct mgmt_rp_unpair_device rp;
2536 struct hci_conn_params *params;
2537 struct mgmt_pending_cmd *cmd;
2538 struct hci_conn *conn;
2542 memset(&rp, 0, sizeof(rp));
2543 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2544 rp.addr.type = cp->addr.type;
2546 if (!bdaddr_type_is_valid(cp->addr.type))
2547 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2548 MGMT_STATUS_INVALID_PARAMS,
2551 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2552 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2553 MGMT_STATUS_INVALID_PARAMS,
2558 if (!hdev_is_powered(hdev)) {
2559 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2560 MGMT_STATUS_NOT_POWERED, &rp,
2565 if (cp->addr.type == BDADDR_BREDR) {
2566 /* If disconnection is requested, then look up the
2567 * connection. If the remote device is connected, it
2568 * will be later used to terminate the link.
2570 * Setting it to NULL explicitly will cause no
2571 * termination of the link.
2574 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2579 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2581 err = mgmt_cmd_complete(sk, hdev->id,
2582 MGMT_OP_UNPAIR_DEVICE,
2583 MGMT_STATUS_NOT_PAIRED, &rp,
2591 /* LE address type */
2592 addr_type = le_addr_type(cp->addr.type);
2594 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2595 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2597 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2598 MGMT_STATUS_NOT_PAIRED, &rp,
2603 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2605 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2610 /* Defer clearing up the connection parameters until closing to
2611 * give a chance of keeping them if a repairing happens.
2613 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2615 /* Disable auto-connection parameters if present */
2616 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2618 if (params->explicit_connect)
2619 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2621 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2624 /* If disconnection is not requested, then clear the connection
2625 * variable so that the link is not terminated.
2627 if (!cp->disconnect)
2631 /* If the connection variable is set, then termination of the
2632 * link is requested.
2635 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2637 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2641 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2648 cmd->cmd_complete = addr_cmd_complete;
2650 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2652 mgmt_pending_remove(cmd);
2655 hci_dev_unlock(hdev);
2659 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2662 struct mgmt_cp_disconnect *cp = data;
2663 struct mgmt_rp_disconnect rp;
2664 struct mgmt_pending_cmd *cmd;
2665 struct hci_conn *conn;
2668 bt_dev_dbg(hdev, "sock %p", sk);
2670 memset(&rp, 0, sizeof(rp));
2671 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2672 rp.addr.type = cp->addr.type;
2674 if (!bdaddr_type_is_valid(cp->addr.type))
2675 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2676 MGMT_STATUS_INVALID_PARAMS,
2681 if (!test_bit(HCI_UP, &hdev->flags)) {
2682 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2683 MGMT_STATUS_NOT_POWERED, &rp,
2688 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2689 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2690 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2694 if (cp->addr.type == BDADDR_BREDR)
2695 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2698 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2699 le_addr_type(cp->addr.type));
2701 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2702 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2703 MGMT_STATUS_NOT_CONNECTED, &rp,
2708 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2714 cmd->cmd_complete = generic_cmd_complete;
2716 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2718 mgmt_pending_remove(cmd);
2721 hci_dev_unlock(hdev);
2725 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2727 switch (link_type) {
2729 switch (addr_type) {
2730 case ADDR_LE_DEV_PUBLIC:
2731 return BDADDR_LE_PUBLIC;
2734 /* Fallback to LE Random address type */
2735 return BDADDR_LE_RANDOM;
2739 /* Fallback to BR/EDR type */
2740 return BDADDR_BREDR;
2744 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2747 struct mgmt_rp_get_connections *rp;
2752 bt_dev_dbg(hdev, "sock %p", sk);
2756 if (!hdev_is_powered(hdev)) {
2757 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2758 MGMT_STATUS_NOT_POWERED);
2763 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2764 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2768 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2775 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2776 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2778 bacpy(&rp->addr[i].bdaddr, &c->dst);
2779 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2780 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2785 rp->conn_count = cpu_to_le16(i);
2787 /* Recalculate length in case of filtered SCO connections, etc */
2788 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2789 struct_size(rp, addr, i));
2794 hci_dev_unlock(hdev);
2798 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2799 struct mgmt_cp_pin_code_neg_reply *cp)
2801 struct mgmt_pending_cmd *cmd;
2804 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2809 cmd->cmd_complete = addr_cmd_complete;
2811 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2812 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2814 mgmt_pending_remove(cmd);
2819 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2822 struct hci_conn *conn;
2823 struct mgmt_cp_pin_code_reply *cp = data;
2824 struct hci_cp_pin_code_reply reply;
2825 struct mgmt_pending_cmd *cmd;
2828 bt_dev_dbg(hdev, "sock %p", sk);
2832 if (!hdev_is_powered(hdev)) {
2833 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2834 MGMT_STATUS_NOT_POWERED);
2838 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2840 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2841 MGMT_STATUS_NOT_CONNECTED);
2845 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2846 struct mgmt_cp_pin_code_neg_reply ncp;
2848 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2850 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2852 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2854 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2855 MGMT_STATUS_INVALID_PARAMS);
2860 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2866 cmd->cmd_complete = addr_cmd_complete;
2868 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2869 reply.pin_len = cp->pin_len;
2870 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2872 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2874 mgmt_pending_remove(cmd);
2877 hci_dev_unlock(hdev);
2881 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2884 struct mgmt_cp_set_io_capability *cp = data;
2886 bt_dev_dbg(hdev, "sock %p", sk);
2888 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2889 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2890 MGMT_STATUS_INVALID_PARAMS);
2894 hdev->io_capability = cp->io_capability;
2896 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2898 hci_dev_unlock(hdev);
2900 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2904 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2906 struct hci_dev *hdev = conn->hdev;
2907 struct mgmt_pending_cmd *cmd;
2909 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2910 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2913 if (cmd->user_data != conn)
2922 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2924 struct mgmt_rp_pair_device rp;
2925 struct hci_conn *conn = cmd->user_data;
2928 bacpy(&rp.addr.bdaddr, &conn->dst);
2929 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2931 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2932 status, &rp, sizeof(rp));
2934 /* So we don't get further callbacks for this connection */
2935 conn->connect_cfm_cb = NULL;
2936 conn->security_cfm_cb = NULL;
2937 conn->disconn_cfm_cb = NULL;
2939 hci_conn_drop(conn);
2941 /* The device is paired so there is no need to remove
2942 * its connection parameters anymore.
2944 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2951 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2953 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2954 struct mgmt_pending_cmd *cmd;
2956 cmd = find_pairing(conn);
2958 cmd->cmd_complete(cmd, status);
2959 mgmt_pending_remove(cmd);
2963 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2965 struct mgmt_pending_cmd *cmd;
2967 BT_DBG("status %u", status);
2969 cmd = find_pairing(conn);
2971 BT_DBG("Unable to find a pending command");
2975 cmd->cmd_complete(cmd, mgmt_status(status));
2976 mgmt_pending_remove(cmd);
2979 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2981 struct mgmt_pending_cmd *cmd;
2983 BT_DBG("status %u", status);
2988 cmd = find_pairing(conn);
2990 BT_DBG("Unable to find a pending command");
2994 cmd->cmd_complete(cmd, mgmt_status(status));
2995 mgmt_pending_remove(cmd);
2998 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3001 struct mgmt_cp_pair_device *cp = data;
3002 struct mgmt_rp_pair_device rp;
3003 struct mgmt_pending_cmd *cmd;
3004 u8 sec_level, auth_type;
3005 struct hci_conn *conn;
3008 bt_dev_dbg(hdev, "sock %p", sk);
3010 memset(&rp, 0, sizeof(rp));
3011 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3012 rp.addr.type = cp->addr.type;
3014 if (!bdaddr_type_is_valid(cp->addr.type))
3015 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3016 MGMT_STATUS_INVALID_PARAMS,
3019 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3020 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3021 MGMT_STATUS_INVALID_PARAMS,
3026 if (!hdev_is_powered(hdev)) {
3027 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3028 MGMT_STATUS_NOT_POWERED, &rp,
3033 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3034 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3035 MGMT_STATUS_ALREADY_PAIRED, &rp,
3040 sec_level = BT_SECURITY_MEDIUM;
3041 auth_type = HCI_AT_DEDICATED_BONDING;
3043 if (cp->addr.type == BDADDR_BREDR) {
3044 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3045 auth_type, CONN_REASON_PAIR_DEVICE);
3047 u8 addr_type = le_addr_type(cp->addr.type);
3048 struct hci_conn_params *p;
3050 /* When pairing a new device, it is expected to remember
3051 * this device for future connections. Adding the connection
3052 * parameter information ahead of time allows tracking
3053 * of the peripheral preferred values and will speed up any
3054 * further connection establishment.
3056 * If connection parameters already exist, then they
3057 * will be kept and this function does nothing.
3059 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3061 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3062 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3064 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3065 sec_level, HCI_LE_CONN_TIMEOUT,
3066 CONN_REASON_PAIR_DEVICE);
3072 if (PTR_ERR(conn) == -EBUSY)
3073 status = MGMT_STATUS_BUSY;
3074 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3075 status = MGMT_STATUS_NOT_SUPPORTED;
3076 else if (PTR_ERR(conn) == -ECONNREFUSED)
3077 status = MGMT_STATUS_REJECTED;
3079 status = MGMT_STATUS_CONNECT_FAILED;
3081 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3082 status, &rp, sizeof(rp));
3086 if (conn->connect_cfm_cb) {
3087 hci_conn_drop(conn);
3088 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3089 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3093 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3096 hci_conn_drop(conn);
3100 cmd->cmd_complete = pairing_complete;
3102 /* For LE, just connecting isn't a proof that the pairing finished */
3103 if (cp->addr.type == BDADDR_BREDR) {
3104 conn->connect_cfm_cb = pairing_complete_cb;
3105 conn->security_cfm_cb = pairing_complete_cb;
3106 conn->disconn_cfm_cb = pairing_complete_cb;
3108 conn->connect_cfm_cb = le_pairing_complete_cb;
3109 conn->security_cfm_cb = le_pairing_complete_cb;
3110 conn->disconn_cfm_cb = le_pairing_complete_cb;
3113 conn->io_capability = cp->io_cap;
3114 cmd->user_data = hci_conn_get(conn);
3116 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3117 hci_conn_security(conn, sec_level, auth_type, true)) {
3118 cmd->cmd_complete(cmd, 0);
3119 mgmt_pending_remove(cmd);
3125 hci_dev_unlock(hdev);
3129 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3132 struct mgmt_addr_info *addr = data;
3133 struct mgmt_pending_cmd *cmd;
3134 struct hci_conn *conn;
3137 bt_dev_dbg(hdev, "sock %p", sk);
3141 if (!hdev_is_powered(hdev)) {
3142 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3143 MGMT_STATUS_NOT_POWERED);
3147 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3149 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3150 MGMT_STATUS_INVALID_PARAMS);
3154 conn = cmd->user_data;
3156 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3157 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3158 MGMT_STATUS_INVALID_PARAMS);
3162 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3163 mgmt_pending_remove(cmd);
3165 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3166 addr, sizeof(*addr));
3168 /* Since user doesn't want to proceed with the connection, abort any
3169 * ongoing pairing and then terminate the link if it was created
3170 * because of the pair device action.
3172 if (addr->type == BDADDR_BREDR)
3173 hci_remove_link_key(hdev, &addr->bdaddr);
3175 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3176 le_addr_type(addr->type));
3178 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3179 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3182 hci_dev_unlock(hdev);
3186 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3187 struct mgmt_addr_info *addr, u16 mgmt_op,
3188 u16 hci_op, __le32 passkey)
3190 struct mgmt_pending_cmd *cmd;
3191 struct hci_conn *conn;
3196 if (!hdev_is_powered(hdev)) {
3197 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3198 MGMT_STATUS_NOT_POWERED, addr,
3203 if (addr->type == BDADDR_BREDR)
3204 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3206 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3207 le_addr_type(addr->type));
3210 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3211 MGMT_STATUS_NOT_CONNECTED, addr,
3216 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3217 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3219 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3220 MGMT_STATUS_SUCCESS, addr,
3223 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3224 MGMT_STATUS_FAILED, addr,
3230 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3236 cmd->cmd_complete = addr_cmd_complete;
3238 /* Continue with pairing via HCI */
3239 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3240 struct hci_cp_user_passkey_reply cp;
3242 bacpy(&cp.bdaddr, &addr->bdaddr);
3243 cp.passkey = passkey;
3244 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3246 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3250 mgmt_pending_remove(cmd);
3253 hci_dev_unlock(hdev);
3257 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3258 void *data, u16 len)
3260 struct mgmt_cp_pin_code_neg_reply *cp = data;
3262 bt_dev_dbg(hdev, "sock %p", sk);
3264 return user_pairing_resp(sk, hdev, &cp->addr,
3265 MGMT_OP_PIN_CODE_NEG_REPLY,
3266 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3269 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3272 struct mgmt_cp_user_confirm_reply *cp = data;
3274 bt_dev_dbg(hdev, "sock %p", sk);
3276 if (len != sizeof(*cp))
3277 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3278 MGMT_STATUS_INVALID_PARAMS);
3280 return user_pairing_resp(sk, hdev, &cp->addr,
3281 MGMT_OP_USER_CONFIRM_REPLY,
3282 HCI_OP_USER_CONFIRM_REPLY, 0);
3285 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3286 void *data, u16 len)
3288 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3290 bt_dev_dbg(hdev, "sock %p", sk);
3292 return user_pairing_resp(sk, hdev, &cp->addr,
3293 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3294 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3297 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3300 struct mgmt_cp_user_passkey_reply *cp = data;
3302 bt_dev_dbg(hdev, "sock %p", sk);
3304 return user_pairing_resp(sk, hdev, &cp->addr,
3305 MGMT_OP_USER_PASSKEY_REPLY,
3306 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3309 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3310 void *data, u16 len)
3312 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3314 bt_dev_dbg(hdev, "sock %p", sk);
3316 return user_pairing_resp(sk, hdev, &cp->addr,
3317 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3318 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3321 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3323 struct adv_info *adv_instance;
3325 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3329 /* stop if current instance doesn't need to be changed */
3330 if (!(adv_instance->flags & flags))
3333 cancel_adv_timeout(hdev);
3335 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3339 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3344 static int name_changed_sync(struct hci_dev *hdev, void *data)
3346 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3349 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3351 struct mgmt_pending_cmd *cmd = data;
3352 struct mgmt_cp_set_local_name *cp = cmd->param;
3353 u8 status = mgmt_status(err);
3355 bt_dev_dbg(hdev, "err %d", err);
3357 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3361 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3364 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3367 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3368 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3371 mgmt_pending_remove(cmd);
3374 static int set_name_sync(struct hci_dev *hdev, void *data)
3376 if (lmp_bredr_capable(hdev)) {
3377 hci_update_name_sync(hdev);
3378 hci_update_eir_sync(hdev);
3381 /* The name is stored in the scan response data and so
3382 * no need to update the advertising data here.
3384 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3385 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3390 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3393 struct mgmt_cp_set_local_name *cp = data;
3394 struct mgmt_pending_cmd *cmd;
3397 bt_dev_dbg(hdev, "sock %p", sk);
3401 /* If the old values are the same as the new ones just return a
3402 * direct command complete event.
3404 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3405 !memcmp(hdev->short_name, cp->short_name,
3406 sizeof(hdev->short_name))) {
3407 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3412 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3414 if (!hdev_is_powered(hdev)) {
3415 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3417 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3422 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3423 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3424 ext_info_changed(hdev, sk);
3429 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3433 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3437 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3438 MGMT_STATUS_FAILED);
3441 mgmt_pending_remove(cmd);
3446 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3449 hci_dev_unlock(hdev);
3453 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3455 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3458 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3461 struct mgmt_cp_set_appearance *cp = data;
3465 bt_dev_dbg(hdev, "sock %p", sk);
3467 if (!lmp_le_capable(hdev))
3468 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3469 MGMT_STATUS_NOT_SUPPORTED);
3471 appearance = le16_to_cpu(cp->appearance);
3475 if (hdev->appearance != appearance) {
3476 hdev->appearance = appearance;
3478 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3479 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3482 ext_info_changed(hdev, sk);
3485 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3488 hci_dev_unlock(hdev);
3493 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3494 void *data, u16 len)
3496 struct mgmt_rp_get_phy_configuration rp;
3498 bt_dev_dbg(hdev, "sock %p", sk);
3502 memset(&rp, 0, sizeof(rp));
3504 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3505 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3506 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3508 hci_dev_unlock(hdev);
3510 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3514 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3516 struct mgmt_ev_phy_configuration_changed ev;
3518 memset(&ev, 0, sizeof(ev));
3520 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3522 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3526 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3528 struct mgmt_pending_cmd *cmd = data;
3529 struct sk_buff *skb = cmd->skb;
3530 u8 status = mgmt_status(err);
3532 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3537 status = MGMT_STATUS_FAILED;
3538 else if (IS_ERR(skb))
3539 status = mgmt_status(PTR_ERR(skb));
3541 status = mgmt_status(skb->data[0]);
3544 bt_dev_dbg(hdev, "status %d", status);
3547 mgmt_cmd_status(cmd->sk, hdev->id,
3548 MGMT_OP_SET_PHY_CONFIGURATION, status);
3550 mgmt_cmd_complete(cmd->sk, hdev->id,
3551 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3554 mgmt_phy_configuration_changed(hdev, cmd->sk);
3557 if (skb && !IS_ERR(skb))
3560 mgmt_pending_remove(cmd);
3563 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3565 struct mgmt_pending_cmd *cmd = data;
3566 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3567 struct hci_cp_le_set_default_phy cp_phy;
3568 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3570 memset(&cp_phy, 0, sizeof(cp_phy));
3572 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3573 cp_phy.all_phys |= 0x01;
3575 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3576 cp_phy.all_phys |= 0x02;
3578 if (selected_phys & MGMT_PHY_LE_1M_TX)
3579 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3581 if (selected_phys & MGMT_PHY_LE_2M_TX)
3582 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3584 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3585 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3587 if (selected_phys & MGMT_PHY_LE_1M_RX)
3588 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3590 if (selected_phys & MGMT_PHY_LE_2M_RX)
3591 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3593 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3594 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3596 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3597 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3602 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3603 void *data, u16 len)
3605 struct mgmt_cp_set_phy_configuration *cp = data;
3606 struct mgmt_pending_cmd *cmd;
3607 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3608 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3609 bool changed = false;
3612 bt_dev_dbg(hdev, "sock %p", sk);
3614 configurable_phys = get_configurable_phys(hdev);
3615 supported_phys = get_supported_phys(hdev);
3616 selected_phys = __le32_to_cpu(cp->selected_phys);
3618 if (selected_phys & ~supported_phys)
3619 return mgmt_cmd_status(sk, hdev->id,
3620 MGMT_OP_SET_PHY_CONFIGURATION,
3621 MGMT_STATUS_INVALID_PARAMS);
3623 unconfigure_phys = supported_phys & ~configurable_phys;
3625 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3626 return mgmt_cmd_status(sk, hdev->id,
3627 MGMT_OP_SET_PHY_CONFIGURATION,
3628 MGMT_STATUS_INVALID_PARAMS);
3630 if (selected_phys == get_selected_phys(hdev))
3631 return mgmt_cmd_complete(sk, hdev->id,
3632 MGMT_OP_SET_PHY_CONFIGURATION,
3637 if (!hdev_is_powered(hdev)) {
3638 err = mgmt_cmd_status(sk, hdev->id,
3639 MGMT_OP_SET_PHY_CONFIGURATION,
3640 MGMT_STATUS_REJECTED);
3644 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3645 err = mgmt_cmd_status(sk, hdev->id,
3646 MGMT_OP_SET_PHY_CONFIGURATION,
3651 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3652 pkt_type |= (HCI_DH3 | HCI_DM3);
3654 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3656 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3657 pkt_type |= (HCI_DH5 | HCI_DM5);
3659 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3661 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3662 pkt_type &= ~HCI_2DH1;
3664 pkt_type |= HCI_2DH1;
3666 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3667 pkt_type &= ~HCI_2DH3;
3669 pkt_type |= HCI_2DH3;
3671 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3672 pkt_type &= ~HCI_2DH5;
3674 pkt_type |= HCI_2DH5;
3676 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3677 pkt_type &= ~HCI_3DH1;
3679 pkt_type |= HCI_3DH1;
3681 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3682 pkt_type &= ~HCI_3DH3;
3684 pkt_type |= HCI_3DH3;
3686 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3687 pkt_type &= ~HCI_3DH5;
3689 pkt_type |= HCI_3DH5;
3691 if (pkt_type != hdev->pkt_type) {
3692 hdev->pkt_type = pkt_type;
3696 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3697 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3699 mgmt_phy_configuration_changed(hdev, sk);
3701 err = mgmt_cmd_complete(sk, hdev->id,
3702 MGMT_OP_SET_PHY_CONFIGURATION,
3708 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3713 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
3714 set_default_phy_complete);
3717 err = mgmt_cmd_status(sk, hdev->id,
3718 MGMT_OP_SET_PHY_CONFIGURATION,
3719 MGMT_STATUS_FAILED);
3722 mgmt_pending_remove(cmd);
3726 hci_dev_unlock(hdev);
3731 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3734 int err = MGMT_STATUS_SUCCESS;
3735 struct mgmt_cp_set_blocked_keys *keys = data;
3736 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3737 sizeof(struct mgmt_blocked_key_info));
3738 u16 key_count, expected_len;
3741 bt_dev_dbg(hdev, "sock %p", sk);
3743 key_count = __le16_to_cpu(keys->key_count);
3744 if (key_count > max_key_count) {
3745 bt_dev_err(hdev, "too big key_count value %u", key_count);
3746 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3747 MGMT_STATUS_INVALID_PARAMS);
3750 expected_len = struct_size(keys, keys, key_count);
3751 if (expected_len != len) {
3752 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3755 MGMT_STATUS_INVALID_PARAMS);
3760 hci_blocked_keys_clear(hdev);
3762 for (i = 0; i < keys->key_count; ++i) {
3763 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3766 err = MGMT_STATUS_NO_RESOURCES;
3770 b->type = keys->keys[i].type;
3771 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3772 list_add_rcu(&b->list, &hdev->blocked_keys);
3774 hci_dev_unlock(hdev);
3776 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3780 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3781 void *data, u16 len)
3783 struct mgmt_mode *cp = data;
3785 bool changed = false;
3787 bt_dev_dbg(hdev, "sock %p", sk);
3789 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3790 return mgmt_cmd_status(sk, hdev->id,
3791 MGMT_OP_SET_WIDEBAND_SPEECH,
3792 MGMT_STATUS_NOT_SUPPORTED);
3794 if (cp->val != 0x00 && cp->val != 0x01)
3795 return mgmt_cmd_status(sk, hdev->id,
3796 MGMT_OP_SET_WIDEBAND_SPEECH,
3797 MGMT_STATUS_INVALID_PARAMS);
3801 if (hdev_is_powered(hdev) &&
3802 !!cp->val != hci_dev_test_flag(hdev,
3803 HCI_WIDEBAND_SPEECH_ENABLED)) {
3804 err = mgmt_cmd_status(sk, hdev->id,
3805 MGMT_OP_SET_WIDEBAND_SPEECH,
3806 MGMT_STATUS_REJECTED);
3811 changed = !hci_dev_test_and_set_flag(hdev,
3812 HCI_WIDEBAND_SPEECH_ENABLED);
3814 changed = hci_dev_test_and_clear_flag(hdev,
3815 HCI_WIDEBAND_SPEECH_ENABLED);
3817 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3822 err = new_settings(hdev, sk);
3825 hci_dev_unlock(hdev);
3829 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3830 void *data, u16 data_len)
3833 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3836 u8 tx_power_range[2];
3838 bt_dev_dbg(hdev, "sock %p", sk);
3840 memset(&buf, 0, sizeof(buf));
3844 /* When the Read Simple Pairing Options command is supported, then
3845 * the remote public key validation is supported.
3847 * Alternatively, when Microsoft extensions are available, they can
3848 * indicate support for public key validation as well.
3850 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3851 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3853 flags |= 0x02; /* Remote public key validation (LE) */
3855 /* When the Read Encryption Key Size command is supported, then the
3856 * encryption key size is enforced.
3858 if (hdev->commands[20] & 0x10)
3859 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3861 flags |= 0x08; /* Encryption key size enforcement (LE) */
3863 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3866 /* When the Read Simple Pairing Options command is supported, then
3867 * also max encryption key size information is provided.
3869 if (hdev->commands[41] & 0x08)
3870 cap_len = eir_append_le16(rp->cap, cap_len,
3871 MGMT_CAP_MAX_ENC_KEY_SIZE,
3872 hdev->max_enc_key_size);
3874 cap_len = eir_append_le16(rp->cap, cap_len,
3875 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3876 SMP_MAX_ENC_KEY_SIZE);
3878 /* Append the min/max LE tx power parameters if we were able to fetch
3879 * it from the controller
3881 if (hdev->commands[38] & 0x80) {
3882 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3883 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3884 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3888 rp->cap_len = cpu_to_le16(cap_len);
3890 hci_dev_unlock(hdev);
3892 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3893 rp, sizeof(*rp) + cap_len);
3896 #ifdef CONFIG_BT_FEATURE_DEBUG
3897 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3898 static const u8 debug_uuid[16] = {
3899 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3900 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3904 /* 330859bc-7506-492d-9370-9a6f0614037f */
3905 static const u8 quality_report_uuid[16] = {
3906 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
3907 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
3910 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
3911 static const u8 offload_codecs_uuid[16] = {
3912 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
3913 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
3916 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3917 static const u8 le_simultaneous_roles_uuid[16] = {
3918 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3919 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3922 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3923 static const u8 rpa_resolution_uuid[16] = {
3924 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3925 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
3928 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3929 void *data, u16 data_len)
3931 char buf[102]; /* Enough space for 5 features: 2 + 20 * 5 */
3932 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3936 bt_dev_dbg(hdev, "sock %p", sk);
3938 memset(&buf, 0, sizeof(buf));
3940 #ifdef CONFIG_BT_FEATURE_DEBUG
3942 flags = bt_dbg_get() ? BIT(0) : 0;
3944 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3945 rp->features[idx].flags = cpu_to_le32(flags);
3950 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
3951 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
3956 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
3957 rp->features[idx].flags = cpu_to_le32(flags);
3961 if (hdev && ll_privacy_capable(hdev)) {
3962 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3963 flags = BIT(0) | BIT(1);
3967 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3968 rp->features[idx].flags = cpu_to_le32(flags);
3972 if (hdev && (aosp_has_quality_report(hdev) ||
3973 hdev->set_quality_report)) {
3974 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
3979 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
3980 rp->features[idx].flags = cpu_to_le32(flags);
3984 if (hdev && hdev->get_data_path_id) {
3985 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
3990 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
3991 rp->features[idx].flags = cpu_to_le32(flags);
3995 rp->feature_count = cpu_to_le16(idx);
3997 /* After reading the experimental features information, enable
3998 * the events to update client on any future change.
4000 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4002 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4003 MGMT_OP_READ_EXP_FEATURES_INFO,
4004 0, rp, sizeof(*rp) + (20 * idx));
4007 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4010 struct mgmt_ev_exp_feature_changed ev;
4012 memset(&ev, 0, sizeof(ev));
4013 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4014 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4016 // Do we need to be atomic with the conn_flags?
4017 if (enabled && privacy_mode_capable(hdev))
4018 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4020 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4022 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4024 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4028 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4029 bool enabled, struct sock *skip)
4031 struct mgmt_ev_exp_feature_changed ev;
4033 memset(&ev, 0, sizeof(ev));
4034 memcpy(ev.uuid, uuid, 16);
4035 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4037 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4039 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4042 #define EXP_FEAT(_uuid, _set_func) \
4045 .set_func = _set_func, \
4048 /* The zero key uuid is special. Multiple exp features are set through it. */
4049 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4050 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4052 struct mgmt_rp_set_exp_feature rp;
4054 memset(rp.uuid, 0, 16);
4055 rp.flags = cpu_to_le32(0);
4057 #ifdef CONFIG_BT_FEATURE_DEBUG
4059 bool changed = bt_dbg_get();
4064 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4068 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4071 changed = hci_dev_test_and_clear_flag(hdev,
4072 HCI_ENABLE_LL_PRIVACY);
4074 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4078 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4080 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4081 MGMT_OP_SET_EXP_FEATURE, 0,
4085 #ifdef CONFIG_BT_FEATURE_DEBUG
4086 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4087 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4089 struct mgmt_rp_set_exp_feature rp;
4094 /* Command requires to use the non-controller index */
4096 return mgmt_cmd_status(sk, hdev->id,
4097 MGMT_OP_SET_EXP_FEATURE,
4098 MGMT_STATUS_INVALID_INDEX);
4100 /* Parameters are limited to a single octet */
4101 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4102 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4103 MGMT_OP_SET_EXP_FEATURE,
4104 MGMT_STATUS_INVALID_PARAMS);
4106 /* Only boolean on/off is supported */
4107 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4108 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4109 MGMT_OP_SET_EXP_FEATURE,
4110 MGMT_STATUS_INVALID_PARAMS);
4112 val = !!cp->param[0];
4113 changed = val ? !bt_dbg_get() : bt_dbg_get();
4116 memcpy(rp.uuid, debug_uuid, 16);
4117 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4119 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4121 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4122 MGMT_OP_SET_EXP_FEATURE, 0,
4126 exp_feature_changed(hdev, debug_uuid, val, sk);
4132 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4133 struct mgmt_cp_set_exp_feature *cp,
4136 struct mgmt_rp_set_exp_feature rp;
4141 /* Command requires to use the controller index */
4143 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4144 MGMT_OP_SET_EXP_FEATURE,
4145 MGMT_STATUS_INVALID_INDEX);
4147 /* Changes can only be made when controller is powered down */
4148 if (hdev_is_powered(hdev))
4149 return mgmt_cmd_status(sk, hdev->id,
4150 MGMT_OP_SET_EXP_FEATURE,
4151 MGMT_STATUS_REJECTED);
4153 /* Parameters are limited to a single octet */
4154 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4155 return mgmt_cmd_status(sk, hdev->id,
4156 MGMT_OP_SET_EXP_FEATURE,
4157 MGMT_STATUS_INVALID_PARAMS);
4159 /* Only boolean on/off is supported */
4160 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4161 return mgmt_cmd_status(sk, hdev->id,
4162 MGMT_OP_SET_EXP_FEATURE,
4163 MGMT_STATUS_INVALID_PARAMS);
4165 val = !!cp->param[0];
4168 changed = !hci_dev_test_and_set_flag(hdev,
4169 HCI_ENABLE_LL_PRIVACY);
4170 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4172 /* Enable LL privacy + supported settings changed */
4173 flags = BIT(0) | BIT(1);
4175 changed = hci_dev_test_and_clear_flag(hdev,
4176 HCI_ENABLE_LL_PRIVACY);
4178 /* Disable LL privacy + supported settings changed */
4182 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4183 rp.flags = cpu_to_le32(flags);
4185 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4187 err = mgmt_cmd_complete(sk, hdev->id,
4188 MGMT_OP_SET_EXP_FEATURE, 0,
4192 exp_ll_privacy_feature_changed(val, hdev, sk);
4197 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4198 struct mgmt_cp_set_exp_feature *cp,
4201 struct mgmt_rp_set_exp_feature rp;
4205 /* Command requires to use a valid controller index */
4207 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4208 MGMT_OP_SET_EXP_FEATURE,
4209 MGMT_STATUS_INVALID_INDEX);
4211 /* Parameters are limited to a single octet */
4212 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4213 return mgmt_cmd_status(sk, hdev->id,
4214 MGMT_OP_SET_EXP_FEATURE,
4215 MGMT_STATUS_INVALID_PARAMS);
4217 /* Only boolean on/off is supported */
4218 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4219 return mgmt_cmd_status(sk, hdev->id,
4220 MGMT_OP_SET_EXP_FEATURE,
4221 MGMT_STATUS_INVALID_PARAMS);
4223 hci_req_sync_lock(hdev);
4225 val = !!cp->param[0];
4226 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4228 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4229 err = mgmt_cmd_status(sk, hdev->id,
4230 MGMT_OP_SET_EXP_FEATURE,
4231 MGMT_STATUS_NOT_SUPPORTED);
4232 goto unlock_quality_report;
4236 if (hdev->set_quality_report)
4237 err = hdev->set_quality_report(hdev, val);
4239 err = aosp_set_quality_report(hdev, val);
4242 err = mgmt_cmd_status(sk, hdev->id,
4243 MGMT_OP_SET_EXP_FEATURE,
4244 MGMT_STATUS_FAILED);
4245 goto unlock_quality_report;
4249 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4251 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4254 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4256 memcpy(rp.uuid, quality_report_uuid, 16);
4257 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4258 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4260 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4264 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4266 unlock_quality_report:
4267 hci_req_sync_unlock(hdev);
4271 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4272 struct mgmt_cp_set_exp_feature *cp,
4277 struct mgmt_rp_set_exp_feature rp;
4279 /* Command requires to use a valid controller index */
4281 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4282 MGMT_OP_SET_EXP_FEATURE,
4283 MGMT_STATUS_INVALID_INDEX);
4285 /* Parameters are limited to a single octet */
4286 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4287 return mgmt_cmd_status(sk, hdev->id,
4288 MGMT_OP_SET_EXP_FEATURE,
4289 MGMT_STATUS_INVALID_PARAMS);
4291 /* Only boolean on/off is supported */
4292 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4293 return mgmt_cmd_status(sk, hdev->id,
4294 MGMT_OP_SET_EXP_FEATURE,
4295 MGMT_STATUS_INVALID_PARAMS);
4297 val = !!cp->param[0];
4298 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4300 if (!hdev->get_data_path_id) {
4301 return mgmt_cmd_status(sk, hdev->id,
4302 MGMT_OP_SET_EXP_FEATURE,
4303 MGMT_STATUS_NOT_SUPPORTED);
4308 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4310 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4313 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4316 memcpy(rp.uuid, offload_codecs_uuid, 16);
4317 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4318 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4319 err = mgmt_cmd_complete(sk, hdev->id,
4320 MGMT_OP_SET_EXP_FEATURE, 0,
4324 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4329 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4330 struct mgmt_cp_set_exp_feature *cp,
4335 struct mgmt_rp_set_exp_feature rp;
4337 /* Command requires to use a valid controller index */
4339 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4340 MGMT_OP_SET_EXP_FEATURE,
4341 MGMT_STATUS_INVALID_INDEX);
4343 /* Parameters are limited to a single octet */
4344 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4345 return mgmt_cmd_status(sk, hdev->id,
4346 MGMT_OP_SET_EXP_FEATURE,
4347 MGMT_STATUS_INVALID_PARAMS);
4349 /* Only boolean on/off is supported */
4350 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4351 return mgmt_cmd_status(sk, hdev->id,
4352 MGMT_OP_SET_EXP_FEATURE,
4353 MGMT_STATUS_INVALID_PARAMS);
4355 val = !!cp->param[0];
4356 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4358 if (!hci_dev_le_state_simultaneous(hdev)) {
4359 return mgmt_cmd_status(sk, hdev->id,
4360 MGMT_OP_SET_EXP_FEATURE,
4361 MGMT_STATUS_NOT_SUPPORTED);
4366 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4368 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4371 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4374 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4375 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4376 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4377 err = mgmt_cmd_complete(sk, hdev->id,
4378 MGMT_OP_SET_EXP_FEATURE, 0,
4382 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4387 static const struct mgmt_exp_feature {
4389 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4390 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4391 } exp_features[] = {
4392 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4393 #ifdef CONFIG_BT_FEATURE_DEBUG
4394 EXP_FEAT(debug_uuid, set_debug_func),
4396 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4397 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4398 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4399 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4401 /* end with a null feature */
4402 EXP_FEAT(NULL, NULL)
4405 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4406 void *data, u16 data_len)
4408 struct mgmt_cp_set_exp_feature *cp = data;
4411 bt_dev_dbg(hdev, "sock %p", sk);
4413 for (i = 0; exp_features[i].uuid; i++) {
4414 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4415 return exp_features[i].set_func(sk, hdev, cp, data_len);
4418 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4419 MGMT_OP_SET_EXP_FEATURE,
4420 MGMT_STATUS_NOT_SUPPORTED);
4423 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4426 struct mgmt_cp_get_device_flags *cp = data;
4427 struct mgmt_rp_get_device_flags rp;
4428 struct bdaddr_list_with_flags *br_params;
4429 struct hci_conn_params *params;
4430 u32 supported_flags;
4431 u32 current_flags = 0;
4432 u8 status = MGMT_STATUS_INVALID_PARAMS;
4434 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4435 &cp->addr.bdaddr, cp->addr.type);
4439 supported_flags = hdev->conn_flags;
4441 memset(&rp, 0, sizeof(rp));
4443 if (cp->addr.type == BDADDR_BREDR) {
4444 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4450 current_flags = br_params->flags;
4452 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4453 le_addr_type(cp->addr.type));
4458 current_flags = params->flags;
4461 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4462 rp.addr.type = cp->addr.type;
4463 rp.supported_flags = cpu_to_le32(supported_flags);
4464 rp.current_flags = cpu_to_le32(current_flags);
4466 status = MGMT_STATUS_SUCCESS;
4469 hci_dev_unlock(hdev);
4471 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4475 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4476 bdaddr_t *bdaddr, u8 bdaddr_type,
4477 u32 supported_flags, u32 current_flags)
4479 struct mgmt_ev_device_flags_changed ev;
4481 bacpy(&ev.addr.bdaddr, bdaddr);
4482 ev.addr.type = bdaddr_type;
4483 ev.supported_flags = cpu_to_le32(supported_flags);
4484 ev.current_flags = cpu_to_le32(current_flags);
4486 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4489 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4492 struct mgmt_cp_set_device_flags *cp = data;
4493 struct bdaddr_list_with_flags *br_params;
4494 struct hci_conn_params *params;
4495 u8 status = MGMT_STATUS_INVALID_PARAMS;
4496 u32 supported_flags;
4497 u32 current_flags = __le32_to_cpu(cp->current_flags);
4499 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4500 &cp->addr.bdaddr, cp->addr.type,
4501 __le32_to_cpu(current_flags));
4503 // We should take hci_dev_lock() early, I think.. conn_flags can change
4504 supported_flags = hdev->conn_flags;
4506 if ((supported_flags | current_flags) != supported_flags) {
4507 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4508 current_flags, supported_flags);
4514 if (cp->addr.type == BDADDR_BREDR) {
4515 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4520 br_params->flags = current_flags;
4521 status = MGMT_STATUS_SUCCESS;
4523 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4524 &cp->addr.bdaddr, cp->addr.type);
4527 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4528 le_addr_type(cp->addr.type));
4530 /* Devices using RPAs can only be programmed in the
4531 * acceptlist LL Privacy has been enable otherwise they
4532 * cannot mark HCI_CONN_FLAG_REMOTE_WAKEUP.
4534 if ((current_flags & HCI_CONN_FLAG_REMOTE_WAKEUP) &&
4535 !use_ll_privacy(hdev) &&
4536 hci_find_irk_by_addr(hdev, ¶ms->addr,
4537 params->addr_type)) {
4539 "Cannot set wakeable for RPA");
4543 params->flags = current_flags;
4544 status = MGMT_STATUS_SUCCESS;
4546 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4549 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
4550 hci_update_passive_scan(hdev);
4552 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4554 le_addr_type(cp->addr.type));
4559 hci_dev_unlock(hdev);
4562 if (status == MGMT_STATUS_SUCCESS)
4563 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4564 supported_flags, current_flags);
4566 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4567 &cp->addr, sizeof(cp->addr));
4570 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4573 struct mgmt_ev_adv_monitor_added ev;
4575 ev.monitor_handle = cpu_to_le16(handle);
4577 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4580 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4582 struct mgmt_ev_adv_monitor_removed ev;
4583 struct mgmt_pending_cmd *cmd;
4584 struct sock *sk_skip = NULL;
4585 struct mgmt_cp_remove_adv_monitor *cp;
4587 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4591 if (cp->monitor_handle)
4595 ev.monitor_handle = cpu_to_le16(handle);
4597 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4600 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4601 void *data, u16 len)
4603 struct adv_monitor *monitor = NULL;
4604 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4607 __u32 supported = 0;
4609 __u16 num_handles = 0;
4610 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4612 BT_DBG("request for %s", hdev->name);
4616 if (msft_monitor_supported(hdev))
4617 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4619 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4620 handles[num_handles++] = monitor->handle;
4622 hci_dev_unlock(hdev);
4624 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4625 rp = kmalloc(rp_size, GFP_KERNEL);
4629 /* All supported features are currently enabled */
4630 enabled = supported;
4632 rp->supported_features = cpu_to_le32(supported);
4633 rp->enabled_features = cpu_to_le32(enabled);
4634 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4635 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4636 rp->num_handles = cpu_to_le16(num_handles);
4638 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4640 err = mgmt_cmd_complete(sk, hdev->id,
4641 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4642 MGMT_STATUS_SUCCESS, rp, rp_size);
4649 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4651 struct mgmt_rp_add_adv_patterns_monitor rp;
4652 struct mgmt_pending_cmd *cmd;
4653 struct adv_monitor *monitor;
4658 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4660 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4665 monitor = cmd->user_data;
4666 rp.monitor_handle = cpu_to_le16(monitor->handle);
4669 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4670 hdev->adv_monitors_cnt++;
4671 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4672 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4673 hci_update_passive_scan(hdev);
4676 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4677 mgmt_status(status), &rp, sizeof(rp));
4678 mgmt_pending_remove(cmd);
4681 hci_dev_unlock(hdev);
4682 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4683 rp.monitor_handle, status);
4688 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4689 struct adv_monitor *m, u8 status,
4690 void *data, u16 len, u16 op)
4692 struct mgmt_rp_add_adv_patterns_monitor rp;
4693 struct mgmt_pending_cmd *cmd;
4702 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4703 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4704 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4705 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4706 status = MGMT_STATUS_BUSY;
4710 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4712 status = MGMT_STATUS_NO_RESOURCES;
4717 pending = hci_add_adv_monitor(hdev, m, &err);
4719 if (err == -ENOSPC || err == -ENOMEM)
4720 status = MGMT_STATUS_NO_RESOURCES;
4721 else if (err == -EINVAL)
4722 status = MGMT_STATUS_INVALID_PARAMS;
4724 status = MGMT_STATUS_FAILED;
4726 mgmt_pending_remove(cmd);
4731 mgmt_pending_remove(cmd);
4732 rp.monitor_handle = cpu_to_le16(m->handle);
4733 mgmt_adv_monitor_added(sk, hdev, m->handle);
4734 m->state = ADV_MONITOR_STATE_REGISTERED;
4735 hdev->adv_monitors_cnt++;
4737 hci_dev_unlock(hdev);
4738 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4742 hci_dev_unlock(hdev);
4747 hci_free_adv_monitor(hdev, m);
4748 hci_dev_unlock(hdev);
4749 return mgmt_cmd_status(sk, hdev->id, op, status);
4752 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4753 struct mgmt_adv_rssi_thresholds *rssi)
4756 m->rssi.low_threshold = rssi->low_threshold;
4757 m->rssi.low_threshold_timeout =
4758 __le16_to_cpu(rssi->low_threshold_timeout);
4759 m->rssi.high_threshold = rssi->high_threshold;
4760 m->rssi.high_threshold_timeout =
4761 __le16_to_cpu(rssi->high_threshold_timeout);
4762 m->rssi.sampling_period = rssi->sampling_period;
4764 /* Default values. These numbers are the least constricting
4765 * parameters for MSFT API to work, so it behaves as if there
4766 * are no rssi parameter to consider. May need to be changed
4767 * if other API are to be supported.
4769 m->rssi.low_threshold = -127;
4770 m->rssi.low_threshold_timeout = 60;
4771 m->rssi.high_threshold = -127;
4772 m->rssi.high_threshold_timeout = 0;
4773 m->rssi.sampling_period = 0;
4777 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4778 struct mgmt_adv_pattern *patterns)
4780 u8 offset = 0, length = 0;
4781 struct adv_pattern *p = NULL;
4784 for (i = 0; i < pattern_count; i++) {
4785 offset = patterns[i].offset;
4786 length = patterns[i].length;
4787 if (offset >= HCI_MAX_AD_LENGTH ||
4788 length > HCI_MAX_AD_LENGTH ||
4789 (offset + length) > HCI_MAX_AD_LENGTH)
4790 return MGMT_STATUS_INVALID_PARAMS;
4792 p = kmalloc(sizeof(*p), GFP_KERNEL);
4794 return MGMT_STATUS_NO_RESOURCES;
4796 p->ad_type = patterns[i].ad_type;
4797 p->offset = patterns[i].offset;
4798 p->length = patterns[i].length;
4799 memcpy(p->value, patterns[i].value, p->length);
4801 INIT_LIST_HEAD(&p->list);
4802 list_add(&p->list, &m->patterns);
4805 return MGMT_STATUS_SUCCESS;
4808 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4809 void *data, u16 len)
4811 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4812 struct adv_monitor *m = NULL;
4813 u8 status = MGMT_STATUS_SUCCESS;
4814 size_t expected_size = sizeof(*cp);
4816 BT_DBG("request for %s", hdev->name);
4818 if (len <= sizeof(*cp)) {
4819 status = MGMT_STATUS_INVALID_PARAMS;
4823 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4824 if (len != expected_size) {
4825 status = MGMT_STATUS_INVALID_PARAMS;
4829 m = kzalloc(sizeof(*m), GFP_KERNEL);
4831 status = MGMT_STATUS_NO_RESOURCES;
4835 INIT_LIST_HEAD(&m->patterns);
4837 parse_adv_monitor_rssi(m, NULL);
4838 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4841 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4842 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4845 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4846 void *data, u16 len)
4848 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4849 struct adv_monitor *m = NULL;
4850 u8 status = MGMT_STATUS_SUCCESS;
4851 size_t expected_size = sizeof(*cp);
4853 BT_DBG("request for %s", hdev->name);
4855 if (len <= sizeof(*cp)) {
4856 status = MGMT_STATUS_INVALID_PARAMS;
4860 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4861 if (len != expected_size) {
4862 status = MGMT_STATUS_INVALID_PARAMS;
4866 m = kzalloc(sizeof(*m), GFP_KERNEL);
4868 status = MGMT_STATUS_NO_RESOURCES;
4872 INIT_LIST_HEAD(&m->patterns);
4874 parse_adv_monitor_rssi(m, &cp->rssi);
4875 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4878 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4879 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4882 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4884 struct mgmt_rp_remove_adv_monitor rp;
4885 struct mgmt_cp_remove_adv_monitor *cp;
4886 struct mgmt_pending_cmd *cmd;
4891 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4896 rp.monitor_handle = cp->monitor_handle;
4899 hci_update_passive_scan(hdev);
4901 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4902 mgmt_status(status), &rp, sizeof(rp));
4903 mgmt_pending_remove(cmd);
4906 hci_dev_unlock(hdev);
4907 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4908 rp.monitor_handle, status);
4913 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4914 void *data, u16 len)
4916 struct mgmt_cp_remove_adv_monitor *cp = data;
4917 struct mgmt_rp_remove_adv_monitor rp;
4918 struct mgmt_pending_cmd *cmd;
4919 u16 handle = __le16_to_cpu(cp->monitor_handle);
4923 BT_DBG("request for %s", hdev->name);
4924 rp.monitor_handle = cp->monitor_handle;
4928 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4929 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4930 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4931 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4932 status = MGMT_STATUS_BUSY;
4936 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4938 status = MGMT_STATUS_NO_RESOURCES;
4943 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4945 pending = hci_remove_all_adv_monitor(hdev, &err);
4948 mgmt_pending_remove(cmd);
4951 status = MGMT_STATUS_INVALID_INDEX;
4953 status = MGMT_STATUS_FAILED;
4958 /* monitor can be removed without forwarding request to controller */
4960 mgmt_pending_remove(cmd);
4961 hci_dev_unlock(hdev);
4963 return mgmt_cmd_complete(sk, hdev->id,
4964 MGMT_OP_REMOVE_ADV_MONITOR,
4965 MGMT_STATUS_SUCCESS,
4969 hci_dev_unlock(hdev);
4973 hci_dev_unlock(hdev);
4974 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4978 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
4980 struct mgmt_rp_read_local_oob_data mgmt_rp;
4981 size_t rp_size = sizeof(mgmt_rp);
4982 struct mgmt_pending_cmd *cmd = data;
4983 struct sk_buff *skb = cmd->skb;
4984 u8 status = mgmt_status(err);
4988 status = MGMT_STATUS_FAILED;
4989 else if (IS_ERR(skb))
4990 status = mgmt_status(PTR_ERR(skb));
4992 status = mgmt_status(skb->data[0]);
4995 bt_dev_dbg(hdev, "status %d", status);
4998 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5002 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5004 if (!bredr_sc_enabled(hdev)) {
5005 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5007 if (skb->len < sizeof(*rp)) {
5008 mgmt_cmd_status(cmd->sk, hdev->id,
5009 MGMT_OP_READ_LOCAL_OOB_DATA,
5010 MGMT_STATUS_FAILED);
5014 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5015 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5017 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5019 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5021 if (skb->len < sizeof(*rp)) {
5022 mgmt_cmd_status(cmd->sk, hdev->id,
5023 MGMT_OP_READ_LOCAL_OOB_DATA,
5024 MGMT_STATUS_FAILED);
5028 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5029 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5031 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5032 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5035 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5036 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5039 if (skb && !IS_ERR(skb))
5042 mgmt_pending_free(cmd);
5045 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5047 struct mgmt_pending_cmd *cmd = data;
5049 if (bredr_sc_enabled(hdev))
5050 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5052 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5054 if (IS_ERR(cmd->skb))
5055 return PTR_ERR(cmd->skb);
5060 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5061 void *data, u16 data_len)
5063 struct mgmt_pending_cmd *cmd;
5066 bt_dev_dbg(hdev, "sock %p", sk);
5070 if (!hdev_is_powered(hdev)) {
5071 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5072 MGMT_STATUS_NOT_POWERED);
5076 if (!lmp_ssp_capable(hdev)) {
5077 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5078 MGMT_STATUS_NOT_SUPPORTED);
5082 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5086 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5087 read_local_oob_data_complete);
5090 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5091 MGMT_STATUS_FAILED);
5094 mgmt_pending_free(cmd);
5098 hci_dev_unlock(hdev);
5102 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5103 void *data, u16 len)
5105 struct mgmt_addr_info *addr = data;
5108 bt_dev_dbg(hdev, "sock %p", sk);
5110 if (!bdaddr_type_is_valid(addr->type))
5111 return mgmt_cmd_complete(sk, hdev->id,
5112 MGMT_OP_ADD_REMOTE_OOB_DATA,
5113 MGMT_STATUS_INVALID_PARAMS,
5114 addr, sizeof(*addr));
5118 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5119 struct mgmt_cp_add_remote_oob_data *cp = data;
5122 if (cp->addr.type != BDADDR_BREDR) {
5123 err = mgmt_cmd_complete(sk, hdev->id,
5124 MGMT_OP_ADD_REMOTE_OOB_DATA,
5125 MGMT_STATUS_INVALID_PARAMS,
5126 &cp->addr, sizeof(cp->addr));
5130 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5131 cp->addr.type, cp->hash,
5132 cp->rand, NULL, NULL);
5134 status = MGMT_STATUS_FAILED;
5136 status = MGMT_STATUS_SUCCESS;
5138 err = mgmt_cmd_complete(sk, hdev->id,
5139 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5140 &cp->addr, sizeof(cp->addr));
5141 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5142 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5143 u8 *rand192, *hash192, *rand256, *hash256;
5146 if (bdaddr_type_is_le(cp->addr.type)) {
5147 /* Enforce zero-valued 192-bit parameters as
5148 * long as legacy SMP OOB isn't implemented.
5150 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5151 memcmp(cp->hash192, ZERO_KEY, 16)) {
5152 err = mgmt_cmd_complete(sk, hdev->id,
5153 MGMT_OP_ADD_REMOTE_OOB_DATA,
5154 MGMT_STATUS_INVALID_PARAMS,
5155 addr, sizeof(*addr));
5162 /* In case one of the P-192 values is set to zero,
5163 * then just disable OOB data for P-192.
5165 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5166 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5170 rand192 = cp->rand192;
5171 hash192 = cp->hash192;
5175 /* In case one of the P-256 values is set to zero, then just
5176 * disable OOB data for P-256.
5178 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5179 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5183 rand256 = cp->rand256;
5184 hash256 = cp->hash256;
5187 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5188 cp->addr.type, hash192, rand192,
5191 status = MGMT_STATUS_FAILED;
5193 status = MGMT_STATUS_SUCCESS;
5195 err = mgmt_cmd_complete(sk, hdev->id,
5196 MGMT_OP_ADD_REMOTE_OOB_DATA,
5197 status, &cp->addr, sizeof(cp->addr));
5199 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5201 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5202 MGMT_STATUS_INVALID_PARAMS);
5206 hci_dev_unlock(hdev);
5210 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5211 void *data, u16 len)
5213 struct mgmt_cp_remove_remote_oob_data *cp = data;
5217 bt_dev_dbg(hdev, "sock %p", sk);
5219 if (cp->addr.type != BDADDR_BREDR)
5220 return mgmt_cmd_complete(sk, hdev->id,
5221 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5222 MGMT_STATUS_INVALID_PARAMS,
5223 &cp->addr, sizeof(cp->addr));
5227 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5228 hci_remote_oob_data_clear(hdev);
5229 status = MGMT_STATUS_SUCCESS;
5233 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5235 status = MGMT_STATUS_INVALID_PARAMS;
5237 status = MGMT_STATUS_SUCCESS;
5240 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5241 status, &cp->addr, sizeof(cp->addr));
5243 hci_dev_unlock(hdev);
5247 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5249 struct mgmt_pending_cmd *cmd;
5251 bt_dev_dbg(hdev, "status %u", status);
5255 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5257 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5260 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5263 cmd->cmd_complete(cmd, mgmt_status(status));
5264 mgmt_pending_remove(cmd);
5267 hci_dev_unlock(hdev);
5270 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5271 uint8_t *mgmt_status)
5274 case DISCOV_TYPE_LE:
5275 *mgmt_status = mgmt_le_support(hdev);
5279 case DISCOV_TYPE_INTERLEAVED:
5280 *mgmt_status = mgmt_le_support(hdev);
5284 case DISCOV_TYPE_BREDR:
5285 *mgmt_status = mgmt_bredr_support(hdev);
5290 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5297 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5299 struct mgmt_pending_cmd *cmd = data;
5301 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5302 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5303 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5306 bt_dev_dbg(hdev, "err %d", err);
5308 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5310 mgmt_pending_remove(cmd);
5312 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5316 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5318 return hci_start_discovery_sync(hdev);
5321 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5322 u16 op, void *data, u16 len)
5324 struct mgmt_cp_start_discovery *cp = data;
5325 struct mgmt_pending_cmd *cmd;
5329 bt_dev_dbg(hdev, "sock %p", sk);
5333 if (!hdev_is_powered(hdev)) {
5334 err = mgmt_cmd_complete(sk, hdev->id, op,
5335 MGMT_STATUS_NOT_POWERED,
5336 &cp->type, sizeof(cp->type));
5340 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5341 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5342 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5343 &cp->type, sizeof(cp->type));
5347 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5348 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5349 &cp->type, sizeof(cp->type));
5353 /* Can't start discovery when it is paused */
5354 if (hdev->discovery_paused) {
5355 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5356 &cp->type, sizeof(cp->type));
5360 /* Clear the discovery filter first to free any previously
5361 * allocated memory for the UUID list.
5363 hci_discovery_filter_clear(hdev);
5365 hdev->discovery.type = cp->type;
5366 hdev->discovery.report_invalid_rssi = false;
5367 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5368 hdev->discovery.limited = true;
5370 hdev->discovery.limited = false;
5372 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5378 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5379 start_discovery_complete);
5381 mgmt_pending_remove(cmd);
5385 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5388 hci_dev_unlock(hdev);
5392 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5393 void *data, u16 len)
5395 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5399 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5400 void *data, u16 len)
5402 return start_discovery_internal(sk, hdev,
5403 MGMT_OP_START_LIMITED_DISCOVERY,
5407 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5408 void *data, u16 len)
5410 struct mgmt_cp_start_service_discovery *cp = data;
5411 struct mgmt_pending_cmd *cmd;
5412 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5413 u16 uuid_count, expected_len;
5417 bt_dev_dbg(hdev, "sock %p", sk);
5421 if (!hdev_is_powered(hdev)) {
5422 err = mgmt_cmd_complete(sk, hdev->id,
5423 MGMT_OP_START_SERVICE_DISCOVERY,
5424 MGMT_STATUS_NOT_POWERED,
5425 &cp->type, sizeof(cp->type));
5429 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5430 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5431 err = mgmt_cmd_complete(sk, hdev->id,
5432 MGMT_OP_START_SERVICE_DISCOVERY,
5433 MGMT_STATUS_BUSY, &cp->type,
5438 if (hdev->discovery_paused) {
5439 err = mgmt_cmd_complete(sk, hdev->id,
5440 MGMT_OP_START_SERVICE_DISCOVERY,
5441 MGMT_STATUS_BUSY, &cp->type,
5446 uuid_count = __le16_to_cpu(cp->uuid_count);
5447 if (uuid_count > max_uuid_count) {
5448 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5450 err = mgmt_cmd_complete(sk, hdev->id,
5451 MGMT_OP_START_SERVICE_DISCOVERY,
5452 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5457 expected_len = sizeof(*cp) + uuid_count * 16;
5458 if (expected_len != len) {
5459 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5461 err = mgmt_cmd_complete(sk, hdev->id,
5462 MGMT_OP_START_SERVICE_DISCOVERY,
5463 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5468 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5469 err = mgmt_cmd_complete(sk, hdev->id,
5470 MGMT_OP_START_SERVICE_DISCOVERY,
5471 status, &cp->type, sizeof(cp->type));
5475 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5482 /* Clear the discovery filter first to free any previously
5483 * allocated memory for the UUID list.
5485 hci_discovery_filter_clear(hdev);
5487 hdev->discovery.result_filtering = true;
5488 hdev->discovery.type = cp->type;
5489 hdev->discovery.rssi = cp->rssi;
5490 hdev->discovery.uuid_count = uuid_count;
5492 if (uuid_count > 0) {
5493 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5495 if (!hdev->discovery.uuids) {
5496 err = mgmt_cmd_complete(sk, hdev->id,
5497 MGMT_OP_START_SERVICE_DISCOVERY,
5499 &cp->type, sizeof(cp->type));
5500 mgmt_pending_remove(cmd);
5505 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5506 start_discovery_complete);
5508 mgmt_pending_remove(cmd);
5512 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5515 hci_dev_unlock(hdev);
5519 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5521 struct mgmt_pending_cmd *cmd;
5523 bt_dev_dbg(hdev, "status %u", status);
5527 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5529 cmd->cmd_complete(cmd, mgmt_status(status));
5530 mgmt_pending_remove(cmd);
5533 hci_dev_unlock(hdev);
5536 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
5538 struct mgmt_pending_cmd *cmd = data;
5540 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
5543 bt_dev_dbg(hdev, "err %d", err);
5545 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5547 mgmt_pending_remove(cmd);
5550 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5553 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
5555 return hci_stop_discovery_sync(hdev);
5558 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5561 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5562 struct mgmt_pending_cmd *cmd;
5565 bt_dev_dbg(hdev, "sock %p", sk);
5569 if (!hci_discovery_active(hdev)) {
5570 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5571 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5572 sizeof(mgmt_cp->type));
5576 if (hdev->discovery.type != mgmt_cp->type) {
5577 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5578 MGMT_STATUS_INVALID_PARAMS,
5579 &mgmt_cp->type, sizeof(mgmt_cp->type));
5583 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5589 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
5590 stop_discovery_complete);
5592 mgmt_pending_remove(cmd);
5596 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5599 hci_dev_unlock(hdev);
5603 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5606 struct mgmt_cp_confirm_name *cp = data;
5607 struct inquiry_entry *e;
5610 bt_dev_dbg(hdev, "sock %p", sk);
5614 if (!hci_discovery_active(hdev)) {
5615 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5616 MGMT_STATUS_FAILED, &cp->addr,
5621 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5623 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5624 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5629 if (cp->name_known) {
5630 e->name_state = NAME_KNOWN;
5633 e->name_state = NAME_NEEDED;
5634 hci_inquiry_cache_update_resolve(hdev, e);
5637 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5638 &cp->addr, sizeof(cp->addr));
5641 hci_dev_unlock(hdev);
5645 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5648 struct mgmt_cp_block_device *cp = data;
5652 bt_dev_dbg(hdev, "sock %p", sk);
5654 if (!bdaddr_type_is_valid(cp->addr.type))
5655 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5656 MGMT_STATUS_INVALID_PARAMS,
5657 &cp->addr, sizeof(cp->addr));
5661 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5664 status = MGMT_STATUS_FAILED;
5668 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5670 status = MGMT_STATUS_SUCCESS;
5673 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5674 &cp->addr, sizeof(cp->addr));
5676 hci_dev_unlock(hdev);
5681 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5684 struct mgmt_cp_unblock_device *cp = data;
5688 bt_dev_dbg(hdev, "sock %p", sk);
5690 if (!bdaddr_type_is_valid(cp->addr.type))
5691 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5692 MGMT_STATUS_INVALID_PARAMS,
5693 &cp->addr, sizeof(cp->addr));
5697 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5700 status = MGMT_STATUS_INVALID_PARAMS;
5704 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5706 status = MGMT_STATUS_SUCCESS;
5709 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5710 &cp->addr, sizeof(cp->addr));
5712 hci_dev_unlock(hdev);
5717 static int set_device_id_sync(struct hci_dev *hdev, void *data)
5719 return hci_update_eir_sync(hdev);
5722 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5725 struct mgmt_cp_set_device_id *cp = data;
5729 bt_dev_dbg(hdev, "sock %p", sk);
5731 source = __le16_to_cpu(cp->source);
5733 if (source > 0x0002)
5734 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5735 MGMT_STATUS_INVALID_PARAMS);
5739 hdev->devid_source = source;
5740 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5741 hdev->devid_product = __le16_to_cpu(cp->product);
5742 hdev->devid_version = __le16_to_cpu(cp->version);
5744 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5747 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
5749 hci_dev_unlock(hdev);
5754 static void enable_advertising_instance(struct hci_dev *hdev, int err)
5757 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
5759 bt_dev_dbg(hdev, "status %d", err);
5762 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
5764 struct cmd_lookup match = { NULL, hdev };
5766 struct adv_info *adv_instance;
5767 u8 status = mgmt_status(err);
5770 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5771 cmd_status_rsp, &status);
5775 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5776 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5778 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5780 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5783 new_settings(hdev, match.sk);
5788 /* If "Set Advertising" was just disabled and instance advertising was
5789 * set up earlier, then re-enable multi-instance advertising.
5791 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5792 list_empty(&hdev->adv_instances))
5795 instance = hdev->cur_adv_instance;
5797 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5798 struct adv_info, list);
5802 instance = adv_instance->instance;
5805 err = hci_schedule_adv_instance_sync(hdev, instance, true);
5807 enable_advertising_instance(hdev, err);
5810 static int set_adv_sync(struct hci_dev *hdev, void *data)
5812 struct mgmt_pending_cmd *cmd = data;
5813 struct mgmt_mode *cp = cmd->param;
5816 if (cp->val == 0x02)
5817 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5819 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5821 cancel_adv_timeout(hdev);
5824 /* Switch to instance "0" for the Set Advertising setting.
5825 * We cannot use update_[adv|scan_rsp]_data() here as the
5826 * HCI_ADVERTISING flag is not yet set.
5828 hdev->cur_adv_instance = 0x00;
5830 if (ext_adv_capable(hdev)) {
5831 hci_start_ext_adv_sync(hdev, 0x00);
5833 hci_update_adv_data_sync(hdev, 0x00);
5834 hci_update_scan_rsp_data_sync(hdev, 0x00);
5835 hci_enable_advertising_sync(hdev);
5838 hci_disable_advertising_sync(hdev);
5844 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5847 struct mgmt_mode *cp = data;
5848 struct mgmt_pending_cmd *cmd;
5852 bt_dev_dbg(hdev, "sock %p", sk);
5854 status = mgmt_le_support(hdev);
5856 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5859 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5860 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5861 MGMT_STATUS_INVALID_PARAMS);
5863 if (hdev->advertising_paused)
5864 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5871 /* The following conditions are ones which mean that we should
5872 * not do any HCI communication but directly send a mgmt
5873 * response to user space (after toggling the flag if
5876 if (!hdev_is_powered(hdev) ||
5877 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5878 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5879 hci_conn_num(hdev, LE_LINK) > 0 ||
5880 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5881 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5885 hdev->cur_adv_instance = 0x00;
5886 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5887 if (cp->val == 0x02)
5888 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5890 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5892 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5893 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5896 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5901 err = new_settings(hdev, sk);
5906 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5907 pending_find(MGMT_OP_SET_LE, hdev)) {
5908 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5913 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5917 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
5918 set_advertising_complete);
5921 mgmt_pending_remove(cmd);
5924 hci_dev_unlock(hdev);
5928 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5929 void *data, u16 len)
5931 struct mgmt_cp_set_static_address *cp = data;
5934 bt_dev_dbg(hdev, "sock %p", sk);
5936 if (!lmp_le_capable(hdev))
5937 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5938 MGMT_STATUS_NOT_SUPPORTED);
5940 if (hdev_is_powered(hdev))
5941 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5942 MGMT_STATUS_REJECTED);
5944 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5945 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5946 return mgmt_cmd_status(sk, hdev->id,
5947 MGMT_OP_SET_STATIC_ADDRESS,
5948 MGMT_STATUS_INVALID_PARAMS);
5950 /* Two most significant bits shall be set */
5951 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5952 return mgmt_cmd_status(sk, hdev->id,
5953 MGMT_OP_SET_STATIC_ADDRESS,
5954 MGMT_STATUS_INVALID_PARAMS);
5959 bacpy(&hdev->static_addr, &cp->bdaddr);
5961 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5965 err = new_settings(hdev, sk);
5968 hci_dev_unlock(hdev);
5972 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5973 void *data, u16 len)
5975 struct mgmt_cp_set_scan_params *cp = data;
5976 __u16 interval, window;
5979 bt_dev_dbg(hdev, "sock %p", sk);
5981 if (!lmp_le_capable(hdev))
5982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5983 MGMT_STATUS_NOT_SUPPORTED);
5985 interval = __le16_to_cpu(cp->interval);
5987 if (interval < 0x0004 || interval > 0x4000)
5988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5989 MGMT_STATUS_INVALID_PARAMS);
5991 window = __le16_to_cpu(cp->window);
5993 if (window < 0x0004 || window > 0x4000)
5994 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5995 MGMT_STATUS_INVALID_PARAMS);
5997 if (window > interval)
5998 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5999 MGMT_STATUS_INVALID_PARAMS);
6003 hdev->le_scan_interval = interval;
6004 hdev->le_scan_window = window;
6006 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6009 /* If background scan is running, restart it so new parameters are
6012 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6013 hdev->discovery.state == DISCOVERY_STOPPED)
6014 hci_update_passive_scan(hdev);
6016 hci_dev_unlock(hdev);
6021 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6023 struct mgmt_pending_cmd *cmd = data;
6025 bt_dev_dbg(hdev, "err %d", err);
6028 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6031 struct mgmt_mode *cp = cmd->param;
6034 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6036 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6038 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6039 new_settings(hdev, cmd->sk);
6042 mgmt_pending_free(cmd);
6045 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6047 struct mgmt_pending_cmd *cmd = data;
6048 struct mgmt_mode *cp = cmd->param;
6050 return hci_write_fast_connectable_sync(hdev, cp->val);
6053 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6054 void *data, u16 len)
6056 struct mgmt_mode *cp = data;
6057 struct mgmt_pending_cmd *cmd;
6060 bt_dev_dbg(hdev, "sock %p", sk);
6062 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6063 hdev->hci_ver < BLUETOOTH_VER_1_2)
6064 return mgmt_cmd_status(sk, hdev->id,
6065 MGMT_OP_SET_FAST_CONNECTABLE,
6066 MGMT_STATUS_NOT_SUPPORTED);
6068 if (cp->val != 0x00 && cp->val != 0x01)
6069 return mgmt_cmd_status(sk, hdev->id,
6070 MGMT_OP_SET_FAST_CONNECTABLE,
6071 MGMT_STATUS_INVALID_PARAMS);
6075 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6076 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6080 if (!hdev_is_powered(hdev)) {
6081 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6082 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6083 new_settings(hdev, sk);
6087 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6092 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6093 fast_connectable_complete);
6096 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6097 MGMT_STATUS_FAILED);
6100 mgmt_pending_free(cmd);
6104 hci_dev_unlock(hdev);
6109 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6111 struct mgmt_pending_cmd *cmd = data;
6113 bt_dev_dbg(hdev, "err %d", err);
6116 u8 mgmt_err = mgmt_status(err);
6118 /* We need to restore the flag if related HCI commands
6121 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6123 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6125 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6126 new_settings(hdev, cmd->sk);
6129 mgmt_pending_free(cmd);
6132 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6136 status = hci_write_fast_connectable_sync(hdev, false);
6139 status = hci_update_scan_sync(hdev);
6141 /* Since only the advertising data flags will change, there
6142 * is no need to update the scan response data.
6145 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6150 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6152 struct mgmt_mode *cp = data;
6153 struct mgmt_pending_cmd *cmd;
6156 bt_dev_dbg(hdev, "sock %p", sk);
6158 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6159 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6160 MGMT_STATUS_NOT_SUPPORTED);
6162 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6163 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6164 MGMT_STATUS_REJECTED);
6166 if (cp->val != 0x00 && cp->val != 0x01)
6167 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6168 MGMT_STATUS_INVALID_PARAMS);
6172 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6173 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6177 if (!hdev_is_powered(hdev)) {
6179 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6180 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6181 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6182 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6183 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6186 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6188 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6192 err = new_settings(hdev, sk);
6196 /* Reject disabling when powered on */
6198 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6199 MGMT_STATUS_REJECTED);
6202 /* When configuring a dual-mode controller to operate
6203 * with LE only and using a static address, then switching
6204 * BR/EDR back on is not allowed.
6206 * Dual-mode controllers shall operate with the public
6207 * address as its identity address for BR/EDR and LE. So
6208 * reject the attempt to create an invalid configuration.
6210 * The same restrictions applies when secure connections
6211 * has been enabled. For BR/EDR this is a controller feature
6212 * while for LE it is a host stack feature. This means that
6213 * switching BR/EDR back on when secure connections has been
6214 * enabled is not a supported transaction.
6216 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6217 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6218 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6219 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6220 MGMT_STATUS_REJECTED);
6225 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6229 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6230 set_bredr_complete);
6233 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6234 MGMT_STATUS_FAILED);
6236 mgmt_pending_free(cmd);
6241 /* We need to flip the bit already here so that
6242 * hci_req_update_adv_data generates the correct flags.
6244 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6247 hci_dev_unlock(hdev);
6251 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6253 struct mgmt_pending_cmd *cmd = data;
6254 struct mgmt_mode *cp;
6256 bt_dev_dbg(hdev, "err %d", err);
6259 u8 mgmt_err = mgmt_status(err);
6261 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6269 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6270 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6273 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6274 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6277 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6278 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6282 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6283 new_settings(hdev, cmd->sk);
6286 mgmt_pending_free(cmd);
6289 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6291 struct mgmt_pending_cmd *cmd = data;
6292 struct mgmt_mode *cp = cmd->param;
6295 /* Force write of val */
6296 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6298 return hci_write_sc_support_sync(hdev, val);
6301 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6302 void *data, u16 len)
6304 struct mgmt_mode *cp = data;
6305 struct mgmt_pending_cmd *cmd;
6309 bt_dev_dbg(hdev, "sock %p", sk);
6311 if (!lmp_sc_capable(hdev) &&
6312 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6313 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6314 MGMT_STATUS_NOT_SUPPORTED);
6316 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6317 lmp_sc_capable(hdev) &&
6318 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6319 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6320 MGMT_STATUS_REJECTED);
6322 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6323 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6324 MGMT_STATUS_INVALID_PARAMS);
6328 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6329 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6333 changed = !hci_dev_test_and_set_flag(hdev,
6335 if (cp->val == 0x02)
6336 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6338 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6340 changed = hci_dev_test_and_clear_flag(hdev,
6342 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6345 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6350 err = new_settings(hdev, sk);
6357 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6358 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6359 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6363 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6367 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6368 set_secure_conn_complete);
6371 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6372 MGMT_STATUS_FAILED);
6374 mgmt_pending_free(cmd);
6378 hci_dev_unlock(hdev);
6382 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6383 void *data, u16 len)
6385 struct mgmt_mode *cp = data;
6386 bool changed, use_changed;
6389 bt_dev_dbg(hdev, "sock %p", sk);
6391 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6392 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6393 MGMT_STATUS_INVALID_PARAMS);
6398 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6400 changed = hci_dev_test_and_clear_flag(hdev,
6401 HCI_KEEP_DEBUG_KEYS);
6403 if (cp->val == 0x02)
6404 use_changed = !hci_dev_test_and_set_flag(hdev,
6405 HCI_USE_DEBUG_KEYS);
6407 use_changed = hci_dev_test_and_clear_flag(hdev,
6408 HCI_USE_DEBUG_KEYS);
6410 if (hdev_is_powered(hdev) && use_changed &&
6411 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6412 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6413 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6414 sizeof(mode), &mode);
6417 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6422 err = new_settings(hdev, sk);
6425 hci_dev_unlock(hdev);
6429 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6432 struct mgmt_cp_set_privacy *cp = cp_data;
6436 bt_dev_dbg(hdev, "sock %p", sk);
6438 if (!lmp_le_capable(hdev))
6439 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6440 MGMT_STATUS_NOT_SUPPORTED);
6442 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6444 MGMT_STATUS_INVALID_PARAMS);
6446 if (hdev_is_powered(hdev))
6447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6448 MGMT_STATUS_REJECTED);
6452 /* If user space supports this command it is also expected to
6453 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6455 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6458 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6459 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6460 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6461 hci_adv_instances_set_rpa_expired(hdev, true);
6462 if (cp->privacy == 0x02)
6463 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6465 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6467 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6468 memset(hdev->irk, 0, sizeof(hdev->irk));
6469 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6470 hci_adv_instances_set_rpa_expired(hdev, false);
6471 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6474 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6479 err = new_settings(hdev, sk);
6482 hci_dev_unlock(hdev);
6486 static bool irk_is_valid(struct mgmt_irk_info *irk)
6488 switch (irk->addr.type) {
6489 case BDADDR_LE_PUBLIC:
6492 case BDADDR_LE_RANDOM:
6493 /* Two most significant bits shall be set */
6494 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6502 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6505 struct mgmt_cp_load_irks *cp = cp_data;
6506 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6507 sizeof(struct mgmt_irk_info));
6508 u16 irk_count, expected_len;
6511 bt_dev_dbg(hdev, "sock %p", sk);
6513 if (!lmp_le_capable(hdev))
6514 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6515 MGMT_STATUS_NOT_SUPPORTED);
6517 irk_count = __le16_to_cpu(cp->irk_count);
6518 if (irk_count > max_irk_count) {
6519 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6521 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6522 MGMT_STATUS_INVALID_PARAMS);
6525 expected_len = struct_size(cp, irks, irk_count);
6526 if (expected_len != len) {
6527 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6529 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6530 MGMT_STATUS_INVALID_PARAMS);
6533 bt_dev_dbg(hdev, "irk_count %u", irk_count);
6535 for (i = 0; i < irk_count; i++) {
6536 struct mgmt_irk_info *key = &cp->irks[i];
6538 if (!irk_is_valid(key))
6539 return mgmt_cmd_status(sk, hdev->id,
6541 MGMT_STATUS_INVALID_PARAMS);
6546 hci_smp_irks_clear(hdev);
6548 for (i = 0; i < irk_count; i++) {
6549 struct mgmt_irk_info *irk = &cp->irks[i];
6551 if (hci_is_blocked_key(hdev,
6552 HCI_BLOCKED_KEY_TYPE_IRK,
6554 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6559 hci_add_irk(hdev, &irk->addr.bdaddr,
6560 le_addr_type(irk->addr.type), irk->val,
6564 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6566 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6568 hci_dev_unlock(hdev);
6573 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6575 if (key->initiator != 0x00 && key->initiator != 0x01)
6578 switch (key->addr.type) {
6579 case BDADDR_LE_PUBLIC:
6582 case BDADDR_LE_RANDOM:
6583 /* Two most significant bits shall be set */
6584 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6592 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6593 void *cp_data, u16 len)
6595 struct mgmt_cp_load_long_term_keys *cp = cp_data;
6596 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6597 sizeof(struct mgmt_ltk_info));
6598 u16 key_count, expected_len;
6601 bt_dev_dbg(hdev, "sock %p", sk);
6603 if (!lmp_le_capable(hdev))
6604 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6605 MGMT_STATUS_NOT_SUPPORTED);
6607 key_count = __le16_to_cpu(cp->key_count);
6608 if (key_count > max_key_count) {
6609 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6611 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6612 MGMT_STATUS_INVALID_PARAMS);
6615 expected_len = struct_size(cp, keys, key_count);
6616 if (expected_len != len) {
6617 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6619 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6620 MGMT_STATUS_INVALID_PARAMS);
6623 bt_dev_dbg(hdev, "key_count %u", key_count);
6625 for (i = 0; i < key_count; i++) {
6626 struct mgmt_ltk_info *key = &cp->keys[i];
6628 if (!ltk_is_valid(key))
6629 return mgmt_cmd_status(sk, hdev->id,
6630 MGMT_OP_LOAD_LONG_TERM_KEYS,
6631 MGMT_STATUS_INVALID_PARAMS);
6636 hci_smp_ltks_clear(hdev);
6638 for (i = 0; i < key_count; i++) {
6639 struct mgmt_ltk_info *key = &cp->keys[i];
6640 u8 type, authenticated;
6642 if (hci_is_blocked_key(hdev,
6643 HCI_BLOCKED_KEY_TYPE_LTK,
6645 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
6650 switch (key->type) {
6651 case MGMT_LTK_UNAUTHENTICATED:
6652 authenticated = 0x00;
6653 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6655 case MGMT_LTK_AUTHENTICATED:
6656 authenticated = 0x01;
6657 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6659 case MGMT_LTK_P256_UNAUTH:
6660 authenticated = 0x00;
6661 type = SMP_LTK_P256;
6663 case MGMT_LTK_P256_AUTH:
6664 authenticated = 0x01;
6665 type = SMP_LTK_P256;
6667 case MGMT_LTK_P256_DEBUG:
6668 authenticated = 0x00;
6669 type = SMP_LTK_P256_DEBUG;
6675 hci_add_ltk(hdev, &key->addr.bdaddr,
6676 le_addr_type(key->addr.type), type, authenticated,
6677 key->val, key->enc_size, key->ediv, key->rand);
6680 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6683 hci_dev_unlock(hdev);
6688 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
6690 struct mgmt_pending_cmd *cmd = data;
6691 struct hci_conn *conn = cmd->user_data;
6692 struct mgmt_cp_get_conn_info *cp = cmd->param;
6693 struct mgmt_rp_get_conn_info rp;
6696 bt_dev_dbg(hdev, "err %d", err);
6698 memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
6700 status = mgmt_status(err);
6701 if (status == MGMT_STATUS_SUCCESS) {
6702 rp.rssi = conn->rssi;
6703 rp.tx_power = conn->tx_power;
6704 rp.max_tx_power = conn->max_tx_power;
6706 rp.rssi = HCI_RSSI_INVALID;
6707 rp.tx_power = HCI_TX_POWER_INVALID;
6708 rp.max_tx_power = HCI_TX_POWER_INVALID;
6711 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
6715 hci_conn_drop(conn);
6719 mgmt_pending_free(cmd);
6722 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
6724 struct mgmt_pending_cmd *cmd = data;
6725 struct mgmt_cp_get_conn_info *cp = cmd->param;
6726 struct hci_conn *conn;
6730 /* Make sure we are still connected */
6731 if (cp->addr.type == BDADDR_BREDR)
6732 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6735 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6737 if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
6738 if (cmd->user_data) {
6739 hci_conn_drop(cmd->user_data);
6740 hci_conn_put(cmd->user_data);
6741 cmd->user_data = NULL;
6743 return MGMT_STATUS_NOT_CONNECTED;
6746 handle = cpu_to_le16(conn->handle);
6748 /* Refresh RSSI each time */
6749 err = hci_read_rssi_sync(hdev, handle);
6751 /* For LE links TX power does not change thus we don't need to
6752 * query for it once value is known.
6754 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
6755 conn->tx_power == HCI_TX_POWER_INVALID))
6756 err = hci_read_tx_power_sync(hdev, handle, 0x00);
6758 /* Max TX power needs to be read only once per connection */
6759 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
6760 err = hci_read_tx_power_sync(hdev, handle, 0x01);
6765 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6768 struct mgmt_cp_get_conn_info *cp = data;
6769 struct mgmt_rp_get_conn_info rp;
6770 struct hci_conn *conn;
6771 unsigned long conn_info_age;
6774 bt_dev_dbg(hdev, "sock %p", sk);
6776 memset(&rp, 0, sizeof(rp));
6777 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6778 rp.addr.type = cp->addr.type;
6780 if (!bdaddr_type_is_valid(cp->addr.type))
6781 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6782 MGMT_STATUS_INVALID_PARAMS,
6787 if (!hdev_is_powered(hdev)) {
6788 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6789 MGMT_STATUS_NOT_POWERED, &rp,
6794 if (cp->addr.type == BDADDR_BREDR)
6795 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6798 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6800 if (!conn || conn->state != BT_CONNECTED) {
6801 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6802 MGMT_STATUS_NOT_CONNECTED, &rp,
6807 /* To avoid client trying to guess when to poll again for information we
6808 * calculate conn info age as random value between min/max set in hdev.
6810 conn_info_age = hdev->conn_info_min_age +
6811 prandom_u32_max(hdev->conn_info_max_age -
6812 hdev->conn_info_min_age);
6814 /* Query controller to refresh cached values if they are too old or were
6817 if (time_after(jiffies, conn->conn_info_timestamp +
6818 msecs_to_jiffies(conn_info_age)) ||
6819 !conn->conn_info_timestamp) {
6820 struct mgmt_pending_cmd *cmd;
6822 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
6827 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
6828 cmd, get_conn_info_complete);
6831 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6832 MGMT_STATUS_FAILED, &rp, sizeof(rp));
6835 mgmt_pending_free(cmd);
6840 hci_conn_hold(conn);
6841 cmd->user_data = hci_conn_get(conn);
6843 conn->conn_info_timestamp = jiffies;
6845 /* Cache is valid, just reply with values cached in hci_conn */
6846 rp.rssi = conn->rssi;
6847 rp.tx_power = conn->tx_power;
6848 rp.max_tx_power = conn->max_tx_power;
6850 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6851 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6855 hci_dev_unlock(hdev);
6859 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
6861 struct mgmt_pending_cmd *cmd = data;
6862 struct mgmt_cp_get_clock_info *cp = cmd->param;
6863 struct mgmt_rp_get_clock_info rp;
6864 struct hci_conn *conn = cmd->user_data;
6865 u8 status = mgmt_status(err);
6867 bt_dev_dbg(hdev, "err %d", err);
6869 memset(&rp, 0, sizeof(rp));
6870 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6871 rp.addr.type = cp->addr.type;
6876 rp.local_clock = cpu_to_le32(hdev->clock);
6879 rp.piconet_clock = cpu_to_le32(conn->clock);
6880 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6881 hci_conn_drop(conn);
6886 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6889 mgmt_pending_free(cmd);
6892 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
6894 struct mgmt_pending_cmd *cmd = data;
6895 struct mgmt_cp_get_clock_info *cp = cmd->param;
6896 struct hci_cp_read_clock hci_cp;
6897 struct hci_conn *conn = cmd->user_data;
6900 memset(&hci_cp, 0, sizeof(hci_cp));
6901 err = hci_read_clock_sync(hdev, &hci_cp);
6904 /* Make sure connection still exists */
6905 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6908 if (conn && conn == cmd->user_data &&
6909 conn->state == BT_CONNECTED) {
6910 hci_cp.handle = cpu_to_le16(conn->handle);
6911 hci_cp.which = 0x01; /* Piconet clock */
6912 err = hci_read_clock_sync(hdev, &hci_cp);
6913 } else if (cmd->user_data) {
6914 hci_conn_drop(cmd->user_data);
6915 hci_conn_put(cmd->user_data);
6916 cmd->user_data = NULL;
6923 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
6926 struct mgmt_cp_get_clock_info *cp = data;
6927 struct mgmt_rp_get_clock_info rp;
6928 struct mgmt_pending_cmd *cmd;
6929 struct hci_conn *conn;
6932 bt_dev_dbg(hdev, "sock %p", sk);
6934 memset(&rp, 0, sizeof(rp));
6935 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6936 rp.addr.type = cp->addr.type;
6938 if (cp->addr.type != BDADDR_BREDR)
6939 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6940 MGMT_STATUS_INVALID_PARAMS,
6945 if (!hdev_is_powered(hdev)) {
6946 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6947 MGMT_STATUS_NOT_POWERED, &rp,
6952 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6953 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6955 if (!conn || conn->state != BT_CONNECTED) {
6956 err = mgmt_cmd_complete(sk, hdev->id,
6957 MGMT_OP_GET_CLOCK_INFO,
6958 MGMT_STATUS_NOT_CONNECTED,
6966 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
6970 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
6971 get_clock_info_complete);
6974 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6975 MGMT_STATUS_FAILED, &rp, sizeof(rp));
6978 mgmt_pending_free(cmd);
6981 hci_conn_hold(conn);
6982 cmd->user_data = hci_conn_get(conn);
6987 hci_dev_unlock(hdev);
6991 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6993 struct hci_conn *conn;
6995 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6999 if (conn->dst_type != type)
7002 if (conn->state != BT_CONNECTED)
7008 /* This function requires the caller holds hdev->lock */
7009 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7010 u8 addr_type, u8 auto_connect)
7012 struct hci_conn_params *params;
7014 params = hci_conn_params_add(hdev, addr, addr_type);
7018 if (params->auto_connect == auto_connect)
7021 list_del_init(¶ms->action);
7023 switch (auto_connect) {
7024 case HCI_AUTO_CONN_DISABLED:
7025 case HCI_AUTO_CONN_LINK_LOSS:
7026 /* If auto connect is being disabled when we're trying to
7027 * connect to device, keep connecting.
7029 if (params->explicit_connect)
7030 list_add(¶ms->action, &hdev->pend_le_conns);
7032 case HCI_AUTO_CONN_REPORT:
7033 if (params->explicit_connect)
7034 list_add(¶ms->action, &hdev->pend_le_conns);
7036 list_add(¶ms->action, &hdev->pend_le_reports);
7038 case HCI_AUTO_CONN_DIRECT:
7039 case HCI_AUTO_CONN_ALWAYS:
7040 if (!is_connected(hdev, addr, addr_type))
7041 list_add(¶ms->action, &hdev->pend_le_conns);
7045 params->auto_connect = auto_connect;
7047 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7048 addr, addr_type, auto_connect);
7053 static void device_added(struct sock *sk, struct hci_dev *hdev,
7054 bdaddr_t *bdaddr, u8 type, u8 action)
7056 struct mgmt_ev_device_added ev;
7058 bacpy(&ev.addr.bdaddr, bdaddr);
7059 ev.addr.type = type;
7062 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7065 static int add_device_sync(struct hci_dev *hdev, void *data)
7067 return hci_update_passive_scan_sync(hdev);
7070 static int add_device(struct sock *sk, struct hci_dev *hdev,
7071 void *data, u16 len)
7073 struct mgmt_cp_add_device *cp = data;
7074 u8 auto_conn, addr_type;
7075 struct hci_conn_params *params;
7077 u32 current_flags = 0;
7078 u32 supported_flags;
7080 bt_dev_dbg(hdev, "sock %p", sk);
7082 if (!bdaddr_type_is_valid(cp->addr.type) ||
7083 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7084 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7085 MGMT_STATUS_INVALID_PARAMS,
7086 &cp->addr, sizeof(cp->addr));
7088 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7089 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7090 MGMT_STATUS_INVALID_PARAMS,
7091 &cp->addr, sizeof(cp->addr));
7095 if (cp->addr.type == BDADDR_BREDR) {
7096 /* Only incoming connections action is supported for now */
7097 if (cp->action != 0x01) {
7098 err = mgmt_cmd_complete(sk, hdev->id,
7100 MGMT_STATUS_INVALID_PARAMS,
7101 &cp->addr, sizeof(cp->addr));
7105 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7111 hci_req_update_scan(hdev);
7116 addr_type = le_addr_type(cp->addr.type);
7118 if (cp->action == 0x02)
7119 auto_conn = HCI_AUTO_CONN_ALWAYS;
7120 else if (cp->action == 0x01)
7121 auto_conn = HCI_AUTO_CONN_DIRECT;
7123 auto_conn = HCI_AUTO_CONN_REPORT;
7125 /* Kernel internally uses conn_params with resolvable private
7126 * address, but Add Device allows only identity addresses.
7127 * Make sure it is enforced before calling
7128 * hci_conn_params_lookup.
7130 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7131 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7132 MGMT_STATUS_INVALID_PARAMS,
7133 &cp->addr, sizeof(cp->addr));
7137 /* If the connection parameters don't exist for this device,
7138 * they will be created and configured with defaults.
7140 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7142 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7143 MGMT_STATUS_FAILED, &cp->addr,
7147 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7150 current_flags = params->flags;
7153 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7158 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7159 supported_flags = hdev->conn_flags;
7160 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7161 supported_flags, current_flags);
7163 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7164 MGMT_STATUS_SUCCESS, &cp->addr,
7168 hci_dev_unlock(hdev);
7172 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7173 bdaddr_t *bdaddr, u8 type)
7175 struct mgmt_ev_device_removed ev;
7177 bacpy(&ev.addr.bdaddr, bdaddr);
7178 ev.addr.type = type;
7180 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7183 static int remove_device_sync(struct hci_dev *hdev, void *data)
7185 return hci_update_passive_scan_sync(hdev);
7188 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7189 void *data, u16 len)
7191 struct mgmt_cp_remove_device *cp = data;
7194 bt_dev_dbg(hdev, "sock %p", sk);
7198 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7199 struct hci_conn_params *params;
7202 if (!bdaddr_type_is_valid(cp->addr.type)) {
7203 err = mgmt_cmd_complete(sk, hdev->id,
7204 MGMT_OP_REMOVE_DEVICE,
7205 MGMT_STATUS_INVALID_PARAMS,
7206 &cp->addr, sizeof(cp->addr));
7210 if (cp->addr.type == BDADDR_BREDR) {
7211 err = hci_bdaddr_list_del(&hdev->accept_list,
7215 err = mgmt_cmd_complete(sk, hdev->id,
7216 MGMT_OP_REMOVE_DEVICE,
7217 MGMT_STATUS_INVALID_PARAMS,
7223 hci_req_update_scan(hdev);
7225 device_removed(sk, hdev, &cp->addr.bdaddr,
7230 addr_type = le_addr_type(cp->addr.type);
7232 /* Kernel internally uses conn_params with resolvable private
7233 * address, but Remove Device allows only identity addresses.
7234 * Make sure it is enforced before calling
7235 * hci_conn_params_lookup.
7237 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7238 err = mgmt_cmd_complete(sk, hdev->id,
7239 MGMT_OP_REMOVE_DEVICE,
7240 MGMT_STATUS_INVALID_PARAMS,
7241 &cp->addr, sizeof(cp->addr));
7245 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7248 err = mgmt_cmd_complete(sk, hdev->id,
7249 MGMT_OP_REMOVE_DEVICE,
7250 MGMT_STATUS_INVALID_PARAMS,
7251 &cp->addr, sizeof(cp->addr));
7255 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7256 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7257 err = mgmt_cmd_complete(sk, hdev->id,
7258 MGMT_OP_REMOVE_DEVICE,
7259 MGMT_STATUS_INVALID_PARAMS,
7260 &cp->addr, sizeof(cp->addr));
7264 list_del(¶ms->action);
7265 list_del(¶ms->list);
7268 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7270 struct hci_conn_params *p, *tmp;
7271 struct bdaddr_list *b, *btmp;
7273 if (cp->addr.type) {
7274 err = mgmt_cmd_complete(sk, hdev->id,
7275 MGMT_OP_REMOVE_DEVICE,
7276 MGMT_STATUS_INVALID_PARAMS,
7277 &cp->addr, sizeof(cp->addr));
7281 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7282 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7287 hci_req_update_scan(hdev);
7289 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7290 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7292 device_removed(sk, hdev, &p->addr, p->addr_type);
7293 if (p->explicit_connect) {
7294 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7297 list_del(&p->action);
7302 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7305 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7308 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7309 MGMT_STATUS_SUCCESS, &cp->addr,
7312 hci_dev_unlock(hdev);
7316 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7319 struct mgmt_cp_load_conn_param *cp = data;
7320 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7321 sizeof(struct mgmt_conn_param));
7322 u16 param_count, expected_len;
7325 if (!lmp_le_capable(hdev))
7326 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7327 MGMT_STATUS_NOT_SUPPORTED);
7329 param_count = __le16_to_cpu(cp->param_count);
7330 if (param_count > max_param_count) {
7331 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7333 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7334 MGMT_STATUS_INVALID_PARAMS);
7337 expected_len = struct_size(cp, params, param_count);
7338 if (expected_len != len) {
7339 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7341 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7342 MGMT_STATUS_INVALID_PARAMS);
7345 bt_dev_dbg(hdev, "param_count %u", param_count);
7349 hci_conn_params_clear_disabled(hdev);
7351 for (i = 0; i < param_count; i++) {
7352 struct mgmt_conn_param *param = &cp->params[i];
7353 struct hci_conn_params *hci_param;
7354 u16 min, max, latency, timeout;
7357 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
7360 if (param->addr.type == BDADDR_LE_PUBLIC) {
7361 addr_type = ADDR_LE_DEV_PUBLIC;
7362 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7363 addr_type = ADDR_LE_DEV_RANDOM;
7365 bt_dev_err(hdev, "ignoring invalid connection parameters");
7369 min = le16_to_cpu(param->min_interval);
7370 max = le16_to_cpu(param->max_interval);
7371 latency = le16_to_cpu(param->latency);
7372 timeout = le16_to_cpu(param->timeout);
7374 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7375 min, max, latency, timeout);
7377 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7378 bt_dev_err(hdev, "ignoring invalid connection parameters");
7382 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
7385 bt_dev_err(hdev, "failed to add connection parameters");
7389 hci_param->conn_min_interval = min;
7390 hci_param->conn_max_interval = max;
7391 hci_param->conn_latency = latency;
7392 hci_param->supervision_timeout = timeout;
7395 hci_dev_unlock(hdev);
7397 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7401 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7402 void *data, u16 len)
7404 struct mgmt_cp_set_external_config *cp = data;
7408 bt_dev_dbg(hdev, "sock %p", sk);
7410 if (hdev_is_powered(hdev))
7411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7412 MGMT_STATUS_REJECTED);
7414 if (cp->config != 0x00 && cp->config != 0x01)
7415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7416 MGMT_STATUS_INVALID_PARAMS);
7418 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7419 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7420 MGMT_STATUS_NOT_SUPPORTED);
7425 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7427 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7429 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7436 err = new_options(hdev, sk);
7438 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7439 mgmt_index_removed(hdev);
7441 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7442 hci_dev_set_flag(hdev, HCI_CONFIG);
7443 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7445 queue_work(hdev->req_workqueue, &hdev->power_on);
7447 set_bit(HCI_RAW, &hdev->flags);
7448 mgmt_index_added(hdev);
7453 hci_dev_unlock(hdev);
7457 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7458 void *data, u16 len)
7460 struct mgmt_cp_set_public_address *cp = data;
7464 bt_dev_dbg(hdev, "sock %p", sk);
7466 if (hdev_is_powered(hdev))
7467 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7468 MGMT_STATUS_REJECTED);
7470 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7471 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7472 MGMT_STATUS_INVALID_PARAMS);
7474 if (!hdev->set_bdaddr)
7475 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7476 MGMT_STATUS_NOT_SUPPORTED);
7480 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7481 bacpy(&hdev->public_addr, &cp->bdaddr);
7483 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7490 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7491 err = new_options(hdev, sk);
7493 if (is_configured(hdev)) {
7494 mgmt_index_removed(hdev);
7496 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7498 hci_dev_set_flag(hdev, HCI_CONFIG);
7499 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7501 queue_work(hdev->req_workqueue, &hdev->power_on);
7505 hci_dev_unlock(hdev);
7509 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
7512 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7513 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7514 u8 *h192, *r192, *h256, *r256;
7515 struct mgmt_pending_cmd *cmd = data;
7516 struct sk_buff *skb = cmd->skb;
7517 u8 status = mgmt_status(err);
7520 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
7525 status = MGMT_STATUS_FAILED;
7526 else if (IS_ERR(skb))
7527 status = mgmt_status(PTR_ERR(skb));
7529 status = mgmt_status(skb->data[0]);
7532 bt_dev_dbg(hdev, "status %u", status);
7534 mgmt_cp = cmd->param;
7537 status = mgmt_status(status);
7544 } else if (!bredr_sc_enabled(hdev)) {
7545 struct hci_rp_read_local_oob_data *rp;
7547 if (skb->len != sizeof(*rp)) {
7548 status = MGMT_STATUS_FAILED;
7551 status = MGMT_STATUS_SUCCESS;
7552 rp = (void *)skb->data;
7554 eir_len = 5 + 18 + 18;
7561 struct hci_rp_read_local_oob_ext_data *rp;
7563 if (skb->len != sizeof(*rp)) {
7564 status = MGMT_STATUS_FAILED;
7567 status = MGMT_STATUS_SUCCESS;
7568 rp = (void *)skb->data;
7570 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7571 eir_len = 5 + 18 + 18;
7575 eir_len = 5 + 18 + 18 + 18 + 18;
7585 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7592 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7593 hdev->dev_class, 3);
7596 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7597 EIR_SSP_HASH_C192, h192, 16);
7598 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7599 EIR_SSP_RAND_R192, r192, 16);
7603 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7604 EIR_SSP_HASH_C256, h256, 16);
7605 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7606 EIR_SSP_RAND_R256, r256, 16);
7610 mgmt_rp->type = mgmt_cp->type;
7611 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7613 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7614 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7615 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7616 if (err < 0 || status)
7619 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7621 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7622 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7623 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7625 if (skb && !IS_ERR(skb))
7629 mgmt_pending_remove(cmd);
7632 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7633 struct mgmt_cp_read_local_oob_ext_data *cp)
7635 struct mgmt_pending_cmd *cmd;
7638 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7643 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7644 read_local_oob_ext_data_complete);
7647 mgmt_pending_remove(cmd);
7654 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7655 void *data, u16 data_len)
7657 struct mgmt_cp_read_local_oob_ext_data *cp = data;
7658 struct mgmt_rp_read_local_oob_ext_data *rp;
7661 u8 status, flags, role, addr[7], hash[16], rand[16];
7664 bt_dev_dbg(hdev, "sock %p", sk);
7666 if (hdev_is_powered(hdev)) {
7668 case BIT(BDADDR_BREDR):
7669 status = mgmt_bredr_support(hdev);
7675 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7676 status = mgmt_le_support(hdev);
7680 eir_len = 9 + 3 + 18 + 18 + 3;
7683 status = MGMT_STATUS_INVALID_PARAMS;
7688 status = MGMT_STATUS_NOT_POWERED;
7692 rp_len = sizeof(*rp) + eir_len;
7693 rp = kmalloc(rp_len, GFP_ATOMIC);
7697 if (!status && !lmp_ssp_capable(hdev)) {
7698 status = MGMT_STATUS_NOT_SUPPORTED;
7709 case BIT(BDADDR_BREDR):
7710 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7711 err = read_local_ssp_oob_req(hdev, sk, cp);
7712 hci_dev_unlock(hdev);
7716 status = MGMT_STATUS_FAILED;
7719 eir_len = eir_append_data(rp->eir, eir_len,
7721 hdev->dev_class, 3);
7724 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7725 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7726 smp_generate_oob(hdev, hash, rand) < 0) {
7727 hci_dev_unlock(hdev);
7728 status = MGMT_STATUS_FAILED;
7732 /* This should return the active RPA, but since the RPA
7733 * is only programmed on demand, it is really hard to fill
7734 * this in at the moment. For now disallow retrieving
7735 * local out-of-band data when privacy is in use.
7737 * Returning the identity address will not help here since
7738 * pairing happens before the identity resolving key is
7739 * known and thus the connection establishment happens
7740 * based on the RPA and not the identity address.
7742 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7743 hci_dev_unlock(hdev);
7744 status = MGMT_STATUS_REJECTED;
7748 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7749 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7750 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7751 bacmp(&hdev->static_addr, BDADDR_ANY))) {
7752 memcpy(addr, &hdev->static_addr, 6);
7755 memcpy(addr, &hdev->bdaddr, 6);
7759 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7760 addr, sizeof(addr));
7762 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7767 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
7768 &role, sizeof(role));
7770 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
7771 eir_len = eir_append_data(rp->eir, eir_len,
7773 hash, sizeof(hash));
7775 eir_len = eir_append_data(rp->eir, eir_len,
7777 rand, sizeof(rand));
7780 flags = mgmt_get_adv_discov_flags(hdev);
7782 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
7783 flags |= LE_AD_NO_BREDR;
7785 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
7786 &flags, sizeof(flags));
7790 hci_dev_unlock(hdev);
7792 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
7794 status = MGMT_STATUS_SUCCESS;
7797 rp->type = cp->type;
7798 rp->eir_len = cpu_to_le16(eir_len);
7800 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
7801 status, rp, sizeof(*rp) + eir_len);
7802 if (err < 0 || status)
7805 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7806 rp, sizeof(*rp) + eir_len,
7807 HCI_MGMT_OOB_DATA_EVENTS, sk);
7815 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7819 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7820 flags |= MGMT_ADV_FLAG_DISCOV;
7821 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7822 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7823 flags |= MGMT_ADV_FLAG_APPEARANCE;
7824 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7825 flags |= MGMT_ADV_PARAM_DURATION;
7826 flags |= MGMT_ADV_PARAM_TIMEOUT;
7827 flags |= MGMT_ADV_PARAM_INTERVALS;
7828 flags |= MGMT_ADV_PARAM_TX_POWER;
7829 flags |= MGMT_ADV_PARAM_SCAN_RSP;
7831 /* In extended adv TX_POWER returned from Set Adv Param
7832 * will be always valid.
7834 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7835 ext_adv_capable(hdev))
7836 flags |= MGMT_ADV_FLAG_TX_POWER;
7838 if (ext_adv_capable(hdev)) {
7839 flags |= MGMT_ADV_FLAG_SEC_1M;
7840 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7841 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7843 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7844 flags |= MGMT_ADV_FLAG_SEC_2M;
7846 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7847 flags |= MGMT_ADV_FLAG_SEC_CODED;
7853 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7854 void *data, u16 data_len)
7856 struct mgmt_rp_read_adv_features *rp;
7859 struct adv_info *adv_instance;
7860 u32 supported_flags;
7863 bt_dev_dbg(hdev, "sock %p", sk);
7865 if (!lmp_le_capable(hdev))
7866 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7867 MGMT_STATUS_REJECTED);
7871 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7872 rp = kmalloc(rp_len, GFP_ATOMIC);
7874 hci_dev_unlock(hdev);
7878 supported_flags = get_supported_adv_flags(hdev);
7880 rp->supported_flags = cpu_to_le32(supported_flags);
7881 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7882 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7883 rp->max_instances = hdev->le_num_of_adv_sets;
7884 rp->num_instances = hdev->adv_instance_cnt;
7886 instance = rp->instance;
7887 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7888 *instance = adv_instance->instance;
7892 hci_dev_unlock(hdev);
7894 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7895 MGMT_STATUS_SUCCESS, rp, rp_len);
7902 static u8 calculate_name_len(struct hci_dev *hdev)
7904 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7906 return eir_append_local_name(hdev, buf, 0);
7909 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7912 u8 max_len = HCI_MAX_AD_LENGTH;
7915 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7916 MGMT_ADV_FLAG_LIMITED_DISCOV |
7917 MGMT_ADV_FLAG_MANAGED_FLAGS))
7920 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7923 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7924 max_len -= calculate_name_len(hdev);
7926 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7933 static bool flags_managed(u32 adv_flags)
7935 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7936 MGMT_ADV_FLAG_LIMITED_DISCOV |
7937 MGMT_ADV_FLAG_MANAGED_FLAGS);
7940 static bool tx_power_managed(u32 adv_flags)
7942 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7945 static bool name_managed(u32 adv_flags)
7947 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7950 static bool appearance_managed(u32 adv_flags)
7952 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7955 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7956 u8 len, bool is_adv_data)
7961 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7966 /* Make sure that the data is correctly formatted. */
7967 for (i = 0; i < len; i += (cur_len + 1)) {
7973 if (data[i + 1] == EIR_FLAGS &&
7974 (!is_adv_data || flags_managed(adv_flags)))
7977 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7980 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7983 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7986 if (data[i + 1] == EIR_APPEARANCE &&
7987 appearance_managed(adv_flags))
7990 /* If the current field length would exceed the total data
7991 * length, then it's invalid.
7993 if (i + cur_len >= len)
8000 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8002 u32 supported_flags, phy_flags;
8004 /* The current implementation only supports a subset of the specified
8005 * flags. Also need to check mutual exclusiveness of sec flags.
8007 supported_flags = get_supported_adv_flags(hdev);
8008 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8009 if (adv_flags & ~supported_flags ||
8010 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8016 static bool adv_busy(struct hci_dev *hdev)
8018 return pending_find(MGMT_OP_SET_LE, hdev);
8021 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8024 struct adv_info *adv, *n;
8026 bt_dev_dbg(hdev, "err %d", err);
8030 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8037 adv->pending = false;
8041 instance = adv->instance;
8043 if (hdev->cur_adv_instance == instance)
8044 cancel_adv_timeout(hdev);
8046 hci_remove_adv_instance(hdev, instance);
8047 mgmt_advertising_removed(sk, hdev, instance);
8050 hci_dev_unlock(hdev);
8053 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8055 struct mgmt_pending_cmd *cmd = data;
8056 struct mgmt_cp_add_advertising *cp = cmd->param;
8057 struct mgmt_rp_add_advertising rp;
8059 memset(&rp, 0, sizeof(rp));
8061 rp.instance = cp->instance;
8064 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8067 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8068 mgmt_status(err), &rp, sizeof(rp));
8070 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8072 mgmt_pending_free(cmd);
8075 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8077 struct mgmt_pending_cmd *cmd = data;
8078 struct mgmt_cp_add_advertising *cp = cmd->param;
8080 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8083 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8084 void *data, u16 data_len)
8086 struct mgmt_cp_add_advertising *cp = data;
8087 struct mgmt_rp_add_advertising rp;
8090 u16 timeout, duration;
8091 unsigned int prev_instance_cnt;
8092 u8 schedule_instance = 0;
8093 struct adv_info *next_instance;
8095 struct mgmt_pending_cmd *cmd;
8097 bt_dev_dbg(hdev, "sock %p", sk);
8099 status = mgmt_le_support(hdev);
8101 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8104 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8105 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8106 MGMT_STATUS_INVALID_PARAMS);
8108 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8109 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8110 MGMT_STATUS_INVALID_PARAMS);
8112 flags = __le32_to_cpu(cp->flags);
8113 timeout = __le16_to_cpu(cp->timeout);
8114 duration = __le16_to_cpu(cp->duration);
8116 if (!requested_adv_flags_are_valid(hdev, flags))
8117 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8118 MGMT_STATUS_INVALID_PARAMS);
8122 if (timeout && !hdev_is_powered(hdev)) {
8123 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8124 MGMT_STATUS_REJECTED);
8128 if (adv_busy(hdev)) {
8129 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8134 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8135 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8136 cp->scan_rsp_len, false)) {
8137 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8138 MGMT_STATUS_INVALID_PARAMS);
8142 prev_instance_cnt = hdev->adv_instance_cnt;
8144 err = hci_add_adv_instance(hdev, cp->instance, flags,
8145 cp->adv_data_len, cp->data,
8147 cp->data + cp->adv_data_len,
8149 HCI_ADV_TX_POWER_NO_PREFERENCE,
8150 hdev->le_adv_min_interval,
8151 hdev->le_adv_max_interval);
8153 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8154 MGMT_STATUS_FAILED);
8158 /* Only trigger an advertising added event if a new instance was
8161 if (hdev->adv_instance_cnt > prev_instance_cnt)
8162 mgmt_advertising_added(sk, hdev, cp->instance);
8164 if (hdev->cur_adv_instance == cp->instance) {
8165 /* If the currently advertised instance is being changed then
8166 * cancel the current advertising and schedule the next
8167 * instance. If there is only one instance then the overridden
8168 * advertising data will be visible right away.
8170 cancel_adv_timeout(hdev);
8172 next_instance = hci_get_next_instance(hdev, cp->instance);
8174 schedule_instance = next_instance->instance;
8175 } else if (!hdev->adv_instance_timeout) {
8176 /* Immediately advertise the new instance if no other
8177 * instance is currently being advertised.
8179 schedule_instance = cp->instance;
8182 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8183 * there is no instance to be advertised then we have no HCI
8184 * communication to make. Simply return.
8186 if (!hdev_is_powered(hdev) ||
8187 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8188 !schedule_instance) {
8189 rp.instance = cp->instance;
8190 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8191 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8195 /* We're good to go, update advertising data, parameters, and start
8198 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8205 cp->instance = schedule_instance;
8207 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8208 add_advertising_complete);
8210 mgmt_pending_free(cmd);
8213 hci_dev_unlock(hdev);
8218 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8221 struct mgmt_pending_cmd *cmd = data;
8222 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8223 struct mgmt_rp_add_ext_adv_params rp;
8224 struct adv_info *adv;
8227 BT_DBG("%s", hdev->name);
8231 adv = hci_find_adv_instance(hdev, cp->instance);
8235 rp.instance = cp->instance;
8236 rp.tx_power = adv->tx_power;
8238 /* While we're at it, inform userspace of the available space for this
8239 * advertisement, given the flags that will be used.
8241 flags = __le32_to_cpu(cp->flags);
8242 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8243 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8246 /* If this advertisement was previously advertising and we
8247 * failed to update it, we signal that it has been removed and
8248 * delete its structure
8251 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8253 hci_remove_adv_instance(hdev, cp->instance);
8255 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8258 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8259 mgmt_status(err), &rp, sizeof(rp));
8264 mgmt_pending_free(cmd);
8266 hci_dev_unlock(hdev);
8269 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8271 struct mgmt_pending_cmd *cmd = data;
8272 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8274 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8277 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8278 void *data, u16 data_len)
8280 struct mgmt_cp_add_ext_adv_params *cp = data;
8281 struct mgmt_rp_add_ext_adv_params rp;
8282 struct mgmt_pending_cmd *cmd = NULL;
8283 u32 flags, min_interval, max_interval;
8284 u16 timeout, duration;
8289 BT_DBG("%s", hdev->name);
8291 status = mgmt_le_support(hdev);
8293 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8296 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8297 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8298 MGMT_STATUS_INVALID_PARAMS);
8300 /* The purpose of breaking add_advertising into two separate MGMT calls
8301 * for params and data is to allow more parameters to be added to this
8302 * structure in the future. For this reason, we verify that we have the
8303 * bare minimum structure we know of when the interface was defined. Any
8304 * extra parameters we don't know about will be ignored in this request.
8306 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8307 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8308 MGMT_STATUS_INVALID_PARAMS);
8310 flags = __le32_to_cpu(cp->flags);
8312 if (!requested_adv_flags_are_valid(hdev, flags))
8313 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8314 MGMT_STATUS_INVALID_PARAMS);
8318 /* In new interface, we require that we are powered to register */
8319 if (!hdev_is_powered(hdev)) {
8320 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8321 MGMT_STATUS_REJECTED);
8325 if (adv_busy(hdev)) {
8326 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8331 /* Parse defined parameters from request, use defaults otherwise */
8332 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8333 __le16_to_cpu(cp->timeout) : 0;
8335 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8336 __le16_to_cpu(cp->duration) :
8337 hdev->def_multi_adv_rotation_duration;
8339 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8340 __le32_to_cpu(cp->min_interval) :
8341 hdev->le_adv_min_interval;
8343 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8344 __le32_to_cpu(cp->max_interval) :
8345 hdev->le_adv_max_interval;
8347 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8349 HCI_ADV_TX_POWER_NO_PREFERENCE;
8351 /* Create advertising instance with no advertising or response data */
8352 err = hci_add_adv_instance(hdev, cp->instance, flags,
8353 0, NULL, 0, NULL, timeout, duration,
8354 tx_power, min_interval, max_interval);
8357 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8358 MGMT_STATUS_FAILED);
8362 /* Submit request for advertising params if ext adv available */
8363 if (ext_adv_capable(hdev)) {
8364 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8368 hci_remove_adv_instance(hdev, cp->instance);
8372 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8373 add_ext_adv_params_complete);
8375 mgmt_pending_free(cmd);
8377 rp.instance = cp->instance;
8378 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8379 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8380 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8381 err = mgmt_cmd_complete(sk, hdev->id,
8382 MGMT_OP_ADD_EXT_ADV_PARAMS,
8383 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8387 hci_dev_unlock(hdev);
8392 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8394 struct mgmt_pending_cmd *cmd = data;
8395 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8396 struct mgmt_rp_add_advertising rp;
8398 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8400 memset(&rp, 0, sizeof(rp));
8402 rp.instance = cp->instance;
8405 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8408 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8409 mgmt_status(err), &rp, sizeof(rp));
8411 mgmt_pending_free(cmd);
8414 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8416 struct mgmt_pending_cmd *cmd = data;
8417 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8420 if (ext_adv_capable(hdev)) {
8421 err = hci_update_adv_data_sync(hdev, cp->instance);
8425 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8429 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8432 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8435 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8438 struct mgmt_cp_add_ext_adv_data *cp = data;
8439 struct mgmt_rp_add_ext_adv_data rp;
8440 u8 schedule_instance = 0;
8441 struct adv_info *next_instance;
8442 struct adv_info *adv_instance;
8444 struct mgmt_pending_cmd *cmd;
8446 BT_DBG("%s", hdev->name);
8450 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8452 if (!adv_instance) {
8453 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8454 MGMT_STATUS_INVALID_PARAMS);
8458 /* In new interface, we require that we are powered to register */
8459 if (!hdev_is_powered(hdev)) {
8460 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8461 MGMT_STATUS_REJECTED);
8462 goto clear_new_instance;
8465 if (adv_busy(hdev)) {
8466 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8468 goto clear_new_instance;
8471 /* Validate new data */
8472 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8473 cp->adv_data_len, true) ||
8474 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8475 cp->adv_data_len, cp->scan_rsp_len, false)) {
8476 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8477 MGMT_STATUS_INVALID_PARAMS);
8478 goto clear_new_instance;
8481 /* Set the data in the advertising instance */
8482 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8483 cp->data, cp->scan_rsp_len,
8484 cp->data + cp->adv_data_len);
8486 /* If using software rotation, determine next instance to use */
8487 if (hdev->cur_adv_instance == cp->instance) {
8488 /* If the currently advertised instance is being changed
8489 * then cancel the current advertising and schedule the
8490 * next instance. If there is only one instance then the
8491 * overridden advertising data will be visible right
8494 cancel_adv_timeout(hdev);
8496 next_instance = hci_get_next_instance(hdev, cp->instance);
8498 schedule_instance = next_instance->instance;
8499 } else if (!hdev->adv_instance_timeout) {
8500 /* Immediately advertise the new instance if no other
8501 * instance is currently being advertised.
8503 schedule_instance = cp->instance;
8506 /* If the HCI_ADVERTISING flag is set or there is no instance to
8507 * be advertised then we have no HCI communication to make.
8510 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
8511 if (adv_instance->pending) {
8512 mgmt_advertising_added(sk, hdev, cp->instance);
8513 adv_instance->pending = false;
8515 rp.instance = cp->instance;
8516 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8517 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8521 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
8525 goto clear_new_instance;
8528 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
8529 add_ext_adv_data_complete);
8531 mgmt_pending_free(cmd);
8532 goto clear_new_instance;
8535 /* We were successful in updating data, so trigger advertising_added
8536 * event if this is an instance that wasn't previously advertising. If
8537 * a failure occurs in the requests we initiated, we will remove the
8538 * instance again in add_advertising_complete
8540 if (adv_instance->pending)
8541 mgmt_advertising_added(sk, hdev, cp->instance);
8546 hci_remove_adv_instance(hdev, cp->instance);
8549 hci_dev_unlock(hdev);
8554 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8557 struct mgmt_pending_cmd *cmd = data;
8558 struct mgmt_cp_remove_advertising *cp = cmd->param;
8559 struct mgmt_rp_remove_advertising rp;
8561 bt_dev_dbg(hdev, "err %d", err);
8563 memset(&rp, 0, sizeof(rp));
8564 rp.instance = cp->instance;
8567 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8570 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8571 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8573 mgmt_pending_free(cmd);
8576 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8578 struct mgmt_pending_cmd *cmd = data;
8579 struct mgmt_cp_remove_advertising *cp = cmd->param;
8582 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8586 if (list_empty(&hdev->adv_instances))
8587 err = hci_disable_advertising_sync(hdev);
8592 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8593 void *data, u16 data_len)
8595 struct mgmt_cp_remove_advertising *cp = data;
8596 struct mgmt_pending_cmd *cmd;
8599 bt_dev_dbg(hdev, "sock %p", sk);
8603 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8604 err = mgmt_cmd_status(sk, hdev->id,
8605 MGMT_OP_REMOVE_ADVERTISING,
8606 MGMT_STATUS_INVALID_PARAMS);
8610 if (pending_find(MGMT_OP_SET_LE, hdev)) {
8611 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8616 if (list_empty(&hdev->adv_instances)) {
8617 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8618 MGMT_STATUS_INVALID_PARAMS);
8622 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8629 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
8630 remove_advertising_complete);
8632 mgmt_pending_free(cmd);
8635 hci_dev_unlock(hdev);
8640 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8641 void *data, u16 data_len)
8643 struct mgmt_cp_get_adv_size_info *cp = data;
8644 struct mgmt_rp_get_adv_size_info rp;
8645 u32 flags, supported_flags;
8647 bt_dev_dbg(hdev, "sock %p", sk);
8649 if (!lmp_le_capable(hdev))
8650 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8651 MGMT_STATUS_REJECTED);
8653 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8654 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8655 MGMT_STATUS_INVALID_PARAMS);
8657 flags = __le32_to_cpu(cp->flags);
8659 /* The current implementation only supports a subset of the specified
8662 supported_flags = get_supported_adv_flags(hdev);
8663 if (flags & ~supported_flags)
8664 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8665 MGMT_STATUS_INVALID_PARAMS);
8667 rp.instance = cp->instance;
8668 rp.flags = cp->flags;
8669 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8670 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8672 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8673 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8676 static const struct hci_mgmt_handler mgmt_handlers[] = {
8677 { NULL }, /* 0x0000 (no command) */
8678 { read_version, MGMT_READ_VERSION_SIZE,
8680 HCI_MGMT_UNTRUSTED },
8681 { read_commands, MGMT_READ_COMMANDS_SIZE,
8683 HCI_MGMT_UNTRUSTED },
8684 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8686 HCI_MGMT_UNTRUSTED },
8687 { read_controller_info, MGMT_READ_INFO_SIZE,
8688 HCI_MGMT_UNTRUSTED },
8689 { set_powered, MGMT_SETTING_SIZE },
8690 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8691 { set_connectable, MGMT_SETTING_SIZE },
8692 { set_fast_connectable, MGMT_SETTING_SIZE },
8693 { set_bondable, MGMT_SETTING_SIZE },
8694 { set_link_security, MGMT_SETTING_SIZE },
8695 { set_ssp, MGMT_SETTING_SIZE },
8696 { set_hs, MGMT_SETTING_SIZE },
8697 { set_le, MGMT_SETTING_SIZE },
8698 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8699 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8700 { add_uuid, MGMT_ADD_UUID_SIZE },
8701 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8702 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8704 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8706 { disconnect, MGMT_DISCONNECT_SIZE },
8707 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8708 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8709 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8710 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8711 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8712 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8713 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8714 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8715 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8716 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8717 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8718 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8719 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8721 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8722 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8723 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8724 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8725 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8726 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8727 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8728 { set_advertising, MGMT_SETTING_SIZE },
8729 { set_bredr, MGMT_SETTING_SIZE },
8730 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8731 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8732 { set_secure_conn, MGMT_SETTING_SIZE },
8733 { set_debug_keys, MGMT_SETTING_SIZE },
8734 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8735 { load_irks, MGMT_LOAD_IRKS_SIZE,
8737 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8738 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8739 { add_device, MGMT_ADD_DEVICE_SIZE },
8740 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8741 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8743 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8745 HCI_MGMT_UNTRUSTED },
8746 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8747 HCI_MGMT_UNCONFIGURED |
8748 HCI_MGMT_UNTRUSTED },
8749 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8750 HCI_MGMT_UNCONFIGURED },
8751 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8752 HCI_MGMT_UNCONFIGURED },
8753 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8755 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8756 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8758 HCI_MGMT_UNTRUSTED },
8759 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8760 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8762 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8763 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8764 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8765 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8766 HCI_MGMT_UNTRUSTED },
8767 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8768 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8769 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
8770 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
8772 { set_wideband_speech, MGMT_SETTING_SIZE },
8773 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
8774 HCI_MGMT_UNTRUSTED },
8775 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
8776 HCI_MGMT_UNTRUSTED |
8777 HCI_MGMT_HDEV_OPTIONAL },
8778 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
8780 HCI_MGMT_HDEV_OPTIONAL },
8781 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
8782 HCI_MGMT_UNTRUSTED },
8783 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
8785 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
8786 HCI_MGMT_UNTRUSTED },
8787 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
8789 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
8790 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
8791 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
8792 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
8794 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
8795 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
8797 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
8799 { add_adv_patterns_monitor_rssi,
8800 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
8804 void mgmt_index_added(struct hci_dev *hdev)
8806 struct mgmt_ev_ext_index ev;
8808 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8811 switch (hdev->dev_type) {
8813 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8814 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8815 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8818 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8819 HCI_MGMT_INDEX_EVENTS);
8832 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8833 HCI_MGMT_EXT_INDEX_EVENTS);
8836 void mgmt_index_removed(struct hci_dev *hdev)
8838 struct mgmt_ev_ext_index ev;
8839 u8 status = MGMT_STATUS_INVALID_INDEX;
8841 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8844 switch (hdev->dev_type) {
8846 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8848 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8849 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8850 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8853 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8854 HCI_MGMT_INDEX_EVENTS);
8867 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8868 HCI_MGMT_EXT_INDEX_EVENTS);
8871 void mgmt_power_on(struct hci_dev *hdev, int err)
8873 struct cmd_lookup match = { NULL, hdev };
8875 bt_dev_dbg(hdev, "err %d", err);
8880 restart_le_actions(hdev);
8881 hci_update_passive_scan(hdev);
8884 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8886 new_settings(hdev, match.sk);
8891 hci_dev_unlock(hdev);
8894 void __mgmt_power_off(struct hci_dev *hdev)
8896 struct cmd_lookup match = { NULL, hdev };
8897 u8 status, zero_cod[] = { 0, 0, 0 };
8899 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8901 /* If the power off is because of hdev unregistration let
8902 * use the appropriate INVALID_INDEX status. Otherwise use
8903 * NOT_POWERED. We cover both scenarios here since later in
8904 * mgmt_index_removed() any hci_conn callbacks will have already
8905 * been triggered, potentially causing misleading DISCONNECTED
8908 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8909 status = MGMT_STATUS_INVALID_INDEX;
8911 status = MGMT_STATUS_NOT_POWERED;
8913 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8915 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8916 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8917 zero_cod, sizeof(zero_cod),
8918 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8919 ext_info_changed(hdev, NULL);
8922 new_settings(hdev, match.sk);
8928 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8930 struct mgmt_pending_cmd *cmd;
8933 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8937 if (err == -ERFKILL)
8938 status = MGMT_STATUS_RFKILLED;
8940 status = MGMT_STATUS_FAILED;
8942 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8944 mgmt_pending_remove(cmd);
8947 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8950 struct mgmt_ev_new_link_key ev;
8952 memset(&ev, 0, sizeof(ev));
8954 ev.store_hint = persistent;
8955 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8956 ev.key.addr.type = BDADDR_BREDR;
8957 ev.key.type = key->type;
8958 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8959 ev.key.pin_len = key->pin_len;
8961 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8964 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8966 switch (ltk->type) {
8968 case SMP_LTK_RESPONDER:
8969 if (ltk->authenticated)
8970 return MGMT_LTK_AUTHENTICATED;
8971 return MGMT_LTK_UNAUTHENTICATED;
8973 if (ltk->authenticated)
8974 return MGMT_LTK_P256_AUTH;
8975 return MGMT_LTK_P256_UNAUTH;
8976 case SMP_LTK_P256_DEBUG:
8977 return MGMT_LTK_P256_DEBUG;
8980 return MGMT_LTK_UNAUTHENTICATED;
8983 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8985 struct mgmt_ev_new_long_term_key ev;
8987 memset(&ev, 0, sizeof(ev));
8989 /* Devices using resolvable or non-resolvable random addresses
8990 * without providing an identity resolving key don't require
8991 * to store long term keys. Their addresses will change the
8994 * Only when a remote device provides an identity address
8995 * make sure the long term key is stored. If the remote
8996 * identity is known, the long term keys are internally
8997 * mapped to the identity address. So allow static random
8998 * and public addresses here.
9000 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9001 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9002 ev.store_hint = 0x00;
9004 ev.store_hint = persistent;
9006 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9007 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9008 ev.key.type = mgmt_ltk_type(key);
9009 ev.key.enc_size = key->enc_size;
9010 ev.key.ediv = key->ediv;
9011 ev.key.rand = key->rand;
9013 if (key->type == SMP_LTK)
9014 ev.key.initiator = 1;
9016 /* Make sure we copy only the significant bytes based on the
9017 * encryption key size, and set the rest of the value to zeroes.
9019 memcpy(ev.key.val, key->val, key->enc_size);
9020 memset(ev.key.val + key->enc_size, 0,
9021 sizeof(ev.key.val) - key->enc_size);
9023 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9026 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9028 struct mgmt_ev_new_irk ev;
9030 memset(&ev, 0, sizeof(ev));
9032 ev.store_hint = persistent;
9034 bacpy(&ev.rpa, &irk->rpa);
9035 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9036 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9037 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9039 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9042 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9045 struct mgmt_ev_new_csrk ev;
9047 memset(&ev, 0, sizeof(ev));
9049 /* Devices using resolvable or non-resolvable random addresses
9050 * without providing an identity resolving key don't require
9051 * to store signature resolving keys. Their addresses will change
9052 * the next time around.
9054 * Only when a remote device provides an identity address
9055 * make sure the signature resolving key is stored. So allow
9056 * static random and public addresses here.
9058 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9059 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9060 ev.store_hint = 0x00;
9062 ev.store_hint = persistent;
9064 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9065 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9066 ev.key.type = csrk->type;
9067 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9069 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9072 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9073 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9074 u16 max_interval, u16 latency, u16 timeout)
9076 struct mgmt_ev_new_conn_param ev;
9078 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9081 memset(&ev, 0, sizeof(ev));
9082 bacpy(&ev.addr.bdaddr, bdaddr);
9083 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9084 ev.store_hint = store_hint;
9085 ev.min_interval = cpu_to_le16(min_interval);
9086 ev.max_interval = cpu_to_le16(max_interval);
9087 ev.latency = cpu_to_le16(latency);
9088 ev.timeout = cpu_to_le16(timeout);
9090 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9093 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9094 u8 *name, u8 name_len)
9096 struct sk_buff *skb;
9097 struct mgmt_ev_device_connected *ev;
9101 /* allocate buff for LE or BR/EDR adv */
9102 if (conn->le_adv_data_len > 0)
9103 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9104 sizeof(*ev) + conn->le_adv_data_len);
9106 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9107 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9108 eir_precalc_len(sizeof(conn->dev_class)));
9110 ev = skb_put(skb, sizeof(*ev));
9111 bacpy(&ev->addr.bdaddr, &conn->dst);
9112 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9115 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9117 ev->flags = __cpu_to_le32(flags);
9119 /* We must ensure that the EIR Data fields are ordered and
9120 * unique. Keep it simple for now and avoid the problem by not
9121 * adding any BR/EDR data to the LE adv.
9123 if (conn->le_adv_data_len > 0) {
9124 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9125 eir_len = conn->le_adv_data_len;
9128 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9130 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9131 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9132 conn->dev_class, sizeof(conn->dev_class));
9135 ev->eir_len = cpu_to_le16(eir_len);
9137 mgmt_event_skb(skb, NULL);
9140 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9142 struct sock **sk = data;
9144 cmd->cmd_complete(cmd, 0);
9149 mgmt_pending_remove(cmd);
9152 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9154 struct hci_dev *hdev = data;
9155 struct mgmt_cp_unpair_device *cp = cmd->param;
9157 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9159 cmd->cmd_complete(cmd, 0);
9160 mgmt_pending_remove(cmd);
9163 bool mgmt_powering_down(struct hci_dev *hdev)
9165 struct mgmt_pending_cmd *cmd;
9166 struct mgmt_mode *cp;
9168 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9179 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9180 u8 link_type, u8 addr_type, u8 reason,
9181 bool mgmt_connected)
9183 struct mgmt_ev_device_disconnected ev;
9184 struct sock *sk = NULL;
9186 /* The connection is still in hci_conn_hash so test for 1
9187 * instead of 0 to know if this is the last one.
9189 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9190 cancel_delayed_work(&hdev->power_off);
9191 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9194 if (!mgmt_connected)
9197 if (link_type != ACL_LINK && link_type != LE_LINK)
9200 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9202 bacpy(&ev.addr.bdaddr, bdaddr);
9203 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9206 /* Report disconnects due to suspend */
9207 if (hdev->suspended)
9208 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9210 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9215 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9219 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9220 u8 link_type, u8 addr_type, u8 status)
9222 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9223 struct mgmt_cp_disconnect *cp;
9224 struct mgmt_pending_cmd *cmd;
9226 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9229 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9235 if (bacmp(bdaddr, &cp->addr.bdaddr))
9238 if (cp->addr.type != bdaddr_type)
9241 cmd->cmd_complete(cmd, mgmt_status(status));
9242 mgmt_pending_remove(cmd);
9245 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9246 u8 addr_type, u8 status)
9248 struct mgmt_ev_connect_failed ev;
9250 /* The connection is still in hci_conn_hash so test for 1
9251 * instead of 0 to know if this is the last one.
9253 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9254 cancel_delayed_work(&hdev->power_off);
9255 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9258 bacpy(&ev.addr.bdaddr, bdaddr);
9259 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9260 ev.status = mgmt_status(status);
9262 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9265 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9267 struct mgmt_ev_pin_code_request ev;
9269 bacpy(&ev.addr.bdaddr, bdaddr);
9270 ev.addr.type = BDADDR_BREDR;
9273 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9276 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9279 struct mgmt_pending_cmd *cmd;
9281 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9285 cmd->cmd_complete(cmd, mgmt_status(status));
9286 mgmt_pending_remove(cmd);
9289 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9292 struct mgmt_pending_cmd *cmd;
9294 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9298 cmd->cmd_complete(cmd, mgmt_status(status));
9299 mgmt_pending_remove(cmd);
9302 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9303 u8 link_type, u8 addr_type, u32 value,
9306 struct mgmt_ev_user_confirm_request ev;
9308 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9310 bacpy(&ev.addr.bdaddr, bdaddr);
9311 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9312 ev.confirm_hint = confirm_hint;
9313 ev.value = cpu_to_le32(value);
9315 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9319 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9320 u8 link_type, u8 addr_type)
9322 struct mgmt_ev_user_passkey_request ev;
9324 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9326 bacpy(&ev.addr.bdaddr, bdaddr);
9327 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9329 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9333 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9334 u8 link_type, u8 addr_type, u8 status,
9337 struct mgmt_pending_cmd *cmd;
9339 cmd = pending_find(opcode, hdev);
9343 cmd->cmd_complete(cmd, mgmt_status(status));
9344 mgmt_pending_remove(cmd);
9349 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9350 u8 link_type, u8 addr_type, u8 status)
9352 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9353 status, MGMT_OP_USER_CONFIRM_REPLY);
9356 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9357 u8 link_type, u8 addr_type, u8 status)
9359 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9361 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9364 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9365 u8 link_type, u8 addr_type, u8 status)
9367 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9368 status, MGMT_OP_USER_PASSKEY_REPLY);
9371 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9372 u8 link_type, u8 addr_type, u8 status)
9374 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9376 MGMT_OP_USER_PASSKEY_NEG_REPLY);
9379 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9380 u8 link_type, u8 addr_type, u32 passkey,
9383 struct mgmt_ev_passkey_notify ev;
9385 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9387 bacpy(&ev.addr.bdaddr, bdaddr);
9388 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9389 ev.passkey = __cpu_to_le32(passkey);
9390 ev.entered = entered;
9392 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9395 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9397 struct mgmt_ev_auth_failed ev;
9398 struct mgmt_pending_cmd *cmd;
9399 u8 status = mgmt_status(hci_status);
9401 bacpy(&ev.addr.bdaddr, &conn->dst);
9402 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9405 cmd = find_pairing(conn);
9407 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9408 cmd ? cmd->sk : NULL);
9411 cmd->cmd_complete(cmd, status);
9412 mgmt_pending_remove(cmd);
9416 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9418 struct cmd_lookup match = { NULL, hdev };
9422 u8 mgmt_err = mgmt_status(status);
9423 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9424 cmd_status_rsp, &mgmt_err);
9428 if (test_bit(HCI_AUTH, &hdev->flags))
9429 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9431 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9433 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9437 new_settings(hdev, match.sk);
9443 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9445 struct cmd_lookup *match = data;
9447 if (match->sk == NULL) {
9448 match->sk = cmd->sk;
9449 sock_hold(match->sk);
9453 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9456 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9458 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9459 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9460 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9463 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9464 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9465 ext_info_changed(hdev, NULL);
9472 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9474 struct mgmt_cp_set_local_name ev;
9475 struct mgmt_pending_cmd *cmd;
9480 memset(&ev, 0, sizeof(ev));
9481 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9482 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9484 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9486 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9488 /* If this is a HCI command related to powering on the
9489 * HCI dev don't send any mgmt signals.
9491 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9495 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9496 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9497 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
9500 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9504 for (i = 0; i < uuid_count; i++) {
9505 if (!memcmp(uuid, uuids[i], 16))
9512 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9516 while (parsed < eir_len) {
9517 u8 field_len = eir[0];
9524 if (eir_len - parsed < field_len + 1)
9528 case EIR_UUID16_ALL:
9529 case EIR_UUID16_SOME:
9530 for (i = 0; i + 3 <= field_len; i += 2) {
9531 memcpy(uuid, bluetooth_base_uuid, 16);
9532 uuid[13] = eir[i + 3];
9533 uuid[12] = eir[i + 2];
9534 if (has_uuid(uuid, uuid_count, uuids))
9538 case EIR_UUID32_ALL:
9539 case EIR_UUID32_SOME:
9540 for (i = 0; i + 5 <= field_len; i += 4) {
9541 memcpy(uuid, bluetooth_base_uuid, 16);
9542 uuid[15] = eir[i + 5];
9543 uuid[14] = eir[i + 4];
9544 uuid[13] = eir[i + 3];
9545 uuid[12] = eir[i + 2];
9546 if (has_uuid(uuid, uuid_count, uuids))
9550 case EIR_UUID128_ALL:
9551 case EIR_UUID128_SOME:
9552 for (i = 0; i + 17 <= field_len; i += 16) {
9553 memcpy(uuid, eir + i + 2, 16);
9554 if (has_uuid(uuid, uuid_count, uuids))
9560 parsed += field_len + 1;
9561 eir += field_len + 1;
9567 static void restart_le_scan(struct hci_dev *hdev)
9569 /* If controller is not scanning we are done. */
9570 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9573 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9574 hdev->discovery.scan_start +
9575 hdev->discovery.scan_duration))
9578 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9579 DISCOV_LE_RESTART_DELAY);
9582 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9583 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9585 /* If a RSSI threshold has been specified, and
9586 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9587 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9588 * is set, let it through for further processing, as we might need to
9591 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9592 * the results are also dropped.
9594 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9595 (rssi == HCI_RSSI_INVALID ||
9596 (rssi < hdev->discovery.rssi &&
9597 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9600 if (hdev->discovery.uuid_count != 0) {
9601 /* If a list of UUIDs is provided in filter, results with no
9602 * matching UUID should be dropped.
9604 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9605 hdev->discovery.uuids) &&
9606 !eir_has_uuids(scan_rsp, scan_rsp_len,
9607 hdev->discovery.uuid_count,
9608 hdev->discovery.uuids))
9612 /* If duplicate filtering does not report RSSI changes, then restart
9613 * scanning to ensure updated result with updated RSSI values.
9615 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9616 restart_le_scan(hdev);
9618 /* Validate RSSI value against the RSSI threshold once more. */
9619 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9620 rssi < hdev->discovery.rssi)
9627 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
9628 bdaddr_t *bdaddr, u8 addr_type)
9630 struct mgmt_ev_adv_monitor_device_lost ev;
9632 ev.monitor_handle = cpu_to_le16(handle);
9633 bacpy(&ev.addr.bdaddr, bdaddr);
9634 ev.addr.type = addr_type;
9636 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
9640 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
9641 struct sk_buff *skb,
9642 struct sock *skip_sk,
9645 struct sk_buff *advmon_skb;
9646 size_t advmon_skb_len;
9647 __le16 *monitor_handle;
9652 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
9653 sizeof(struct mgmt_ev_device_found)) + skb->len;
9654 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
9659 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
9660 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
9661 * store monitor_handle of the matched monitor.
9663 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
9664 *monitor_handle = cpu_to_le16(handle);
9665 skb_put_data(advmon_skb, skb->data, skb->len);
9667 mgmt_event_skb(advmon_skb, skip_sk);
9670 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
9671 bdaddr_t *bdaddr, bool report_device,
9672 struct sk_buff *skb,
9673 struct sock *skip_sk)
9675 struct monitored_device *dev, *tmp;
9676 bool matched = false;
9677 bool notified = false;
9679 /* We have received the Advertisement Report because:
9680 * 1. the kernel has initiated active discovery
9681 * 2. if not, we have pend_le_reports > 0 in which case we are doing
9683 * 3. if none of the above is true, we have one or more active
9684 * Advertisement Monitor
9686 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
9687 * and report ONLY one advertisement per device for the matched Monitor
9688 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
9690 * For case 3, since we are not active scanning and all advertisements
9691 * received are due to a matched Advertisement Monitor, report all
9692 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
9694 if (report_device && !hdev->advmon_pend_notify) {
9695 mgmt_event_skb(skb, skip_sk);
9699 hdev->advmon_pend_notify = false;
9701 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
9702 if (!bacmp(&dev->bdaddr, bdaddr)) {
9705 if (!dev->notified) {
9706 mgmt_send_adv_monitor_device_found(hdev, skb,
9710 dev->notified = true;
9715 hdev->advmon_pend_notify = true;
9718 if (!report_device &&
9719 ((matched && !notified) || !msft_monitor_supported(hdev))) {
9720 /* Handle 0 indicates that we are not active scanning and this
9721 * is a subsequent advertisement report for an already matched
9722 * Advertisement Monitor or the controller offloading support
9725 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
9729 mgmt_event_skb(skb, skip_sk);
9734 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9735 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9736 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9738 struct sk_buff *skb;
9739 struct mgmt_ev_device_found *ev;
9740 bool report_device = hci_discovery_active(hdev);
9742 /* Don't send events for a non-kernel initiated discovery. With
9743 * LE one exception is if we have pend_le_reports > 0 in which
9744 * case we're doing passive scanning and want these events.
9746 if (!hci_discovery_active(hdev)) {
9747 if (link_type == ACL_LINK)
9749 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
9750 report_device = true;
9751 else if (!hci_is_adv_monitoring(hdev))
9755 if (hdev->discovery.result_filtering) {
9756 /* We are using service discovery */
9757 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
9762 if (hdev->discovery.limited) {
9763 /* Check for limited discoverable bit */
9765 if (!(dev_class[1] & 0x20))
9768 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
9769 if (!flags || !(flags[0] & LE_AD_LIMITED))
9774 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
9775 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
9776 sizeof(*ev) + eir_len + scan_rsp_len + 5);
9780 ev = skb_put(skb, sizeof(*ev));
9782 /* In case of device discovery with BR/EDR devices (pre 1.2), the
9783 * RSSI value was reported as 0 when not available. This behavior
9784 * is kept when using device discovery. This is required for full
9785 * backwards compatibility with the API.
9787 * However when using service discovery, the value 127 will be
9788 * returned when the RSSI is not available.
9790 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9791 link_type == ACL_LINK)
9794 bacpy(&ev->addr.bdaddr, bdaddr);
9795 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9797 ev->flags = cpu_to_le32(flags);
9800 /* Copy EIR or advertising data into event */
9801 skb_put_data(skb, eir, eir_len);
9803 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
9806 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
9808 skb_put_data(skb, eir_cod, sizeof(eir_cod));
9811 if (scan_rsp_len > 0)
9812 /* Append scan response data to event */
9813 skb_put_data(skb, scan_rsp, scan_rsp_len);
9815 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9817 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
9820 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9821 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9823 struct sk_buff *skb;
9824 struct mgmt_ev_device_found *ev;
9828 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
9829 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
9831 ev = skb_put(skb, sizeof(*ev));
9832 bacpy(&ev->addr.bdaddr, bdaddr);
9833 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9837 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9839 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9841 ev->eir_len = cpu_to_le16(eir_len);
9842 ev->flags = cpu_to_le32(flags);
9844 mgmt_event_skb(skb, NULL);
9847 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9849 struct mgmt_ev_discovering ev;
9851 bt_dev_dbg(hdev, "discovering %u", discovering);
9853 memset(&ev, 0, sizeof(ev));
9854 ev.type = hdev->discovery.type;
9855 ev.discovering = discovering;
9857 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9860 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9862 struct mgmt_ev_controller_suspend ev;
9864 ev.suspend_state = state;
9865 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9868 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9871 struct mgmt_ev_controller_resume ev;
9873 ev.wake_reason = reason;
9875 bacpy(&ev.addr.bdaddr, bdaddr);
9876 ev.addr.type = addr_type;
9878 memset(&ev.addr, 0, sizeof(ev.addr));
9881 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9884 static struct hci_mgmt_chan chan = {
9885 .channel = HCI_CHANNEL_CONTROL,
9886 .handler_count = ARRAY_SIZE(mgmt_handlers),
9887 .handlers = mgmt_handlers,
9888 .hdev_init = mgmt_init_hdev,
9893 return hci_mgmt_chan_register(&chan);
9896 void mgmt_exit(void)
9898 hci_mgmt_chan_unregister(&chan);