/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
42 #define MGMT_VERSION 1
43 #define MGMT_REVISION 20
45 static const u16 mgmt_commands[] = {
46 MGMT_OP_READ_INDEX_LIST,
49 MGMT_OP_SET_DISCOVERABLE,
50 MGMT_OP_SET_CONNECTABLE,
51 MGMT_OP_SET_FAST_CONNECTABLE,
53 MGMT_OP_SET_LINK_SECURITY,
57 MGMT_OP_SET_DEV_CLASS,
58 MGMT_OP_SET_LOCAL_NAME,
61 MGMT_OP_LOAD_LINK_KEYS,
62 MGMT_OP_LOAD_LONG_TERM_KEYS,
64 MGMT_OP_GET_CONNECTIONS,
65 MGMT_OP_PIN_CODE_REPLY,
66 MGMT_OP_PIN_CODE_NEG_REPLY,
67 MGMT_OP_SET_IO_CAPABILITY,
69 MGMT_OP_CANCEL_PAIR_DEVICE,
70 MGMT_OP_UNPAIR_DEVICE,
71 MGMT_OP_USER_CONFIRM_REPLY,
72 MGMT_OP_USER_CONFIRM_NEG_REPLY,
73 MGMT_OP_USER_PASSKEY_REPLY,
74 MGMT_OP_USER_PASSKEY_NEG_REPLY,
75 MGMT_OP_READ_LOCAL_OOB_DATA,
76 MGMT_OP_ADD_REMOTE_OOB_DATA,
77 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
78 MGMT_OP_START_DISCOVERY,
79 MGMT_OP_STOP_DISCOVERY,
82 MGMT_OP_UNBLOCK_DEVICE,
83 MGMT_OP_SET_DEVICE_ID,
84 MGMT_OP_SET_ADVERTISING,
86 MGMT_OP_SET_STATIC_ADDRESS,
87 MGMT_OP_SET_SCAN_PARAMS,
88 MGMT_OP_SET_SECURE_CONN,
89 MGMT_OP_SET_DEBUG_KEYS,
92 MGMT_OP_GET_CONN_INFO,
93 MGMT_OP_GET_CLOCK_INFO,
95 MGMT_OP_REMOVE_DEVICE,
96 MGMT_OP_LOAD_CONN_PARAM,
97 MGMT_OP_READ_UNCONF_INDEX_LIST,
98 MGMT_OP_READ_CONFIG_INFO,
99 MGMT_OP_SET_EXTERNAL_CONFIG,
100 MGMT_OP_SET_PUBLIC_ADDRESS,
101 MGMT_OP_START_SERVICE_DISCOVERY,
102 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
103 MGMT_OP_READ_EXT_INDEX_LIST,
104 MGMT_OP_READ_ADV_FEATURES,
105 MGMT_OP_ADD_ADVERTISING,
106 MGMT_OP_REMOVE_ADVERTISING,
107 MGMT_OP_GET_ADV_SIZE_INFO,
108 MGMT_OP_START_LIMITED_DISCOVERY,
109 MGMT_OP_READ_EXT_INFO,
110 MGMT_OP_SET_APPEARANCE,
111 MGMT_OP_GET_PHY_CONFIGURATION,
112 MGMT_OP_SET_PHY_CONFIGURATION,
113 MGMT_OP_SET_BLOCKED_KEYS,
114 MGMT_OP_SET_WIDEBAND_SPEECH,
115 MGMT_OP_READ_CONTROLLER_CAP,
116 MGMT_OP_READ_EXP_FEATURES_INFO,
117 MGMT_OP_SET_EXP_FEATURE,
118 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
119 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
120 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
121 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
122 MGMT_OP_GET_DEVICE_FLAGS,
123 MGMT_OP_SET_DEVICE_FLAGS,
124 MGMT_OP_READ_ADV_MONITOR_FEATURES,
125 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
126 MGMT_OP_REMOVE_ADV_MONITOR,
127 MGMT_OP_ADD_EXT_ADV_PARAMS,
128 MGMT_OP_ADD_EXT_ADV_DATA,
129 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132 static const u16 mgmt_events[] = {
133 MGMT_EV_CONTROLLER_ERROR,
135 MGMT_EV_INDEX_REMOVED,
136 MGMT_EV_NEW_SETTINGS,
137 MGMT_EV_CLASS_OF_DEV_CHANGED,
138 MGMT_EV_LOCAL_NAME_CHANGED,
139 MGMT_EV_NEW_LINK_KEY,
140 MGMT_EV_NEW_LONG_TERM_KEY,
141 MGMT_EV_DEVICE_CONNECTED,
142 MGMT_EV_DEVICE_DISCONNECTED,
143 MGMT_EV_CONNECT_FAILED,
144 MGMT_EV_PIN_CODE_REQUEST,
145 MGMT_EV_USER_CONFIRM_REQUEST,
146 MGMT_EV_USER_PASSKEY_REQUEST,
148 MGMT_EV_DEVICE_FOUND,
150 MGMT_EV_DEVICE_BLOCKED,
151 MGMT_EV_DEVICE_UNBLOCKED,
152 MGMT_EV_DEVICE_UNPAIRED,
153 MGMT_EV_PASSKEY_NOTIFY,
156 MGMT_EV_DEVICE_ADDED,
157 MGMT_EV_DEVICE_REMOVED,
158 MGMT_EV_NEW_CONN_PARAM,
159 MGMT_EV_UNCONF_INDEX_ADDED,
160 MGMT_EV_UNCONF_INDEX_REMOVED,
161 MGMT_EV_NEW_CONFIG_OPTIONS,
162 MGMT_EV_EXT_INDEX_ADDED,
163 MGMT_EV_EXT_INDEX_REMOVED,
164 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
165 MGMT_EV_ADVERTISING_ADDED,
166 MGMT_EV_ADVERTISING_REMOVED,
167 MGMT_EV_EXT_INFO_CHANGED,
168 MGMT_EV_PHY_CONFIGURATION_CHANGED,
169 MGMT_EV_EXP_FEATURE_CHANGED,
170 MGMT_EV_DEVICE_FLAGS_CHANGED,
171 MGMT_EV_ADV_MONITOR_ADDED,
172 MGMT_EV_ADV_MONITOR_REMOVED,
173 MGMT_EV_CONTROLLER_SUSPEND,
174 MGMT_EV_CONTROLLER_RESUME,
177 static const u16 mgmt_untrusted_commands[] = {
178 MGMT_OP_READ_INDEX_LIST,
180 MGMT_OP_READ_UNCONF_INDEX_LIST,
181 MGMT_OP_READ_CONFIG_INFO,
182 MGMT_OP_READ_EXT_INDEX_LIST,
183 MGMT_OP_READ_EXT_INFO,
184 MGMT_OP_READ_CONTROLLER_CAP,
185 MGMT_OP_READ_EXP_FEATURES_INFO,
186 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
187 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
190 static const u16 mgmt_untrusted_events[] = {
192 MGMT_EV_INDEX_REMOVED,
193 MGMT_EV_NEW_SETTINGS,
194 MGMT_EV_CLASS_OF_DEV_CHANGED,
195 MGMT_EV_LOCAL_NAME_CHANGED,
196 MGMT_EV_UNCONF_INDEX_ADDED,
197 MGMT_EV_UNCONF_INDEX_REMOVED,
198 MGMT_EV_NEW_CONFIG_OPTIONS,
199 MGMT_EV_EXT_INDEX_ADDED,
200 MGMT_EV_EXT_INDEX_REMOVED,
201 MGMT_EV_EXT_INFO_CHANGED,
202 MGMT_EV_EXP_FEATURE_CHANGED,
205 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
207 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
208 "\x00\x00\x00\x00\x00\x00\x00\x00"
210 /* HCI to MGMT error code conversion table */
211 static const u8 mgmt_status_table[] = {
213 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
214 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
215 MGMT_STATUS_FAILED, /* Hardware Failure */
216 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
217 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
218 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
219 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
220 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
221 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
222 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
223 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
224 MGMT_STATUS_BUSY, /* Command Disallowed */
225 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
226 MGMT_STATUS_REJECTED, /* Rejected Security */
227 MGMT_STATUS_REJECTED, /* Rejected Personal */
228 MGMT_STATUS_TIMEOUT, /* Host Timeout */
229 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
230 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
231 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
232 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
233 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
234 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
235 MGMT_STATUS_BUSY, /* Repeated Attempts */
236 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
237 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
238 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
239 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
240 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
241 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
242 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
243 MGMT_STATUS_FAILED, /* Unspecified Error */
244 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
245 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
246 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
247 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
248 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
249 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
250 MGMT_STATUS_FAILED, /* Unit Link Key Used */
251 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
252 MGMT_STATUS_TIMEOUT, /* Instant Passed */
253 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
254 MGMT_STATUS_FAILED, /* Transaction Collision */
255 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
256 MGMT_STATUS_REJECTED, /* QoS Rejected */
257 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
258 MGMT_STATUS_REJECTED, /* Insufficient Security */
259 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
260 MGMT_STATUS_BUSY, /* Role Switch Pending */
261 MGMT_STATUS_FAILED, /* Slot Violation */
262 MGMT_STATUS_FAILED, /* Role Switch Failed */
263 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
264 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
265 MGMT_STATUS_BUSY, /* Host Busy Pairing */
266 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
267 MGMT_STATUS_BUSY, /* Controller Busy */
268 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
269 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
270 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
271 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
272 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
275 static u8 mgmt_status(u8 hci_status)
277 if (hci_status < ARRAY_SIZE(mgmt_status_table))
278 return mgmt_status_table[hci_status];
280 return MGMT_STATUS_FAILED;
283 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
286 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
290 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
291 u16 len, int flag, struct sock *skip_sk)
293 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
297 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
298 struct sock *skip_sk)
300 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
301 HCI_SOCK_TRUSTED, skip_sk);
304 static u8 le_addr_type(u8 mgmt_addr_type)
306 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
307 return ADDR_LE_DEV_PUBLIC;
309 return ADDR_LE_DEV_RANDOM;
312 void mgmt_fill_version_info(void *ver)
314 struct mgmt_rp_read_version *rp = ver;
316 rp->version = MGMT_VERSION;
317 rp->revision = cpu_to_le16(MGMT_REVISION);
320 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
323 struct mgmt_rp_read_version rp;
325 bt_dev_dbg(hdev, "sock %p", sk);
327 mgmt_fill_version_info(&rp);
329 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
333 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
336 struct mgmt_rp_read_commands *rp;
337 u16 num_commands, num_events;
341 bt_dev_dbg(hdev, "sock %p", sk);
343 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
344 num_commands = ARRAY_SIZE(mgmt_commands);
345 num_events = ARRAY_SIZE(mgmt_events);
347 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
348 num_events = ARRAY_SIZE(mgmt_untrusted_events);
351 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
353 rp = kmalloc(rp_size, GFP_KERNEL);
357 rp->num_commands = cpu_to_le16(num_commands);
358 rp->num_events = cpu_to_le16(num_events);
360 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
361 __le16 *opcode = rp->opcodes;
363 for (i = 0; i < num_commands; i++, opcode++)
364 put_unaligned_le16(mgmt_commands[i], opcode);
366 for (i = 0; i < num_events; i++, opcode++)
367 put_unaligned_le16(mgmt_events[i], opcode);
369 __le16 *opcode = rp->opcodes;
371 for (i = 0; i < num_commands; i++, opcode++)
372 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
374 for (i = 0; i < num_events; i++, opcode++)
375 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
378 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
385 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
388 struct mgmt_rp_read_index_list *rp;
394 bt_dev_dbg(hdev, "sock %p", sk);
396 read_lock(&hci_dev_list_lock);
399 list_for_each_entry(d, &hci_dev_list, list) {
400 if (d->dev_type == HCI_PRIMARY &&
401 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
405 rp_len = sizeof(*rp) + (2 * count);
406 rp = kmalloc(rp_len, GFP_ATOMIC);
408 read_unlock(&hci_dev_list_lock);
413 list_for_each_entry(d, &hci_dev_list, list) {
414 if (hci_dev_test_flag(d, HCI_SETUP) ||
415 hci_dev_test_flag(d, HCI_CONFIG) ||
416 hci_dev_test_flag(d, HCI_USER_CHANNEL))
419 /* Devices marked as raw-only are neither configured
420 * nor unconfigured controllers.
422 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
425 if (d->dev_type == HCI_PRIMARY &&
426 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
427 rp->index[count++] = cpu_to_le16(d->id);
428 bt_dev_dbg(hdev, "Added hci%u", d->id);
432 rp->num_controllers = cpu_to_le16(count);
433 rp_len = sizeof(*rp) + (2 * count);
435 read_unlock(&hci_dev_list_lock);
437 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
445 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
446 void *data, u16 data_len)
448 struct mgmt_rp_read_unconf_index_list *rp;
454 bt_dev_dbg(hdev, "sock %p", sk);
456 read_lock(&hci_dev_list_lock);
459 list_for_each_entry(d, &hci_dev_list, list) {
460 if (d->dev_type == HCI_PRIMARY &&
461 hci_dev_test_flag(d, HCI_UNCONFIGURED))
465 rp_len = sizeof(*rp) + (2 * count);
466 rp = kmalloc(rp_len, GFP_ATOMIC);
468 read_unlock(&hci_dev_list_lock);
473 list_for_each_entry(d, &hci_dev_list, list) {
474 if (hci_dev_test_flag(d, HCI_SETUP) ||
475 hci_dev_test_flag(d, HCI_CONFIG) ||
476 hci_dev_test_flag(d, HCI_USER_CHANNEL))
479 /* Devices marked as raw-only are neither configured
480 * nor unconfigured controllers.
482 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
485 if (d->dev_type == HCI_PRIMARY &&
486 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
487 rp->index[count++] = cpu_to_le16(d->id);
488 bt_dev_dbg(hdev, "Added hci%u", d->id);
492 rp->num_controllers = cpu_to_le16(count);
493 rp_len = sizeof(*rp) + (2 * count);
495 read_unlock(&hci_dev_list_lock);
497 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
498 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
505 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
506 void *data, u16 data_len)
508 struct mgmt_rp_read_ext_index_list *rp;
513 bt_dev_dbg(hdev, "sock %p", sk);
515 read_lock(&hci_dev_list_lock);
518 list_for_each_entry(d, &hci_dev_list, list) {
519 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
523 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
525 read_unlock(&hci_dev_list_lock);
530 list_for_each_entry(d, &hci_dev_list, list) {
531 if (hci_dev_test_flag(d, HCI_SETUP) ||
532 hci_dev_test_flag(d, HCI_CONFIG) ||
533 hci_dev_test_flag(d, HCI_USER_CHANNEL))
536 /* Devices marked as raw-only are neither configured
537 * nor unconfigured controllers.
539 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
542 if (d->dev_type == HCI_PRIMARY) {
543 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
544 rp->entry[count].type = 0x01;
546 rp->entry[count].type = 0x00;
547 } else if (d->dev_type == HCI_AMP) {
548 rp->entry[count].type = 0x02;
553 rp->entry[count].bus = d->bus;
554 rp->entry[count++].index = cpu_to_le16(d->id);
555 bt_dev_dbg(hdev, "Added hci%u", d->id);
558 rp->num_controllers = cpu_to_le16(count);
560 read_unlock(&hci_dev_list_lock);
562 /* If this command is called at least once, then all the
563 * default index and unconfigured index events are disabled
564 * and from now on only extended index events are used.
566 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
567 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
568 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
570 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
571 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
572 struct_size(rp, entry, count));
579 static bool is_configured(struct hci_dev *hdev)
581 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
582 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
585 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
586 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
587 !bacmp(&hdev->public_addr, BDADDR_ANY))
593 static __le32 get_missing_options(struct hci_dev *hdev)
597 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
598 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
599 options |= MGMT_OPTION_EXTERNAL_CONFIG;
601 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
602 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
603 !bacmp(&hdev->public_addr, BDADDR_ANY))
604 options |= MGMT_OPTION_PUBLIC_ADDRESS;
606 return cpu_to_le32(options);
609 static int new_options(struct hci_dev *hdev, struct sock *skip)
611 __le32 options = get_missing_options(hdev);
613 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
614 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
617 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
619 __le32 options = get_missing_options(hdev);
621 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
625 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
626 void *data, u16 data_len)
628 struct mgmt_rp_read_config_info rp;
631 bt_dev_dbg(hdev, "sock %p", sk);
635 memset(&rp, 0, sizeof(rp));
636 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
638 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
639 options |= MGMT_OPTION_EXTERNAL_CONFIG;
641 if (hdev->set_bdaddr)
642 options |= MGMT_OPTION_PUBLIC_ADDRESS;
644 rp.supported_options = cpu_to_le32(options);
645 rp.missing_options = get_missing_options(hdev);
647 hci_dev_unlock(hdev);
649 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
653 static u32 get_supported_phys(struct hci_dev *hdev)
655 u32 supported_phys = 0;
657 if (lmp_bredr_capable(hdev)) {
658 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
660 if (hdev->features[0][0] & LMP_3SLOT)
661 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
663 if (hdev->features[0][0] & LMP_5SLOT)
664 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
666 if (lmp_edr_2m_capable(hdev)) {
667 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
669 if (lmp_edr_3slot_capable(hdev))
670 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
672 if (lmp_edr_5slot_capable(hdev))
673 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
675 if (lmp_edr_3m_capable(hdev)) {
676 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
678 if (lmp_edr_3slot_capable(hdev))
679 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
681 if (lmp_edr_5slot_capable(hdev))
682 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
687 if (lmp_le_capable(hdev)) {
688 supported_phys |= MGMT_PHY_LE_1M_TX;
689 supported_phys |= MGMT_PHY_LE_1M_RX;
691 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
692 supported_phys |= MGMT_PHY_LE_2M_TX;
693 supported_phys |= MGMT_PHY_LE_2M_RX;
696 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
697 supported_phys |= MGMT_PHY_LE_CODED_TX;
698 supported_phys |= MGMT_PHY_LE_CODED_RX;
702 return supported_phys;
705 static u32 get_selected_phys(struct hci_dev *hdev)
707 u32 selected_phys = 0;
709 if (lmp_bredr_capable(hdev)) {
710 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
712 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
713 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
715 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
716 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
718 if (lmp_edr_2m_capable(hdev)) {
719 if (!(hdev->pkt_type & HCI_2DH1))
720 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
722 if (lmp_edr_3slot_capable(hdev) &&
723 !(hdev->pkt_type & HCI_2DH3))
724 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
726 if (lmp_edr_5slot_capable(hdev) &&
727 !(hdev->pkt_type & HCI_2DH5))
728 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
730 if (lmp_edr_3m_capable(hdev)) {
731 if (!(hdev->pkt_type & HCI_3DH1))
732 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
734 if (lmp_edr_3slot_capable(hdev) &&
735 !(hdev->pkt_type & HCI_3DH3))
736 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
738 if (lmp_edr_5slot_capable(hdev) &&
739 !(hdev->pkt_type & HCI_3DH5))
740 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
745 if (lmp_le_capable(hdev)) {
746 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
747 selected_phys |= MGMT_PHY_LE_1M_TX;
749 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
750 selected_phys |= MGMT_PHY_LE_1M_RX;
752 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
753 selected_phys |= MGMT_PHY_LE_2M_TX;
755 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
756 selected_phys |= MGMT_PHY_LE_2M_RX;
758 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
759 selected_phys |= MGMT_PHY_LE_CODED_TX;
761 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
762 selected_phys |= MGMT_PHY_LE_CODED_RX;
765 return selected_phys;
768 static u32 get_configurable_phys(struct hci_dev *hdev)
770 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
771 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
774 static u32 get_supported_settings(struct hci_dev *hdev)
778 settings |= MGMT_SETTING_POWERED;
779 settings |= MGMT_SETTING_BONDABLE;
780 settings |= MGMT_SETTING_DEBUG_KEYS;
781 settings |= MGMT_SETTING_CONNECTABLE;
782 settings |= MGMT_SETTING_DISCOVERABLE;
784 if (lmp_bredr_capable(hdev)) {
785 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
786 settings |= MGMT_SETTING_FAST_CONNECTABLE;
787 settings |= MGMT_SETTING_BREDR;
788 settings |= MGMT_SETTING_LINK_SECURITY;
790 if (lmp_ssp_capable(hdev)) {
791 settings |= MGMT_SETTING_SSP;
792 if (IS_ENABLED(CONFIG_BT_HS))
793 settings |= MGMT_SETTING_HS;
796 if (lmp_sc_capable(hdev))
797 settings |= MGMT_SETTING_SECURE_CONN;
799 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
801 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
804 if (lmp_le_capable(hdev)) {
805 settings |= MGMT_SETTING_LE;
806 settings |= MGMT_SETTING_SECURE_CONN;
807 settings |= MGMT_SETTING_PRIVACY;
808 settings |= MGMT_SETTING_STATIC_ADDRESS;
810 /* When the experimental feature for LL Privacy support is
811 * enabled, then advertising is no longer supported.
813 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
814 settings |= MGMT_SETTING_ADVERTISING;
817 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
819 settings |= MGMT_SETTING_CONFIGURATION;
821 settings |= MGMT_SETTING_PHY_CONFIGURATION;
826 static u32 get_current_settings(struct hci_dev *hdev)
830 if (hdev_is_powered(hdev))
831 settings |= MGMT_SETTING_POWERED;
833 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
834 settings |= MGMT_SETTING_CONNECTABLE;
836 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
837 settings |= MGMT_SETTING_FAST_CONNECTABLE;
839 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
840 settings |= MGMT_SETTING_DISCOVERABLE;
842 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
843 settings |= MGMT_SETTING_BONDABLE;
845 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
846 settings |= MGMT_SETTING_BREDR;
848 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
849 settings |= MGMT_SETTING_LE;
851 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
852 settings |= MGMT_SETTING_LINK_SECURITY;
854 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
855 settings |= MGMT_SETTING_SSP;
857 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
858 settings |= MGMT_SETTING_HS;
860 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
861 settings |= MGMT_SETTING_ADVERTISING;
863 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
864 settings |= MGMT_SETTING_SECURE_CONN;
866 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
867 settings |= MGMT_SETTING_DEBUG_KEYS;
869 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
870 settings |= MGMT_SETTING_PRIVACY;
872 /* The current setting for static address has two purposes. The
873 * first is to indicate if the static address will be used and
874 * the second is to indicate if it is actually set.
876 * This means if the static address is not configured, this flag
877 * will never be set. If the address is configured, then if the
878 * address is actually used decides if the flag is set or not.
880 * For single mode LE only controllers and dual-mode controllers
881 * with BR/EDR disabled, the existence of the static address will
884 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
885 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
886 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
887 if (bacmp(&hdev->static_addr, BDADDR_ANY))
888 settings |= MGMT_SETTING_STATIC_ADDRESS;
891 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
892 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
897 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
899 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
902 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
903 struct hci_dev *hdev,
906 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
909 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
911 struct mgmt_pending_cmd *cmd;
913 /* If there's a pending mgmt command the flags will not yet have
914 * their final values, so check for this first.
916 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
918 struct mgmt_mode *cp = cmd->param;
920 return LE_AD_GENERAL;
921 else if (cp->val == 0x02)
922 return LE_AD_LIMITED;
924 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
925 return LE_AD_LIMITED;
926 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
927 return LE_AD_GENERAL;
933 bool mgmt_get_connectable(struct hci_dev *hdev)
935 struct mgmt_pending_cmd *cmd;
937 /* If there's a pending mgmt command the flag will not yet have
938 * it's final value, so check for this first.
940 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
942 struct mgmt_mode *cp = cmd->param;
947 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
950 static void service_cache_off(struct work_struct *work)
952 struct hci_dev *hdev = container_of(work, struct hci_dev,
954 struct hci_request req;
956 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
959 hci_req_init(&req, hdev);
963 __hci_req_update_eir(&req);
964 __hci_req_update_class(&req);
966 hci_dev_unlock(hdev);
968 hci_req_run(&req, NULL);
971 static void rpa_expired(struct work_struct *work)
973 struct hci_dev *hdev = container_of(work, struct hci_dev,
975 struct hci_request req;
977 bt_dev_dbg(hdev, "");
979 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
981 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
984 /* The generation of a new RPA and programming it into the
985 * controller happens in the hci_req_enable_advertising()
988 hci_req_init(&req, hdev);
989 if (ext_adv_capable(hdev))
990 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
992 __hci_req_enable_advertising(&req);
993 hci_req_run(&req, NULL);
996 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
998 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1001 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1002 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1004 /* Non-mgmt controlled devices get this bit set
1005 * implicitly so that pairing works for them, however
1006 * for mgmt we require user-space to explicitly enable
1009 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1012 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1013 void *data, u16 data_len)
1015 struct mgmt_rp_read_info rp;
1017 bt_dev_dbg(hdev, "sock %p", sk);
1021 memset(&rp, 0, sizeof(rp));
1023 bacpy(&rp.bdaddr, &hdev->bdaddr);
1025 rp.version = hdev->hci_ver;
1026 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1028 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1029 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1031 memcpy(rp.dev_class, hdev->dev_class, 3);
1033 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1034 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1036 hci_dev_unlock(hdev);
1038 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1042 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1047 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1048 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1049 hdev->dev_class, 3);
1051 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1052 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1055 name_len = strlen(hdev->dev_name);
1056 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1057 hdev->dev_name, name_len);
1059 name_len = strlen(hdev->short_name);
1060 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1061 hdev->short_name, name_len);
1066 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1067 void *data, u16 data_len)
1070 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1073 bt_dev_dbg(hdev, "sock %p", sk);
1075 memset(&buf, 0, sizeof(buf));
1079 bacpy(&rp->bdaddr, &hdev->bdaddr);
1081 rp->version = hdev->hci_ver;
1082 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1084 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1085 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1088 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1089 rp->eir_len = cpu_to_le16(eir_len);
1091 hci_dev_unlock(hdev);
1093 /* If this command is called at least once, then the events
1094 * for class of device and local name changes are disabled
1095 * and only the new extended controller information event
1098 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1099 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1100 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1102 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1103 sizeof(*rp) + eir_len);
1106 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1109 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1112 memset(buf, 0, sizeof(buf));
1114 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1115 ev->eir_len = cpu_to_le16(eir_len);
1117 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1118 sizeof(*ev) + eir_len,
1119 HCI_MGMT_EXT_INFO_EVENTS, skip);
1122 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1124 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1126 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1130 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1132 bt_dev_dbg(hdev, "status 0x%02x", status);
1134 if (hci_conn_count(hdev) == 0) {
1135 cancel_delayed_work(&hdev->power_off);
1136 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1140 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1142 struct mgmt_ev_advertising_added ev;
1144 ev.instance = instance;
1146 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1149 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1152 struct mgmt_ev_advertising_removed ev;
1154 ev.instance = instance;
1156 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1159 static void cancel_adv_timeout(struct hci_dev *hdev)
1161 if (hdev->adv_instance_timeout) {
1162 hdev->adv_instance_timeout = 0;
1163 cancel_delayed_work(&hdev->adv_instance_expire);
1167 static int clean_up_hci_state(struct hci_dev *hdev)
1169 struct hci_request req;
1170 struct hci_conn *conn;
1171 bool discov_stopped;
1174 hci_req_init(&req, hdev);
1176 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1177 test_bit(HCI_PSCAN, &hdev->flags)) {
1179 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1182 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1184 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1185 __hci_req_disable_advertising(&req);
1187 discov_stopped = hci_req_stop_discovery(&req);
1189 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1190 /* 0x15 == Terminated due to Power Off */
1191 __hci_abort_conn(&req, conn, 0x15);
1194 err = hci_req_run(&req, clean_up_hci_complete);
1195 if (!err && discov_stopped)
1196 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
/* MGMT_OP_SET_POWERED handler: power the controller up or down on
 * behalf of a mgmt socket. Completion is signalled asynchronously via
 * the pending command added below.
 * NOTE(review): extraction dropped several lines (err declaration,
 * goto labels, else branches, closing braces) - compare full source.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	bt_dev_dbg(hdev, "sock %p", sk);
	/* Only 0x00 (off) and 0x01 (on) are valid */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Only one power transition may be pending at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
	/* Already in requested state: just echo the current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
		queue_work(hdev->req_workqueue, &hdev->power_on);
	/* Disconnect connections, stop scans, etc */
	err = clean_up_hci_state(hdev);
	queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
			   HCI_POWER_OFF_TIMEOUT);
	/* ENODATA means there were no HCI commands queued */
	if (err == -ENODATA) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	hci_dev_unlock(hdev);
/* Send the New Settings event (current settings bitmask) to every
 * mgmt socket that enabled setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
	__le32 ev = cpu_to_le32(get_current_settings(hdev));
	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Public wrapper: broadcast New Settings with no socket skipped. */
int mgmt_new_settings(struct hci_dev *hdev)
	return new_settings(hdev, NULL);
	/* NOTE(review): this is the tail of the cmd_lookup helper struct;
	 * its opening lines were dropped by extraction.
	 */
	struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer a pending settings command,
 * remember (and hold) the first socket in match->sk so the caller can
 * skip it when broadcasting New Settings, then free the command.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
	struct cmd_lookup *match = data;
	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
	list_del(&cmd->list);
	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail a pending command with the
 * status pointed to by @data and remove it.
 * NOTE(review): the 'u8 *status = data;' line was dropped here.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the per-command completion
 * handler when one is installed; otherwise fall back to a plain
 * command-status response via cmd_status_rsp().
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);
	cmd_status_rsp(cmd, data);
/* Generic completion: echo the command's own parameters back as the
 * response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
/* Completion for address-based commands: reply with only the leading
 * mgmt_addr_info portion of the original parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled.
 */
static u8 mgmt_bredr_support(struct hci_dev *hdev)
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	return MGMT_STATUS_SUCCESS;
/* Map LE availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is disabled.
 */
static u8 mgmt_le_support(struct hci_dev *hdev)
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	return MGMT_STATUS_SUCCESS;
/* HCI completion hook for Set Discoverable: on failure report the
 * error and drop limited-discoverable; on success (re)arm the
 * discoverable timeout and notify about the settings change.
 * NOTE(review): the hci_dev_lock() call, the status check and some
 * braces/labels were dropped by extraction.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
	struct mgmt_pending_cmd *cmd;
	bt_dev_dbg(hdev, "status 0x%02x", status);
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	u8 mgmt_err = mgmt_status(status);
	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	/* Re-arm the discoverable timeout if one is configured */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. Values: 0x00 off, 0x01 general
 * discoverable, 0x02 limited discoverable (requires a timeout).
 * Handles the powered-off case (flag change only), the timeout-only
 * update case, and otherwise queues the discoverable_update work.
 * NOTE(review): extraction dropped goto labels, else branches and
 * closing braces throughout - compare against full source.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	bt_dev_dbg(hdev, "sock %p", sk);
	/* Needs at least one of LE or BR/EDR enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);
	timeout = __le16_to_cpu(cp->timeout);
	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);
	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
	/* Serialize against concurrent discoverable/connectable changes */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
	if (!hdev_is_powered(hdev)) {
		bool changed = false;
		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		err = new_settings(hdev, sk);
	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;
		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;
	hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	/* Actual HCI traffic is driven from the workqueue */
	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	hci_dev_unlock(hdev);
/* HCI completion hook for Set Connectable: report failure status or,
 * on success, answer the pending command and broadcast New Settings.
 * NOTE(review): hci_dev_lock(), the status check and goto labels were
 * dropped by extraction.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
	struct mgmt_pending_cmd *cmd;
	bt_dev_dbg(hdev, "status 0x%02x", status);
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	u8 mgmt_err = mgmt_status(status);
	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* Flag-only connectable update (no HCI traffic). Turning connectable
 * off also clears discoverable. Refreshes scan state and background
 * scanning, then broadcasts New Settings when something changed.
 * NOTE(review): the 'changed = true' assignment, else branch and
 * early-return lines were dropped by extraction.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
	bool changed = false;
	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
	hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	hci_req_update_scan(hdev);
	hci_update_background_scan(hdev);
	return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler. When powered off only the stored
 * flags change; otherwise a pending command is queued and the real
 * HCI work happens via hdev->connectable_update.
 * NOTE(review): goto labels, else branches and braces were dropped by
 * extraction - compare against full source.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	bt_dev_dbg(hdev, "sock %p", sk);
	/* Needs at least one of LE or BR/EDR enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Powered off: flag-only update */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	/* Disabling connectable also tears down discoverable state */
	if (hdev->discov_timeout > 0)
		cancel_delayed_work(&hdev->discov_off);
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag. Pure
 * flag change, but in limited-privacy mode with advertising active it
 * may need a discoverable refresh since the advertising address can
 * depend on bondable mode.
 * NOTE(review): the if/else around the flag toggle and goto labels
 * were dropped by extraction.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_mode *cp = data;
	bt_dev_dbg(hdev, "sock %p", sk);
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);
	changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	/* In limited privacy mode the change of bondable mode
	 * may affect the local advertising address.
	 */
	if (hdev_is_powered(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		queue_work(hdev->req_workqueue,
			   &hdev->discoverable_update);
	err = new_settings(hdev, sk);
	hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler (BR/EDR authentication). When
 * powered off only the HCI_LINK_SECURITY flag changes; when powered,
 * Write Authentication Enable is sent if the value differs from the
 * current HCI_AUTH state.
 * NOTE(review): extraction dropped the status/err/val declarations,
 * goto labels and closing braces - compare against full source.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	bt_dev_dbg(hdev, "sock %p", sk);
	status = mgmt_bredr_support(hdev);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Powered off: flag-only change */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		err = new_settings(hdev, sk);
	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
	/* No change needed if the controller already agrees */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
		mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler (Secure Simple Pairing). Powered off is a
 * flag-only change (disabling SSP also clears HS); powered on sends
 * Write Simple Pairing Mode, first turning off SSP debug mode when
 * SSP is being disabled while debug keys are in use.
 * NOTE(review): extraction dropped if/else structure, the flag names
 * in the test_and_set/clear calls, goto labels and braces.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	bt_dev_dbg(hdev, "sock %p", sk);
	status = mgmt_bredr_support(hdev);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);
	if (!hdev_is_powered(hdev)) {
		changed = !hci_dev_test_and_set_flag(hdev,
		changed = hci_dev_test_and_clear_flag(hdev,
		changed = hci_dev_test_and_clear_flag(hdev,
		hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		err = new_settings(hdev, sk);
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	/* Disabling SSP while debug keys are active also turns SSP
	 * debug mode off first.
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);
	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
		mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler (High Speed / AMP). Rejected unless
 * CONFIG_BT_HS is enabled, BR/EDR and SSP are available and SSP is
 * enabled; enabling while powered is also rejected here.
 * NOTE(review): extraction dropped the if/else around the flag
 * change, goto labels and closing braces.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
	struct mgmt_mode *cp = data;
	bt_dev_dbg(hdev, "sock %p", sk);
	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);
	status = mgmt_bredr_support(hdev);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* HS requires SSP to be enabled */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
	changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	if (hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_REJECTED);
	changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	err = new_settings(hdev, sk);
	hci_dev_unlock(hdev);
/* HCI completion hook for Set LE: fail or complete all pending SET_LE
 * commands, then, if LE ended up enabled, refresh default advertising
 * / scan-response data (ext adv variant when supported) and restart
 * background scanning.
 * NOTE(review): hci_dev_lock(), the status check, an 'int err'
 * declaration and else branches were dropped by extraction.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	struct cmd_lookup match = { NULL, hdev };
	u8 mgmt_err = mgmt_status(status);
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
	new_settings(hdev, match.sk);
	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler. LE cannot be switched off on LE-only
 * configurations; otherwise powered-off (or no-op) requests are
 * flag-only, and real changes send Write LE Host Supported after
 * stopping any active advertising.
 * NOTE(review): extraction dropped several declarations (val,
 * enabled, err), the hci_cp field assignments, goto labels and
 * braces - compare against full source.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	bt_dev_dbg(hdev, "sock %p", sk);
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	enabled = lmp_host_le_capable(hdev);
	/* Disabling LE drops all advertising instances */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;
		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		err = new_settings(hdev, sk);
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	hci_req_init(&req, hdev);
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_cp.simul = 0x00;
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);
	if (ext_adv_capable(hdev))
		__hci_req_clear_ext_adv_sets(&req);
	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
	err = hci_req_run(&req, le_enable_complete);
		mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
	struct mgmt_pending_cmd *cmd;
	/* Any of these opcodes may rewrite the class of device or EIR.
	 * NOTE(review): the 'return true' / 'return false' lines were
	 * dropped by extraction.
	 */
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit short forms.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if its low 12 bytes match the Bluetooth
 * Base UUID, the top 32 bits decide whether it is really a 16- or
 * 32-bit UUID; otherwise it is a full 128-bit one.
 * NOTE(review): the return statements were dropped by extraction.
 */
static u8 get_uuid_size(const u8 *uuid)
	if (memcmp(uuid, bluetooth_base_uuid, 12))
	val = get_unaligned_le32(&uuid[12]);
/* Complete a pending class/EIR-related command (@mgmt_op), replying
 * with the current 3-byte class of device.
 * NOTE(review): hci_dev_lock() and the NULL check on cmd were dropped
 * by extraction.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
	struct mgmt_pending_cmd *cmd;
	cmd = pending_find(mgmt_op, hdev);
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* HCI request callback for add_uuid(): forward to the generic
 * class-command completion.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	bt_dev_dbg(hdev, "status 0x%02x", status);
	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: record a new service UUID and refresh the
 * class of device and EIR. If hci_req_run() queued nothing (-ENODATA)
 * the reply is sent immediately; otherwise a pending command waits
 * for add_uuid_complete().
 * NOTE(review): the allocation-failure check, goto labels and braces
 * were dropped by extraction.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	bt_dev_dbg(hdev, "sock %p", sk);
	/* Only one class/EIR-affecting command at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);
	list_add_tail(&uuid->list, &hdev->uuids);
	hci_req_init(&req, hdev);
	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);
	err = hci_req_run(&req, add_uuid_complete);
		if (err != -ENODATA)
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	hci_dev_unlock(hdev);
/* Arm the service cache: when powered, setting HCI_SERVICE_CACHE
 * defers class/EIR updates via the delayed service_cache work.
 * Returns whether the caller should skip the immediate update.
 * NOTE(review): return statements and the cache timeout constant
 * were dropped by extraction.
 */
static bool enable_service_cache(struct hci_dev *hdev)
	if (!hdev_is_powered(hdev))
	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for remove_uuid(): forward to the generic
 * class-command completion.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	bt_dev_dbg(hdev, "status 0x%02x", status);
	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler. An all-zero UUID clears the whole
 * list (possibly just arming the service cache); otherwise every
 * matching entry is removed. Class and EIR are then refreshed, with
 * -ENODATA meaning no HCI traffic was needed.
 * NOTE(review): extraction dropped the 'found' counter, kfree of
 * removed entries, goto labels and braces.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	bt_dev_dbg(hdev, "sock %p", sk);
	/* Only one class/EIR-affecting command at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
	/* All-zero UUID: wipe every registered UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
		list_del(&match->list);
	/* No entry matched the given UUID */
	err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
			      MGMT_STATUS_INVALID_PARAMS);
	hci_req_init(&req, hdev);
	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);
	err = hci_req_run(&req, remove_uuid_complete);
		if (err != -ENODATA)
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	hci_dev_unlock(hdev);
/* HCI request callback for set_dev_class(): forward to the generic
 * class-command completion.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	bt_dev_dbg(hdev, "status 0x%02x", status);
	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: store major/minor class and push the
 * new class of device (plus EIR if the service cache was active) to
 * the controller. Low minor bits and high major bits are reserved and
 * must be zero.
 * NOTE(review): goto labels, hci_dev_lock() re-acquisition after the
 * cancel_delayed_work_sync() and braces were dropped by extraction.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	bt_dev_dbg(hdev, "sock %p", sk);
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Only one class/EIR-affecting command at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
	/* Reserved bits must be zero */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
	hci_req_init(&req, hdev);
	/* Flush a pending service-cache expiry before updating EIR;
	 * the lock is dropped around the synchronous cancel.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		__hci_req_update_eir(&req);
	__hci_req_update_class(&req);
	err = hci_req_run(&req, set_class_complete);
		if (err != -ENODATA)
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validate and replace the whole
 * BR/EDR link-key store. key_count is bounded so struct_size() cannot
 * overflow, the total length must match exactly, every key must be a
 * BR/EDR address with a known type, and debug combination keys are
 * never persisted.
 * NOTE(review): hci_dev_lock(), the debug_keys if/else and braces
 * were dropped by extraction.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_load_link_keys *cp = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bt_dev_dbg(hdev, "sock %p", sk);
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);
	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
	/* First pass: validate every entry before touching state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];
		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	hci_link_keys_clear(hdev);
	changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	changed = hci_dev_test_and_clear_flag(hdev,
					      HCI_KEEP_DEBUG_KEYS);
	new_settings(hdev, NULL);
	/* Second pass: store the keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];
		/* Skip keys the administrator has blocked */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
	hci_dev_unlock(hdev);
/* Broadcast the Device Unpaired event for the given address, skipping
 * the socket that initiated the unpair.
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
	struct mgmt_ev_device_unpaired ev;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;
	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: remove pairing material (link key
 * for BR/EDR; SMP LTK/IRK and conn params for LE) and optionally
 * terminate an existing connection to the device. When a disconnect
 * is requested and a connection exists, the reply is deferred until
 * hci_abort_conn() completes.
 * NOTE(review): extraction dropped the addr_type/err declarations,
 * goto labels, else branches and braces - compare full source.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);
	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		params->auto_connect = HCI_AUTO_CONN_DISABLED;
	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
	cmd->cmd_complete = addr_cmd_complete;
	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
		mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: look up the ACL or LE connection for
 * the given address and request a remote-user-terminated disconnect.
 * The reply is deferred via a pending command completed with the
 * original address info.
 * NOTE(review): hci_dev_lock(), goto labels and braces were dropped
 * by extraction.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	bt_dev_dbg(hdev, "sock %p", sk);
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
	/* Only one disconnect may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	cmd->cmd_complete = generic_cmd_complete;
	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
		mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* Translate an HCI link type + address type pair into the mgmt
 * BDADDR_* address-type encoding.
 * NOTE(review): the 'case LE_LINK:' label and some break lines were
 * dropped by extraction.
 */
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
	switch (link_type) {
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;
		/* Fallback to LE Random address type */
		return BDADDR_LE_RANDOM;
	/* Fallback to BR/EDR type */
	return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: reply with the address list of
 * every MGMT-visible connection, excluding SCO/eSCO links. The
 * buffer is sized from a first counting pass; the final length is
 * recomputed since SCO links are filtered in the second pass.
 * NOTE(review): hci_dev_lock(), counter initialisation, 'continue'
 * lines and braces were dropped by extraction.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_rp_get_connections *rp;
	bt_dev_dbg(hdev, "sock %p", sk);
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
	/* First pass: count MGMT-connected links to size the reply */
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	/* Second pass: fill in addresses, skipping SCO/eSCO links */
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
	rp->conn_count = cpu_to_le16(i);
	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));
	hci_dev_unlock(hdev);
/* Queue a PIN Code Negative Reply to the controller, tracked via a
 * pending command that completes with the address info. The command
 * is dropped again if the HCI send fails.
 * NOTE(review): the allocation check and return lines were dropped by
 * extraction.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
	struct mgmt_pending_cmd *cmd;
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
	cmd->cmd_complete = addr_cmd_complete;
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
		mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN to the
 * controller for the pending ACL connection. A high-security pairing
 * demands a full 16-byte PIN; anything shorter is converted into a
 * negative reply.
 * NOTE(review): hci_dev_lock(), goto labels and braces were dropped
 * by extraction.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	bt_dev_dbg(hdev, "sock %p", sk);
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
	/* High security requires a 16-digit PIN */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;
		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
		bt_dev_err(hdev, "PIN code is not 16 bytes long");
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	cmd->cmd_complete = addr_cmd_complete;
	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
		mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the requested IO capability on
 * the controller after range-checking it against SMP_IO_KEYBOARD_DISPLAY
 * (the highest defined value).
 */
2787 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2790 struct mgmt_cp_set_io_capability *cp = data;
2792 bt_dev_dbg(hdev, "sock %p", sk);
2794 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2795 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2796 MGMT_STATUS_INVALID_PARAMS);
2800 hdev->io_capability = cp->io_capability;
2802 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2804 hci_dev_unlock(hdev);
2806 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Return the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at @conn, or NULL if no such pairing is in flight (elided return not
 * visible in this excerpt).
 */
2810 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2812 struct hci_dev *hdev = conn->hdev;
2813 struct mgmt_pending_cmd *cmd;
2815 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2816 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2819 if (cmd->user_data != conn)
/* Complete a Pair Device command: send the mgmt response with @status,
 * detach all pairing callbacks from the connection so no further events
 * fire, drop our connection reference, and keep the stored connection
 * parameters (the device is now paired).
 */
2828 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2830 struct mgmt_rp_pair_device rp;
2831 struct hci_conn *conn = cmd->user_data;
2834 bacpy(&rp.addr.bdaddr, &conn->dst);
2835 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2837 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2838 status, &rp, sizeof(rp));
2840 /* So we don't get further callbacks for this connection */
2841 conn->connect_cfm_cb = NULL;
2842 conn->security_cfm_cb = NULL;
2843 conn->disconn_cfm_cb = NULL;
2845 hci_conn_drop(conn);
2847 /* The device is paired so there is no need to remove
2848 * its connection parameters anymore.
2850 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by SMP when pairing finishes; resolve any pending Pair Device
 * command for @conn with SUCCESS or FAILED depending on @complete.
 */
2857 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2859 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2860 struct mgmt_pending_cmd *cmd;
2862 cmd = find_pairing(conn);
2864 cmd->cmd_complete(cmd, status);
2865 mgmt_pending_remove(cmd);
/* BR/EDR connect/security/disconnect callback: finish the matching
 * pending Pair Device command with the translated HCI status.
 */
2869 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2871 struct mgmt_pending_cmd *cmd;
2873 BT_DBG("status %u", status);
2875 cmd = find_pairing(conn);
2877 BT_DBG("Unable to find a pending command");
2881 cmd->cmd_complete(cmd, mgmt_status(status));
2882 mgmt_pending_remove(cmd);
/* LE variant of pairing_complete_cb; an early-exit condition between the
 * debug print and find_pairing() is elided in this excerpt.
 */
2885 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2887 struct mgmt_pending_cmd *cmd;
2889 BT_DBG("status %u", status);
2894 cmd = find_pairing(conn);
2896 BT_DBG("Unable to find a pending command");
2900 cmd->cmd_complete(cmd, mgmt_status(status));
2901 mgmt_pending_remove(cmd);
/* Handle MGMT_OP_PAIR_DEVICE: validate the address type and IO capability,
 * establish a BR/EDR ACL or an LE connection as appropriate, install
 * pairing callbacks on the connection and queue a pending command that is
 * resolved when pairing completes (or immediately if the link is already
 * connected and secure enough).
 */
2904 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2907 struct mgmt_cp_pair_device *cp = data;
2908 struct mgmt_rp_pair_device rp;
2909 struct mgmt_pending_cmd *cmd;
2910 u8 sec_level, auth_type;
2911 struct hci_conn *conn;
2914 bt_dev_dbg(hdev, "sock %p", sk);
2916 memset(&rp, 0, sizeof(rp));
2917 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2918 rp.addr.type = cp->addr.type;
2920 if (!bdaddr_type_is_valid(cp->addr.type))
2921 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2922 MGMT_STATUS_INVALID_PARAMS,
2925 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2926 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2927 MGMT_STATUS_INVALID_PARAMS,
2932 if (!hdev_is_powered(hdev)) {
2933 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2934 MGMT_STATUS_NOT_POWERED, &rp,
2939 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2940 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2941 MGMT_STATUS_ALREADY_PAIRED, &rp,
2946 sec_level = BT_SECURITY_MEDIUM;
2947 auth_type = HCI_AT_DEDICATED_BONDING;
/* BR/EDR targets get a direct ACL connect; LE targets go through the
 * connection-parameter store and a scan-based connect.
 */
2949 if (cp->addr.type == BDADDR_BREDR) {
2950 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2951 auth_type, CONN_REASON_PAIR_DEVICE);
2953 u8 addr_type = le_addr_type(cp->addr.type);
2954 struct hci_conn_params *p;
2956 /* When pairing a new device, it is expected to remember
2957 * this device for future connections. Adding the connection
2958 * parameter information ahead of time allows tracking
2959 * of the slave preferred values and will speed up any
2960 * further connection establishment.
2962 * If connection parameters already exist, then they
2963 * will be kept and this function does nothing.
2965 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2967 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2968 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2970 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2971 sec_level, HCI_LE_CONN_TIMEOUT,
2972 CONN_REASON_PAIR_DEVICE);
/* Map connect errors onto mgmt status codes. */
2978 if (PTR_ERR(conn) == -EBUSY)
2979 status = MGMT_STATUS_BUSY;
2980 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2981 status = MGMT_STATUS_NOT_SUPPORTED;
2982 else if (PTR_ERR(conn) == -ECONNREFUSED)
2983 status = MGMT_STATUS_REJECTED;
2985 status = MGMT_STATUS_CONNECT_FAILED;
2987 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2988 status, &rp, sizeof(rp));
/* A connection that already has pairing callbacks attached means
 * another pairing is in progress on it.
 */
2992 if (conn->connect_cfm_cb) {
2993 hci_conn_drop(conn);
2994 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2995 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2999 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3002 hci_conn_drop(conn);
3006 cmd->cmd_complete = pairing_complete;
3008 /* For LE, just connecting isn't a proof that the pairing finished */
3009 if (cp->addr.type == BDADDR_BREDR) {
3010 conn->connect_cfm_cb = pairing_complete_cb;
3011 conn->security_cfm_cb = pairing_complete_cb;
3012 conn->disconn_cfm_cb = pairing_complete_cb;
3014 conn->connect_cfm_cb = le_pairing_complete_cb;
3015 conn->security_cfm_cb = le_pairing_complete_cb;
3016 conn->disconn_cfm_cb = le_pairing_complete_cb;
3019 conn->io_capability = cp->io_cap;
/* Hold a reference for the pending command's user_data. */
3020 cmd->user_data = hci_conn_get(conn);
3022 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3023 hci_conn_security(conn, sec_level, auth_type, true)) {
3024 cmd->cmd_complete(cmd, 0);
3025 mgmt_pending_remove(cmd);
3031 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: find the pending Pair Device command
 * for the given address, complete it with CANCELLED, then tear down any
 * pairing state (link key / SMP) and abort the link if it was created
 * solely for pairing.
 */
3035 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3038 struct mgmt_addr_info *addr = data;
3039 struct mgmt_pending_cmd *cmd;
3040 struct hci_conn *conn;
3043 bt_dev_dbg(hdev, "sock %p", sk);
3047 if (!hdev_is_powered(hdev)) {
3048 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3049 MGMT_STATUS_NOT_POWERED);
3053 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3055 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3056 MGMT_STATUS_INVALID_PARAMS);
3060 conn = cmd->user_data;
/* The address in the cancel request must match the pairing target. */
3062 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3063 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3064 MGMT_STATUS_INVALID_PARAMS);
3068 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3069 mgmt_pending_remove(cmd);
3071 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3072 addr, sizeof(*addr));
3074 /* Since user doesn't want to proceed with the connection, abort any
3075 * ongoing pairing and then terminate the link if it was created
3076 * because of the pair device action.
3078 if (addr->type == BDADDR_BREDR)
3079 hci_remove_link_key(hdev, &addr->bdaddr);
3081 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3082 le_addr_type(addr->type));
3084 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3085 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3088 hci_dev_unlock(hdev);
/* Common backend for all user pairing responses (PIN neg reply, user
 * confirm/passkey replies and their negatives). For LE addresses the
 * response is routed to SMP; for BR/EDR a pending command is queued and
 * the matching HCI command (@hci_op) is sent, with @passkey appended for
 * HCI_OP_USER_PASSKEY_REPLY.
 */
3092 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3093 struct mgmt_addr_info *addr, u16 mgmt_op,
3094 u16 hci_op, __le32 passkey)
3096 struct mgmt_pending_cmd *cmd;
3097 struct hci_conn *conn;
3102 if (!hdev_is_powered(hdev)) {
3103 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3104 MGMT_STATUS_NOT_POWERED, addr,
3109 if (addr->type == BDADDR_BREDR)
3110 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3112 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3113 le_addr_type(addr->type));
3116 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3117 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by SMP, not HCI. */
3122 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3123 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3125 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3126 MGMT_STATUS_SUCCESS, addr,
3129 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3130 MGMT_STATUS_FAILED, addr,
3136 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3142 cmd->cmd_complete = addr_cmd_complete;
3144 /* Continue with pairing via HCI */
3145 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3146 struct hci_cp_user_passkey_reply cp;
3148 bacpy(&cp.bdaddr, &addr->bdaddr);
3149 cp.passkey = passkey;
3150 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3152 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3156 mgmt_pending_remove(cmd);
3159 hci_dev_unlock(hdev);
/* Thin wrapper: MGMT_OP_PIN_CODE_NEG_REPLY via user_pairing_resp(). */
3163 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3164 void *data, u16 len)
3166 struct mgmt_cp_pin_code_neg_reply *cp = data;
3168 bt_dev_dbg(hdev, "sock %p", sk);
3170 return user_pairing_resp(sk, hdev, &cp->addr,
3171 MGMT_OP_PIN_CODE_NEG_REPLY,
3172 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Thin wrapper: MGMT_OP_USER_CONFIRM_REPLY via user_pairing_resp().
 * Unlike its siblings, this one explicitly validates the payload length.
 */
3175 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3178 struct mgmt_cp_user_confirm_reply *cp = data;
3180 bt_dev_dbg(hdev, "sock %p", sk);
3182 if (len != sizeof(*cp))
3183 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3184 MGMT_STATUS_INVALID_PARAMS);
3186 return user_pairing_resp(sk, hdev, &cp->addr,
3187 MGMT_OP_USER_CONFIRM_REPLY,
3188 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Thin wrapper: MGMT_OP_USER_CONFIRM_NEG_REPLY via user_pairing_resp(). */
3191 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3192 void *data, u16 len)
3194 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3196 bt_dev_dbg(hdev, "sock %p", sk);
3198 return user_pairing_resp(sk, hdev, &cp->addr,
3199 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3200 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Thin wrapper: MGMT_OP_USER_PASSKEY_REPLY; forwards the user-supplied
 * passkey through user_pairing_resp().
 */
3203 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3206 struct mgmt_cp_user_passkey_reply *cp = data;
3208 bt_dev_dbg(hdev, "sock %p", sk);
3210 return user_pairing_resp(sk, hdev, &cp->addr,
3211 MGMT_OP_USER_PASSKEY_REPLY,
3212 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Thin wrapper: MGMT_OP_USER_PASSKEY_NEG_REPLY via user_pairing_resp(). */
3215 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3216 void *data, u16 len)
3218 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3220 bt_dev_dbg(hdev, "sock %p", sk);
3222 return user_pairing_resp(sk, hdev, &cp->addr,
3223 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3224 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Force re-scheduling of advertising when data carrying @flags (e.g. the
 * local name or appearance) has changed: cancel the running instance
 * timeout and schedule the next instance so fresh data is advertised.
 */
3227 static void adv_expire(struct hci_dev *hdev, u32 flags)
3229 struct adv_info *adv_instance;
3230 struct hci_request req;
3233 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3237 /* stop if current instance doesn't need to be changed */
3238 if (!(adv_instance->flags & flags))
3241 cancel_adv_timeout(hdev);
3243 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3247 hci_req_init(&req, hdev);
3248 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3253 hci_req_run(&req, NULL);
/* HCI request completion handler for Set Local Name: report status back
 * to the pending mgmt command and, when advertising is active, expire the
 * current advertising instance so the new name is picked up.
 */
3256 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3258 struct mgmt_cp_set_local_name *cp;
3259 struct mgmt_pending_cmd *cmd;
3261 bt_dev_dbg(hdev, "status 0x%02x", status);
3265 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3272 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3273 mgmt_status(status));
3275 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3278 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3279 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3282 mgmt_pending_remove(cmd);
3285 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_LOCAL_NAME: short-circuit when nothing changed or
 * the controller is powered off (name is stored and an event emitted);
 * otherwise queue a pending command and push the new name to the
 * controller (EIR for BR/EDR, scan response data for LE advertising).
 */
3288 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3291 struct mgmt_cp_set_local_name *cp = data;
3292 struct mgmt_pending_cmd *cmd;
3293 struct hci_request req;
3296 bt_dev_dbg(hdev, "sock %p", sk);
3300 /* If the old values are the same as the new ones just return a
3301 * direct command complete event.
3303 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3304 !memcmp(hdev->short_name, cp->short_name,
3305 sizeof(hdev->short_name))) {
3306 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3311 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered-off: just store the name and notify listeners; no HCI
 * traffic is possible or needed.
 */
3313 if (!hdev_is_powered(hdev)) {
3314 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3316 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3321 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3322 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3323 ext_info_changed(hdev, sk);
3328 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3334 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3336 hci_req_init(&req, hdev);
3338 if (lmp_bredr_capable(hdev)) {
3339 __hci_req_update_name(&req);
3340 __hci_req_update_eir(&req);
3343 /* The name is stored in the scan response data and so
3344 * no need to update the advertising data here.
3346 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3347 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3349 err = hci_req_run(&req, set_name_complete);
3351 mgmt_pending_remove(cmd);
3354 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_APPEARANCE (LE only): store the new appearance value
 * and, if it actually changed while LE advertising is active, expire the
 * advertising instance carrying the appearance so it gets refreshed.
 */
3358 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3361 struct mgmt_cp_set_appearance *cp = data;
3365 bt_dev_dbg(hdev, "sock %p", sk);
3367 if (!lmp_le_capable(hdev))
3368 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3369 MGMT_STATUS_NOT_SUPPORTED);
3371 appearance = le16_to_cpu(cp->appearance);
3375 if (hdev->appearance != appearance) {
3376 hdev->appearance = appearance;
3378 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3379 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3381 ext_info_changed(hdev, sk);
3384 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3387 hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: report the supported, currently
 * selected and configurable PHY bitmasks for this controller.
 */
3392 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3393 void *data, u16 len)
3395 struct mgmt_rp_get_phy_configuration rp;
3397 bt_dev_dbg(hdev, "sock %p", sk);
3401 memset(&rp, 0, sizeof(rp));
3403 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3404 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3405 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3407 hci_dev_unlock(hdev);
3409 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event carrying the currently
 * selected PHYs to all mgmt listeners except @skip.
 */
3413 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3415 struct mgmt_ev_phy_configuration_changed ev;
3417 memset(&ev, 0, sizeof(ev));
3419 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3421 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion handler for the LE Set Default PHY request issued by
 * set_phy_configuration(): resolve the pending mgmt command and, on
 * success, broadcast the PHY configuration change.
 */
3425 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3426 u16 opcode, struct sk_buff *skb)
3428 struct mgmt_pending_cmd *cmd;
3430 bt_dev_dbg(hdev, "status 0x%02x", status);
3434 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3439 mgmt_cmd_status(cmd->sk, hdev->id,
3440 MGMT_OP_SET_PHY_CONFIGURATION,
3441 mgmt_status(status));
3443 mgmt_cmd_complete(cmd->sk, hdev->id,
3444 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3447 mgmt_phy_configuration_changed(hdev, cmd->sk);
3450 mgmt_pending_remove(cmd);
3453 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: validate the requested PHY set
 * against what the controller supports/allows, translate the BR/EDR PHY
 * bits into the ACL packet-type mask, and — when the LE PHY selection
 * actually changed — send HCI LE Set Default PHY and complete the command
 * asynchronously via set_default_phy_complete().
 */
3456 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3457 void *data, u16 len)
3459 struct mgmt_cp_set_phy_configuration *cp = data;
3460 struct hci_cp_le_set_default_phy cp_phy;
3461 struct mgmt_pending_cmd *cmd;
3462 struct hci_request req;
3463 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3464 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3465 bool changed = false;
3468 bt_dev_dbg(hdev, "sock %p", sk);
3470 configurable_phys = get_configurable_phys(hdev);
3471 supported_phys = get_supported_phys(hdev);
3472 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Reject any PHY bit the controller does not support at all. */
3474 if (selected_phys & ~supported_phys)
3475 return mgmt_cmd_status(sk, hdev->id,
3476 MGMT_OP_SET_PHY_CONFIGURATION,
3477 MGMT_STATUS_INVALID_PARAMS);
3479 unconfigure_phys = supported_phys & ~configurable_phys;
/* Non-configurable PHYs must all remain selected. */
3481 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3482 return mgmt_cmd_status(sk, hdev->id,
3483 MGMT_OP_SET_PHY_CONFIGURATION,
3484 MGMT_STATUS_INVALID_PARAMS);
3486 if (selected_phys == get_selected_phys(hdev))
3487 return mgmt_cmd_complete(sk, hdev->id,
3488 MGMT_OP_SET_PHY_CONFIGURATION,
3493 if (!hdev_is_powered(hdev)) {
3494 err = mgmt_cmd_status(sk, hdev->id,
3495 MGMT_OP_SET_PHY_CONFIGURATION,
3496 MGMT_STATUS_REJECTED);
3500 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3501 err = mgmt_cmd_status(sk, hdev->id,
3502 MGMT_OP_SET_PHY_CONFIGURATION,
/* Translate BR/EDR PHY selections into the ACL packet-type mask.
 * Note the EDR bits are "do not use" flags, hence the inverted logic
 * compared to the basic-rate 3/5-slot bits.
 */
3507 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3508 pkt_type |= (HCI_DH3 | HCI_DM3);
3510 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3512 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3513 pkt_type |= (HCI_DH5 | HCI_DM5);
3515 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3517 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3518 pkt_type &= ~HCI_2DH1;
3520 pkt_type |= HCI_2DH1;
3522 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3523 pkt_type &= ~HCI_2DH3;
3525 pkt_type |= HCI_2DH3;
3527 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3528 pkt_type &= ~HCI_2DH5;
3530 pkt_type |= HCI_2DH5;
3532 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3533 pkt_type &= ~HCI_3DH1;
3535 pkt_type |= HCI_3DH1;
3537 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3538 pkt_type &= ~HCI_3DH3;
3540 pkt_type |= HCI_3DH3;
3542 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3543 pkt_type &= ~HCI_3DH5;
3545 pkt_type |= HCI_3DH5;
3547 if (pkt_type != hdev->pkt_type) {
3548 hdev->pkt_type = pkt_type;
/* If only BR/EDR PHYs changed there is no HCI command to send;
 * complete immediately (and notify if the packet type changed).
 */
3552 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3553 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3555 mgmt_phy_configuration_changed(hdev, sk);
3557 err = mgmt_cmd_complete(sk, hdev->id,
3558 MGMT_OP_SET_PHY_CONFIGURATION,
3564 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3571 hci_req_init(&req, hdev);
3573 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" per direction. */
3575 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3576 cp_phy.all_phys |= 0x01;
3578 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3579 cp_phy.all_phys |= 0x02;
3581 if (selected_phys & MGMT_PHY_LE_1M_TX)
3582 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3584 if (selected_phys & MGMT_PHY_LE_2M_TX)
3585 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3587 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3588 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3590 if (selected_phys & MGMT_PHY_LE_1M_RX)
3591 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3593 if (selected_phys & MGMT_PHY_LE_2M_RX)
3594 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3596 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3597 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3599 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3601 err = hci_req_run_skb(&req, set_default_phy_complete);
3603 mgmt_pending_remove(cmd);
3606 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BLOCKED_KEYS: replace the controller's blocked-key
 * list with the caller-supplied set after validating the key count and
 * the exact payload length (struct_size guards against overflow).
 */
3611 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3614 int err = MGMT_STATUS_SUCCESS;
3615 struct mgmt_cp_set_blocked_keys *keys = data;
/* Upper bound on key_count so expected_len below cannot overflow u16. */
3616 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3617 sizeof(struct mgmt_blocked_key_info));
3618 u16 key_count, expected_len;
3621 bt_dev_dbg(hdev, "sock %p", sk);
3623 key_count = __le16_to_cpu(keys->key_count);
3624 if (key_count > max_key_count) {
3625 bt_dev_err(hdev, "too big key_count value %u", key_count);
3626 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3627 MGMT_STATUS_INVALID_PARAMS);
3630 expected_len = struct_size(keys, keys, key_count);
3631 if (expected_len != len) {
3632 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3634 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3635 MGMT_STATUS_INVALID_PARAMS);
/* The new list fully replaces the old one. */
3640 hci_blocked_keys_clear(hdev);
3642 for (i = 0; i < keys->key_count; ++i) {
3643 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3646 err = MGMT_STATUS_NO_RESOURCES;
3650 b->type = keys->keys[i].type;
3651 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3652 list_add_rcu(&b->list, &hdev->blocked_keys);
3654 hci_dev_unlock(hdev);
3656 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle the HCI_WIDEBAND_SPEECH_ENABLED
 * flag. Requires controller support (quirk), rejects changes while powered
 * that would differ from the current state, and emits New Settings when
 * the flag actually changed.
 */
3660 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3661 void *data, u16 len)
3663 struct mgmt_mode *cp = data;
3665 bool changed = false;
3667 bt_dev_dbg(hdev, "sock %p", sk);
3669 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3670 return mgmt_cmd_status(sk, hdev->id,
3671 MGMT_OP_SET_WIDEBAND_SPEECH,
3672 MGMT_STATUS_NOT_SUPPORTED);
3674 if (cp->val != 0x00 && cp->val != 0x01)
3675 return mgmt_cmd_status(sk, hdev->id,
3676 MGMT_OP_SET_WIDEBAND_SPEECH,
3677 MGMT_STATUS_INVALID_PARAMS);
3681 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3682 err = mgmt_cmd_status(sk, hdev->id,
3683 MGMT_OP_SET_WIDEBAND_SPEECH,
/* While powered, the setting can only be "changed" to its current
 * value; an actual flip would need a power cycle.
 */
3688 if (hdev_is_powered(hdev) &&
3689 !!cp->val != hci_dev_test_flag(hdev,
3690 HCI_WIDEBAND_SPEECH_ENABLED)) {
3691 err = mgmt_cmd_status(sk, hdev->id,
3692 MGMT_OP_SET_WIDEBAND_SPEECH,
3693 MGMT_STATUS_REJECTED);
3698 changed = !hci_dev_test_and_set_flag(hdev,
3699 HCI_WIDEBAND_SPEECH_ENABLED);
3701 changed = hci_dev_test_and_clear_flag(hdev,
3702 HCI_WIDEBAND_SPEECH_ENABLED);
3704 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3709 err = new_settings(hdev, sk);
3712 hci_dev_unlock(hdev);
/* Handle MGMT_OP_READ_CONTROLLER_CAP: build an EIR-style capability list
 * (security flags, max encryption key sizes, LE TX power range) based on
 * which HCI commands the controller advertises support for.
 * NOTE(review): the declarations of buf/cap_len/flags are elided from
 * this excerpt; rp aliases buf.
 */
3716 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3717 void *data, u16 data_len)
3720 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3723 u8 tx_power_range[2];
3725 bt_dev_dbg(hdev, "sock %p", sk);
3727 memset(&buf, 0, sizeof(buf));
3731 /* When the Read Simple Pairing Options command is supported, then
3732 * the remote public key validation is supported.
3734 * Alternatively, when Microsoft extensions are available, they can
3735 * indicate support for public key validation as well.
3737 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3738 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3740 flags |= 0x02; /* Remote public key validation (LE) */
3742 /* When the Read Encryption Key Size command is supported, then the
3743 * encryption key size is enforced.
3745 if (hdev->commands[20] & 0x10)
3746 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3748 flags |= 0x08; /* Encryption key size enforcement (LE) */
3750 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3753 /* When the Read Simple Pairing Options command is supported, then
3754 * also max encryption key size information is provided.
3756 if (hdev->commands[41] & 0x08)
3757 cap_len = eir_append_le16(rp->cap, cap_len,
3758 MGMT_CAP_MAX_ENC_KEY_SIZE,
3759 hdev->max_enc_key_size)
3761 cap_len = eir_append_le16(rp->cap, cap_len,
3762 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3763 SMP_MAX_ENC_KEY_SIZE);
3765 /* Append the min/max LE tx power parameters if we were able to fetch
3766 * it from the controller
3768 if (hdev->commands[38] & 0x80) {
3769 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3770 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3771 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3775 rp->cap_len = cpu_to_le16(cap_len);
3777 hci_dev_unlock(hdev);
3779 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3780 rp, sizeof(*rp) + cap_len);
/* UUIDs identifying experimental features exposed over the mgmt
 * interface; stored little-endian (reversed byte order vs the string).
 */
3783 #ifdef CONFIG_BT_FEATURE_DEBUG
3784 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3785 static const u8 debug_uuid[16] = {
3786 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3787 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3791 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3792 static const u8 simult_central_periph_uuid[16] = {
3793 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3794 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3797 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3798 static const u8 rpa_resolution_uuid[16] = {
3799 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3800 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental features
 * currently applicable (debug, simultaneous central/peripheral, LL
 * privacy) with their enabled/settable flag bits, and subscribe the
 * socket to future experimental-feature change events.
 */
3803 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3804 void *data, u16 data_len)
3806 char buf[62]; /* Enough space for 3 features */
3807 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3811 bt_dev_dbg(hdev, "sock %p", sk);
3813 memset(&buf, 0, sizeof(buf));
3815 #ifdef CONFIG_BT_FEATURE_DEBUG
3817 flags = bt_dbg_get() ? BIT(0) : 0;
3819 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3820 rp->features[idx].flags = cpu_to_le32(flags);
/* Simultaneous central+peripheral requires the relevant LE states
 * to be supported and the quirk declaring the state table valid.
 */
3826 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3827 (hdev->le_states[4] & 0x08) && /* Central */
3828 (hdev->le_states[4] & 0x40) && /* Peripheral */
3829 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3834 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3835 rp->features[idx].flags = cpu_to_le32(flags);
3839 if (hdev && use_ll_privacy(hdev)) {
3840 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3841 flags = BIT(0) | BIT(1);
3845 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3846 rp->features[idx].flags = cpu_to_le32(flags);
3850 rp->feature_count = cpu_to_le16(idx);
3852 /* After reading the experimental features information, enable
3853 * the events to update client on any future change.
3855 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3857 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3858 MGMT_OP_READ_EXP_FEATURES_INFO,
3859 0, rp, sizeof(*rp) + (20 * idx));
/* Emit an Experimental Feature Changed event for the LL privacy UUID;
 * BIT(1) is always set (settings changed), BIT(0) reflects @enabled.
 */
3862 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3865 struct mgmt_ev_exp_feature_changed ev;
3867 memset(&ev, 0, sizeof(ev));
3868 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3869 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3871 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3873 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3877 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Emit an Experimental Feature Changed event for the debug UUID; sent
 * with a NULL hdev because debug is a global (non-controller) feature.
 */
3878 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3880 struct mgmt_ev_exp_feature_changed ev;
3882 memset(&ev, 0, sizeof(ev));
3883 memcpy(ev.uuid, debug_uuid, 16);
3884 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3886 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3888 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Handle MGMT_OP_SET_EXP_FEATURE. Three cases by UUID:
 *  - ZERO_KEY: disable all experimental features (debug, LL privacy);
 *  - debug_uuid: toggle global debug (non-controller index required);
 *  - rpa_resolution_uuid: toggle LL privacy (controller index required,
 *    only while powered off).
 * Any other UUID is NOT_SUPPORTED. Change events are broadcast to other
 * subscribed sockets.
 */
3892 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3893 void *data, u16 data_len)
3895 struct mgmt_cp_set_exp_feature *cp = data;
3896 struct mgmt_rp_set_exp_feature rp;
3898 bt_dev_dbg(hdev, "sock %p", sk);
/* Case 1: all-zero UUID clears every experimental feature. */
3900 if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3901 memset(rp.uuid, 0, 16);
3902 rp.flags = cpu_to_le32(0);
3904 #ifdef CONFIG_BT_FEATURE_DEBUG
3906 bool changed = bt_dbg_get();
3911 exp_debug_feature_changed(false, sk);
3915 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3916 bool changed = hci_dev_test_flag(hdev,
3917 HCI_ENABLE_LL_PRIVACY);
3919 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3922 exp_ll_privacy_feature_changed(false, hdev, sk);
3925 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3927 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3928 MGMT_OP_SET_EXP_FEATURE, 0,
/* Case 2: global debug feature (CONFIG_BT_FEATURE_DEBUG only). */
3932 #ifdef CONFIG_BT_FEATURE_DEBUG
3933 if (!memcmp(cp->uuid, debug_uuid, 16)) {
3937 /* Command requires to use the non-controller index */
3939 return mgmt_cmd_status(sk, hdev->id,
3940 MGMT_OP_SET_EXP_FEATURE,
3941 MGMT_STATUS_INVALID_INDEX);
3943 /* Parameters are limited to a single octet */
3944 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3945 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3946 MGMT_OP_SET_EXP_FEATURE,
3947 MGMT_STATUS_INVALID_PARAMS);
3949 /* Only boolean on/off is supported */
3950 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3951 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3952 MGMT_OP_SET_EXP_FEATURE,
3953 MGMT_STATUS_INVALID_PARAMS);
3955 val = !!cp->param[0];
3956 changed = val ? !bt_dbg_get() : bt_dbg_get();
3959 memcpy(rp.uuid, debug_uuid, 16);
3960 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3962 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3964 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3965 MGMT_OP_SET_EXP_FEATURE, 0,
3969 exp_debug_feature_changed(val, sk);
/* Case 3: per-controller LL privacy (RPA resolution) feature. */
3975 if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
3980 /* Command requires to use the controller index */
3982 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3983 MGMT_OP_SET_EXP_FEATURE,
3984 MGMT_STATUS_INVALID_INDEX);
3986 /* Changes can only be made when controller is powered down */
3987 if (hdev_is_powered(hdev))
3988 return mgmt_cmd_status(sk, hdev->id,
3989 MGMT_OP_SET_EXP_FEATURE,
3990 MGMT_STATUS_REJECTED);
3992 /* Parameters are limited to a single octet */
3993 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3994 return mgmt_cmd_status(sk, hdev->id,
3995 MGMT_OP_SET_EXP_FEATURE,
3996 MGMT_STATUS_INVALID_PARAMS);
3998 /* Only boolean on/off is supported */
3999 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4000 return mgmt_cmd_status(sk, hdev->id,
4001 MGMT_OP_SET_EXP_FEATURE,
4002 MGMT_STATUS_INVALID_PARAMS);
4004 val = !!cp->param[0];
4007 changed = !hci_dev_test_flag(hdev,
4008 HCI_ENABLE_LL_PRIVACY);
4009 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
/* LL privacy is incompatible with software advertising. */
4010 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4012 /* Enable LL privacy + supported settings changed */
4013 flags = BIT(0) | BIT(1);
4015 changed = hci_dev_test_flag(hdev,
4016 HCI_ENABLE_LL_PRIVACY);
4017 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4019 /* Disable LL privacy + supported settings changed */
4023 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4024 rp.flags = cpu_to_le32(flags);
4026 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4028 err = mgmt_cmd_complete(sk, hdev->id,
4029 MGMT_OP_SET_EXP_FEATURE, 0,
4033 exp_ll_privacy_feature_changed(val, hdev, sk);
4038 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4039 MGMT_OP_SET_EXP_FEATURE,
4040 MGMT_STATUS_NOT_SUPPORTED);
/* Bitmask of all device flags the kernel currently supports. */
4043 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
/* Handle MGMT_OP_GET_DEVICE_FLAGS: report supported and current flags for
 * a BR/EDR whitelist entry or an LE connection-parameters entry; unknown
 * devices yield INVALID_PARAMS.
 */
4045 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4048 struct mgmt_cp_get_device_flags *cp = data;
4049 struct mgmt_rp_get_device_flags rp;
4050 struct bdaddr_list_with_flags *br_params;
4051 struct hci_conn_params *params;
4052 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4053 u32 current_flags = 0;
4054 u8 status = MGMT_STATUS_INVALID_PARAMS;
4056 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4057 &cp->addr.bdaddr, cp->addr.type);
4061 if (cp->addr.type == BDADDR_BREDR) {
4062 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4068 current_flags = br_params->current_flags;
4070 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4071 le_addr_type(cp->addr.type));
4076 current_flags = params->current_flags;
4079 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4080 rp.addr.type = cp->addr.type;
4081 rp.supported_flags = cpu_to_le32(supported_flags);
4082 rp.current_flags = cpu_to_le32(current_flags);
4084 status = MGMT_STATUS_SUCCESS;
4087 hci_dev_unlock(hdev);
4089 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Broadcast a Device Flags Changed event for @bdaddr to all mgmt
 * listeners except the originating socket @sk.
 */
4093 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4094 bdaddr_t *bdaddr, u8 bdaddr_type,
4095 u32 supported_flags, u32 current_flags)
4097 struct mgmt_ev_device_flags_changed ev;
4099 bacpy(&ev.addr.bdaddr, bdaddr);
4100 ev.addr.type = bdaddr_type;
4101 ev.supported_flags = cpu_to_le32(supported_flags);
4102 ev.current_flags = cpu_to_le32(current_flags);
4104 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* Handle MGMT_OP_SET_DEVICE_FLAGS: validate the requested flags against
 * the supported mask, store them on the matching BR/EDR whitelist entry
 * or LE connection-parameters entry, and broadcast the change on success.
 */
4107 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4110 struct mgmt_cp_set_device_flags *cp = data;
4111 struct bdaddr_list_with_flags *br_params;
4112 struct hci_conn_params *params;
4113 u8 status = MGMT_STATUS_INVALID_PARAMS;
4114 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4115 u32 current_flags = __le32_to_cpu(cp->current_flags);
/* NOTE(review): current_flags was already byte-swapped above, so the
 * __le32_to_cpu() in this debug print double-converts it (harmless on
 * little-endian, wrong value logged on big-endian). Also "%0x" in the
 * warning below looks like a typo for "%x". Code-only fix; cannot be
 * corrected in a documentation-only change.
 */
4117 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4118 &cp->addr.bdaddr, cp->addr.type,
4119 __le32_to_cpu(current_flags));
4121 if ((supported_flags | current_flags) != supported_flags) {
4122 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4123 current_flags, supported_flags);
4129 if (cp->addr.type == BDADDR_BREDR) {
4130 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4135 br_params->current_flags = current_flags;
4136 status = MGMT_STATUS_SUCCESS;
4138 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4139 &cp->addr.bdaddr, cp->addr.type);
4142 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4143 le_addr_type(cp->addr.type));
4145 params->current_flags = current_flags;
4146 status = MGMT_STATUS_SUCCESS;
4148 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4150 le_addr_type(cp->addr.type));
4155 hci_dev_unlock(hdev);
4157 if (status == MGMT_STATUS_SUCCESS)
4158 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4159 supported_flags, current_flags);
4161 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4162 &cp->addr, sizeof(cp->addr));
/* Announce a newly registered advertisement monitor handle to all mgmt
 * sockets except the originator @sk.
 */
4165 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4168 struct mgmt_ev_adv_monitor_added ev;
4170 ev.monitor_handle = cpu_to_le16(handle);
4172 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Announce removal of advertisement monitor @handle. If a REMOVE_ADV_MONITOR
 * command is pending for a specific handle, its issuing socket is skipped
 * (it receives the result via the command reply instead).
 * NOTE(review): extraction is missing the lines that set sk_skip from cmd.
 */
4175 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4177 struct mgmt_ev_adv_monitor_removed ev;
4178 struct mgmt_pending_cmd *cmd;
4179 struct sock *sk_skip = NULL;
4180 struct mgmt_cp_remove_adv_monitor *cp;
4182 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* monitor_handle == 0 means "remove all"; only skip for single removes */
4186 if (cp->monitor_handle)
4190 ev.monitor_handle = cpu_to_le16(handle);
4192 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported/enabled
 * monitor features, limits, and the list of currently registered handles.
 * Handles are collected under hdev lock, then the variable-length reply is
 * built and sent after unlocking.
 * NOTE(review): extraction is missing locals (handle, err) and error paths.
 */
4195 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4196 void *data, u16 len)
4198 struct adv_monitor *monitor = NULL;
4199 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4202 __u32 supported = 0;
4204 __u16 num_handles = 0;
4205 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4207 BT_DBG("request for %s", hdev->name);
/* OR-pattern monitors only if the controller's MSFT extension supports them */
4211 if (msft_monitor_supported(hdev))
4212 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4214 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4215 handles[num_handles++] = monitor->handle;
4217 hci_dev_unlock(hdev);
/* Reply has a trailing variable-length handle array */
4219 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4220 rp = kmalloc(rp_size, GFP_KERNEL);
4224 /* All supported features are currently enabled */
4225 enabled = supported;
4227 rp->supported_features = cpu_to_le32(supported);
4228 rp->enabled_features = cpu_to_le32(enabled);
4229 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4230 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4231 rp->num_handles = cpu_to_le16(num_handles);
4233 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4235 err = mgmt_cmd_complete(sk, hdev->id,
4236 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4237 MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion callback for ADD_ADV_PATTERNS_MONITOR(_RSSI): on success the
 * monitor (stashed in cmd->user_data) is published, counted, marked
 * REGISTERED and background scan is refreshed; the pending command is then
 * answered with the monitor handle.
 * NOTE(review): extraction is missing the status!=0 branch and early returns.
 */
4244 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4246 struct mgmt_rp_add_adv_patterns_monitor rp;
4247 struct mgmt_pending_cmd *cmd;
4248 struct adv_monitor *monitor;
/* Either the RSSI or the plain variant may be the pending op */
4253 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4255 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4260 monitor = cmd->user_data;
4261 rp.monitor_handle = cpu_to_le16(monitor->handle);
4264 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4265 hdev->adv_monitors_cnt++;
4266 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4267 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4268 hci_update_background_scan(hdev);
4271 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4272 mgmt_status(status), &rp, sizeof(rp));
4273 mgmt_pending_remove(cmd);
4276 hci_dev_unlock(hdev);
4277 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
4278 rp.monitor_handle, status);
/* Common tail for both ADD_ADV_PATTERNS_MONITOR variants. Rejects when a
 * conflicting mgmt op is pending, queues the monitor via
 * hci_add_adv_monitor(); if no controller round-trip is needed ("pending"
 * false) it completes immediately, otherwise the reply comes from
 * mgmt_add_adv_patterns_monitor_complete(). On error the monitor is freed.
 * NOTE(review): extraction is missing labels, error checks and blank lines.
 */
4283 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4284 struct adv_monitor *m, u8 status,
4285 void *data, u16 len, u16 op)
4287 struct mgmt_rp_add_adv_patterns_monitor rp;
4288 struct mgmt_pending_cmd *cmd;
/* Serialize against other monitor/LE state changes */
4297 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4298 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4299 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4300 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4301 status = MGMT_STATUS_BUSY;
4305 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4307 status = MGMT_STATUS_NO_RESOURCES;
4312 pending = hci_add_adv_monitor(hdev, m, &err);
/* Map hci_add_adv_monitor() errno to a mgmt status */
4314 if (err == -ENOSPC || err == -ENOMEM)
4315 status = MGMT_STATUS_NO_RESOURCES;
4316 else if (err == -EINVAL)
4317 status = MGMT_STATUS_INVALID_PARAMS;
4319 status = MGMT_STATUS_FAILED;
4321 mgmt_pending_remove(cmd);
/* Synchronous success: monitor registered without controller involvement */
4326 mgmt_pending_remove(cmd);
4327 rp.monitor_handle = cpu_to_le16(m->handle);
4328 mgmt_adv_monitor_added(sk, hdev, m->handle);
4329 m->state = ADV_MONITOR_STATE_REGISTERED;
4330 hdev->adv_monitors_cnt++;
4332 hci_dev_unlock(hdev);
4333 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4337 hci_dev_unlock(hdev);
/* Error path: release the half-built monitor before reporting status */
4342 hci_free_adv_monitor(hdev, m);
4343 hci_dev_unlock(hdev);
4344 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copy RSSI thresholds from the mgmt request into the monitor, or install
 * permissive defaults when @rssi is NULL (plain, non-RSSI command variant).
 * NOTE(review): extraction is missing the if/else braces around both arms.
 */
4347 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4348 struct mgmt_adv_rssi_thresholds *rssi)
4351 m->rssi.low_threshold = rssi->low_threshold;
4352 m->rssi.low_threshold_timeout =
4353 __le16_to_cpu(rssi->low_threshold_timeout);
4354 m->rssi.high_threshold = rssi->high_threshold;
4355 m->rssi.high_threshold_timeout =
4356 __le16_to_cpu(rssi->high_threshold_timeout);
4357 m->rssi.sampling_period = rssi->sampling_period;
4359 /* Default values. These numbers are the least constricting
4360 * parameters for MSFT API to work, so it behaves as if there
4361 * are no rssi parameter to consider. May need to be changed
4362 * if other API are to be supported.
4364 m->rssi.low_threshold = -127;
4365 m->rssi.low_threshold_timeout = 60;
4366 m->rssi.high_threshold = -127;
4367 m->rssi.high_threshold_timeout = 0;
4368 m->rssi.sampling_period = 0;
/* Validate and copy @pattern_count advertisement patterns into m->patterns.
 * Each pattern's offset+length window must fit inside HCI_MAX_AD_LENGTH.
 * Returns a MGMT_STATUS_* code; allocated patterns are left on the list for
 * the caller's cleanup path on failure.
 */
4372 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4373 struct mgmt_adv_pattern *patterns)
4375 u8 offset = 0, length = 0;
4376 struct adv_pattern *p = NULL;
4379 for (i = 0; i < pattern_count; i++) {
4380 offset = patterns[i].offset;
4381 length = patterns[i].length;
/* Bound-check the match window against the max AD payload size */
4382 if (offset >= HCI_MAX_AD_LENGTH ||
4383 length > HCI_MAX_AD_LENGTH ||
4384 (offset + length) > HCI_MAX_AD_LENGTH)
4385 return MGMT_STATUS_INVALID_PARAMS;
4387 p = kmalloc(sizeof(*p), GFP_KERNEL);
4389 return MGMT_STATUS_NO_RESOURCES;
4391 p->ad_type = patterns[i].ad_type;
4392 p->offset = patterns[i].offset;
4393 p->length = patterns[i].length;
4394 memcpy(p->value, patterns[i].value, p->length);
4396 INIT_LIST_HEAD(&p->list);
4397 list_add(&p->list, &m->patterns);
4400 return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI thresholds): validate
 * the variable-length pattern payload, build the monitor with default RSSI
 * parameters, then hand off to __add_adv_patterns_monitor().
 */
4403 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4404 void *data, u16 len)
4406 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4407 struct adv_monitor *m = NULL;
4408 u8 status = MGMT_STATUS_SUCCESS;
4409 size_t expected_size = sizeof(*cp);
4411 BT_DBG("request for %s", hdev->name);
/* Must carry at least one pattern beyond the fixed header */
4413 if (len <= sizeof(*cp)) {
4414 status = MGMT_STATUS_INVALID_PARAMS;
4418 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4419 if (len != expected_size) {
4420 status = MGMT_STATUS_INVALID_PARAMS;
4424 m = kzalloc(sizeof(*m), GFP_KERNEL);
4426 status = MGMT_STATUS_NO_RESOURCES;
4430 INIT_LIST_HEAD(&m->patterns);
/* NULL rssi => install default (least constricting) thresholds */
4432 parse_adv_monitor_rssi(m, NULL);
4433 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4436 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4437 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: same as the plain variant
 * but the request carries explicit RSSI thresholds that are copied into the
 * monitor before delegating to __add_adv_patterns_monitor().
 */
4440 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4441 void *data, u16 len)
4443 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4444 struct adv_monitor *m = NULL;
4445 u8 status = MGMT_STATUS_SUCCESS;
4446 size_t expected_size = sizeof(*cp);
4448 BT_DBG("request for %s", hdev->name);
4450 if (len <= sizeof(*cp)) {
4451 status = MGMT_STATUS_INVALID_PARAMS;
/* Fixed header plus exactly pattern_count pattern records */
4455 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4456 if (len != expected_size) {
4457 status = MGMT_STATUS_INVALID_PARAMS;
4461 m = kzalloc(sizeof(*m), GFP_KERNEL);
4463 status = MGMT_STATUS_NO_RESOURCES;
4467 INIT_LIST_HEAD(&m->patterns);
4469 parse_adv_monitor_rssi(m, &cp->rssi);
4470 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4473 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4474 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for REMOVE_ADV_MONITOR: echo the requested handle
 * back, refresh background scanning, and answer the pending command.
 * NOTE(review): extraction is missing the no-pending-cmd early exit.
 */
4477 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4479 struct mgmt_rp_remove_adv_monitor rp;
4480 struct mgmt_cp_remove_adv_monitor *cp;
4481 struct mgmt_pending_cmd *cmd;
4486 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* Handle is echoed as received (still little-endian) */
4491 rp.monitor_handle = cp->monitor_handle;
4494 hci_update_background_scan(hdev);
4496 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4497 mgmt_status(status), &rp, sizeof(rp));
4498 mgmt_pending_remove(cmd);
4501 hci_dev_unlock(hdev);
4502 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
4503 rp.monitor_handle, status);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: handle 0 removes all monitors,
 * otherwise one specific monitor. If removal needs no controller command
 * ("pending" false) the reply is sent immediately; otherwise it comes via
 * mgmt_remove_adv_monitor_complete().
 * NOTE(review): extraction is missing locals, labels and error branches.
 */
4508 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4509 void *data, u16 len)
4511 struct mgmt_cp_remove_adv_monitor *cp = data;
4512 struct mgmt_rp_remove_adv_monitor rp;
4513 struct mgmt_pending_cmd *cmd;
4514 u16 handle = __le16_to_cpu(cp->monitor_handle);
4518 BT_DBG("request for %s", hdev->name);
4519 rp.monitor_handle = cp->monitor_handle;
/* Serialize against other monitor/LE operations */
4523 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4524 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4525 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4526 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4527 status = MGMT_STATUS_BUSY;
4531 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4533 status = MGMT_STATUS_NO_RESOURCES;
4538 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4540 pending = hci_remove_all_adv_monitor(hdev, &err);
4543 mgmt_pending_remove(cmd);
4546 status = MGMT_STATUS_INVALID_INDEX;
4548 status = MGMT_STATUS_FAILED;
4553 /* monitor can be removed without forwarding request to controller */
4555 mgmt_pending_remove(cmd);
4556 hci_dev_unlock(hdev);
4558 return mgmt_cmd_complete(sk, hdev->id,
4559 MGMT_OP_REMOVE_ADV_MONITOR,
4560 MGMT_STATUS_SUCCESS,
4564 hci_dev_unlock(hdev);
4568 hci_dev_unlock(hdev);
4569 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* HCI completion for READ_LOCAL_OOB_DATA: translate the controller reply
 * (legacy P-192-only or extended P-192+P-256 form, selected by @opcode)
 * into a mgmt reply for the pending command. Short/error replies become a
 * FAILED command status.
 * NOTE(review): extraction is missing early returns and goto labels.
 */
4573 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4574 u16 opcode, struct sk_buff *skb)
4576 struct mgmt_rp_read_local_oob_data mgmt_rp;
4577 size_t rp_size = sizeof(mgmt_rp);
4578 struct mgmt_pending_cmd *cmd;
4580 bt_dev_dbg(hdev, "status %u", status);
4582 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4586 if (status || !skb) {
4587 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4588 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4592 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
/* Legacy reply: only P-192 hash/randomizer available */
4594 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4595 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
4597 if (skb->len < sizeof(*rp)) {
4598 mgmt_cmd_status(cmd->sk, hdev->id,
4599 MGMT_OP_READ_LOCAL_OOB_DATA,
4600 MGMT_STATUS_FAILED);
4604 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4605 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Trim the 256-bit fields off the mgmt reply for the legacy case */
4607 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4609 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4611 if (skb->len < sizeof(*rp)) {
4612 mgmt_cmd_status(cmd->sk, hdev->id,
4613 MGMT_OP_READ_LOCAL_OOB_DATA,
4614 MGMT_STATUS_FAILED);
4618 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4619 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4621 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4622 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4625 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4626 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4629 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * adapter and no duplicate pending request; then issues the extended HCI
 * read when Secure Connections is enabled on BR/EDR, the legacy read
 * otherwise. Reply is delivered by read_local_oob_data_complete().
 */
4632 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4633 void *data, u16 data_len)
4635 struct mgmt_pending_cmd *cmd;
4636 struct hci_request req;
4639 bt_dev_dbg(hdev, "sock %p", sk);
4643 if (!hdev_is_powered(hdev)) {
4644 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4645 MGMT_STATUS_NOT_POWERED);
4649 if (!lmp_ssp_capable(hdev)) {
4650 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4651 MGMT_STATUS_NOT_SUPPORTED);
4655 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4656 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4661 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4667 hci_req_init(&req, hdev);
/* Extended variant also yields P-256 hash/randomizer */
4669 if (bredr_sc_enabled(hdev))
4670 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4672 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4674 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4676 mgmt_pending_remove(cmd);
4679 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler. Two request sizes are accepted:
 * the short form (P-192 only, BR/EDR only) and the extended form carrying
 * both P-192 and P-256 values. All-zero key/randomizer pairs are treated
 * as "no data" and disable the corresponding OOB set; any other length is
 * rejected as invalid.
 * NOTE(review): extraction is missing goto labels/else arms between checks.
 */
4683 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4684 void *data, u16 len)
4686 struct mgmt_addr_info *addr = data;
4689 bt_dev_dbg(hdev, "sock %p", sk);
4691 if (!bdaddr_type_is_valid(addr->type))
4692 return mgmt_cmd_complete(sk, hdev->id,
4693 MGMT_OP_ADD_REMOTE_OOB_DATA,
4694 MGMT_STATUS_INVALID_PARAMS,
4695 addr, sizeof(*addr));
/* Short form: P-192 only, restricted to BR/EDR addresses */
4699 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4700 struct mgmt_cp_add_remote_oob_data *cp = data;
4703 if (cp->addr.type != BDADDR_BREDR) {
4704 err = mgmt_cmd_complete(sk, hdev->id,
4705 MGMT_OP_ADD_REMOTE_OOB_DATA,
4706 MGMT_STATUS_INVALID_PARAMS,
4707 &cp->addr, sizeof(cp->addr));
4711 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4712 cp->addr.type, cp->hash,
4713 cp->rand, NULL, NULL);
4715 status = MGMT_STATUS_FAILED;
4717 status = MGMT_STATUS_SUCCESS;
4719 err = mgmt_cmd_complete(sk, hdev->id,
4720 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4721 &cp->addr, sizeof(cp->addr));
4722 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4723 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4724 u8 *rand192, *hash192, *rand256, *hash256;
4727 if (bdaddr_type_is_le(cp->addr.type)) {
4728 /* Enforce zero-valued 192-bit parameters as
4729 * long as legacy SMP OOB isn't implemented.
4731 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4732 memcmp(cp->hash192, ZERO_KEY, 16)) {
4733 err = mgmt_cmd_complete(sk, hdev->id,
4734 MGMT_OP_ADD_REMOTE_OOB_DATA,
4735 MGMT_STATUS_INVALID_PARAMS,
4736 addr, sizeof(*addr));
4743 /* In case one of the P-192 values is set to zero,
4744 * then just disable OOB data for P-192.
4746 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4747 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4751 rand192 = cp->rand192;
4752 hash192 = cp->hash192;
4756 /* In case one of the P-256 values is set to zero, then just
4757 * disable OOB data for P-256.
4759 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4760 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4764 rand256 = cp->rand256;
4765 hash256 = cp->hash256;
4768 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4769 cp->addr.type, hash192, rand192,
4772 status = MGMT_STATUS_FAILED;
4774 status = MGMT_STATUS_SUCCESS;
4776 err = mgmt_cmd_complete(sk, hdev->id,
4777 MGMT_OP_ADD_REMOTE_OOB_DATA,
4778 status, &cp->addr, sizeof(cp->addr));
4780 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4782 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4783 MGMT_STATUS_INVALID_PARAMS);
4787 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BR/EDR only. BDADDR_ANY clears
 * all stored remote OOB data; otherwise remove the one matching entry,
 * mapping a lookup miss to INVALID_PARAMS.
 */
4791 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4792 void *data, u16 len)
4794 struct mgmt_cp_remove_remote_oob_data *cp = data;
4798 bt_dev_dbg(hdev, "sock %p", sk);
4800 if (cp->addr.type != BDADDR_BREDR)
4801 return mgmt_cmd_complete(sk, hdev->id,
4802 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4803 MGMT_STATUS_INVALID_PARAMS,
4804 &cp->addr, sizeof(cp->addr));
/* Wildcard address clears the whole OOB store */
4808 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4809 hci_remote_oob_data_clear(hdev);
4810 status = MGMT_STATUS_SUCCESS;
4814 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4816 status = MGMT_STATUS_INVALID_PARAMS;
4818 status = MGMT_STATUS_SUCCESS;
4821 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4822 status, &cp->addr, sizeof(cp->addr));
4824 hci_dev_unlock(hdev);
/* Completion for any of the three start-discovery mgmt commands: finish
 * whichever one is pending, then wake the suspend machinery if discovery
 * was being unpaused as part of resume.
 */
4828 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4830 struct mgmt_pending_cmd *cmd;
4832 bt_dev_dbg(hdev, "status %d", status);
/* Only one of the three variants can be pending at a time */
4836 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4838 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4841 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4844 cmd->cmd_complete(cmd, mgmt_status(status));
4845 mgmt_pending_remove(cmd);
4848 hci_dev_unlock(hdev);
4850 /* Handle suspend notifier */
4851 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4852 hdev->suspend_tasks)) {
4853 bt_dev_dbg(hdev, "Unpaused discovery");
4854 wake_up(&hdev->suspend_wait_q);
/* Check whether discovery @type is supported by @hdev, writing the mgmt
 * status (0 on support) through @mgmt_status. Unknown types yield
 * INVALID_PARAMS.
 * NOTE(review): extraction is missing the switch header and return lines.
 */
4858 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4859 uint8_t *mgmt_status)
4862 case DISCOV_TYPE_LE:
4863 *mgmt_status = mgmt_le_support(hdev);
4867 case DISCOV_TYPE_INTERLEAVED:
4868 *mgmt_status = mgmt_le_support(hdev);
4872 case DISCOV_TYPE_BREDR:
4873 *mgmt_status = mgmt_bredr_support(hdev);
4878 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation of START_DISCOVERY and START_LIMITED_DISCOVERY:
 * validate power/state/type/pause conditions, reset the discovery filter,
 * record the requested type and limited mode, queue the pending command and
 * kick the discov_update work. Reply arrives via
 * mgmt_start_discovery_complete().
 */
4885 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4886 u16 op, void *data, u16 len)
4888 struct mgmt_cp_start_discovery *cp = data;
4889 struct mgmt_pending_cmd *cmd;
4893 bt_dev_dbg(hdev, "sock %p", sk);
4897 if (!hdev_is_powered(hdev)) {
4898 err = mgmt_cmd_complete(sk, hdev->id, op,
4899 MGMT_STATUS_NOT_POWERED,
4900 &cp->type, sizeof(cp->type));
4904 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4905 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4906 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4907 &cp->type, sizeof(cp->type));
4911 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4912 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4913 &cp->type, sizeof(cp->type));
4917 /* Can't start discovery when it is paused */
4918 if (hdev->discovery_paused) {
4919 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4920 &cp->type, sizeof(cp->type));
4924 /* Clear the discovery filter first to free any previously
4925 * allocated memory for the UUID list.
4927 hci_discovery_filter_clear(hdev);
4929 hdev->discovery.type = cp->type;
4930 hdev->discovery.report_invalid_rssi = false;
4931 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4932 hdev->discovery.limited = true;
4934 hdev->discovery.limited = false;
4936 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4942 cmd->cmd_complete = generic_cmd_complete;
/* Actual scanning is started asynchronously by the discov_update work */
4944 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4945 queue_work(hdev->req_workqueue, &hdev->discov_update);
4949 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY: thin wrapper over start_discovery_internal(). */
4953 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4954 void *data, u16 len)
4956 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY: wrapper over start_discovery_internal()
 * which makes it set hdev->discovery.limited.
 */
4960 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
4961 void *data, u16 len)
4963 return start_discovery_internal(sk, hdev,
4964 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete hook for START_SERVICE_DISCOVERY pending commands. */
4968 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4971 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * result filtering by RSSI and an optional UUID list. The UUID count is
 * bounded so that sizeof(*cp) + 16*count cannot overflow u16, the exact
 * request length is verified, and the UUID list is duplicated into
 * hdev->discovery before the discov_update work is queued.
 */
4975 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4976 void *data, u16 len)
4978 struct mgmt_cp_start_service_discovery *cp = data;
4979 struct mgmt_pending_cmd *cmd;
/* Largest count whose 16-byte UUIDs still fit in a u16-sized request */
4980 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4981 u16 uuid_count, expected_len;
4985 bt_dev_dbg(hdev, "sock %p", sk);
4989 if (!hdev_is_powered(hdev)) {
4990 err = mgmt_cmd_complete(sk, hdev->id,
4991 MGMT_OP_START_SERVICE_DISCOVERY,
4992 MGMT_STATUS_NOT_POWERED,
4993 &cp->type, sizeof(cp->type));
4997 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4998 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4999 err = mgmt_cmd_complete(sk, hdev->id,
5000 MGMT_OP_START_SERVICE_DISCOVERY,
5001 MGMT_STATUS_BUSY, &cp->type,
5006 if (hdev->discovery_paused) {
5007 err = mgmt_cmd_complete(sk, hdev->id,
5008 MGMT_OP_START_SERVICE_DISCOVERY,
5009 MGMT_STATUS_BUSY, &cp->type,
5014 uuid_count = __le16_to_cpu(cp->uuid_count);
5015 if (uuid_count > max_uuid_count) {
5016 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5018 err = mgmt_cmd_complete(sk, hdev->id,
5019 MGMT_OP_START_SERVICE_DISCOVERY,
5020 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Request must be exactly header + 16 bytes per UUID */
5025 expected_len = sizeof(*cp) + uuid_count * 16;
5026 if (expected_len != len) {
5027 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5029 err = mgmt_cmd_complete(sk, hdev->id,
5030 MGMT_OP_START_SERVICE_DISCOVERY,
5031 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5036 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5037 err = mgmt_cmd_complete(sk, hdev->id,
5038 MGMT_OP_START_SERVICE_DISCOVERY,
5039 status, &cp->type, sizeof(cp->type));
5043 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5050 cmd->cmd_complete = service_discovery_cmd_complete;
5052 /* Clear the discovery filter first to free any previously
5053 * allocated memory for the UUID list.
5055 hci_discovery_filter_clear(hdev);
5057 hdev->discovery.result_filtering = true;
5058 hdev->discovery.type = cp->type;
5059 hdev->discovery.rssi = cp->rssi;
5060 hdev->discovery.uuid_count = uuid_count;
5062 if (uuid_count > 0) {
5063 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5065 if (!hdev->discovery.uuids) {
5066 err = mgmt_cmd_complete(sk, hdev->id,
5067 MGMT_OP_START_SERVICE_DISCOVERY,
5069 &cp->type, sizeof(cp->type));
5070 mgmt_pending_remove(cmd);
5075 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5076 queue_work(hdev->req_workqueue, &hdev->discov_update);
5080 hci_dev_unlock(hdev);
/* Completion for STOP_DISCOVERY: answer the pending command, then wake the
 * suspend machinery if discovery was being paused for suspend.
 */
5084 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5086 struct mgmt_pending_cmd *cmd;
5088 bt_dev_dbg(hdev, "status %d", status);
5092 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5094 cmd->cmd_complete(cmd, mgmt_status(status));
5095 mgmt_pending_remove(cmd);
5098 hci_dev_unlock(hdev);
5100 /* Handle suspend notifier */
5101 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5102 bt_dev_dbg(hdev, "Paused discovery");
5103 wake_up(&hdev->suspend_wait_q);
/* MGMT_OP_STOP_DISCOVERY handler: rejected when no discovery is active or
 * the requested type does not match the running one; otherwise transition
 * to DISCOVERY_STOPPING and let the discov_update work perform the stop.
 */
5107 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5110 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5111 struct mgmt_pending_cmd *cmd;
5114 bt_dev_dbg(hdev, "sock %p", sk);
5118 if (!hci_discovery_active(hdev)) {
5119 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5120 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5121 sizeof(mgmt_cp->type));
/* Type must match the discovery that was actually started */
5125 if (hdev->discovery.type != mgmt_cp->type) {
5126 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5127 MGMT_STATUS_INVALID_PARAMS,
5128 &mgmt_cp->type, sizeof(mgmt_cp->type));
5132 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5138 cmd->cmd_complete = generic_cmd_complete;
5140 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5141 queue_work(hdev->req_workqueue, &hdev->discov_update);
5145 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during active discovery, mark an inquiry
 * cache entry's name as known, or flag it NAME_NEEDED so name resolution
 * is scheduled for it.
 */
5149 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5152 struct mgmt_cp_confirm_name *cp = data;
5153 struct inquiry_entry *e;
5156 bt_dev_dbg(hdev, "sock %p", sk);
5160 if (!hci_discovery_active(hdev)) {
5161 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5162 MGMT_STATUS_FAILED, &cp->addr,
5167 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5169 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5170 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5175 if (cp->name_known) {
5176 e->name_state = NAME_KNOWN;
/* Unknown name: queue the entry for remote-name resolution */
5179 e->name_state = NAME_NEEDED;
5180 hci_inquiry_cache_update_resolve(hdev, e);
5183 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5184 &cp->addr, sizeof(cp->addr));
5187 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the reject list
 * (blacklist) and broadcast DEVICE_BLOCKED on success.
 * NOTE(review): extraction is missing the hci_bdaddr_list_add() error check.
 */
5191 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5194 struct mgmt_cp_block_device *cp = data;
5198 bt_dev_dbg(hdev, "sock %p", sk);
5200 if (!bdaddr_type_is_valid(cp->addr.type))
5201 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5202 MGMT_STATUS_INVALID_PARAMS,
5203 &cp->addr, sizeof(cp->addr));
5207 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
5210 status = MGMT_STATUS_FAILED;
5214 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5216 status = MGMT_STATUS_SUCCESS;
5219 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5220 &cp->addr, sizeof(cp->addr));
5222 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from the reject list
 * and broadcast DEVICE_UNBLOCKED; a lookup miss maps to INVALID_PARAMS.
 */
5227 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5230 struct mgmt_cp_unblock_device *cp = data;
5234 bt_dev_dbg(hdev, "sock %p", sk);
5236 if (!bdaddr_type_is_valid(cp->addr.type))
5237 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5238 MGMT_STATUS_INVALID_PARAMS,
5239 &cp->addr, sizeof(cp->addr));
5243 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
5246 status = MGMT_STATUS_INVALID_PARAMS;
5250 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5252 status = MGMT_STATUS_SUCCESS;
5255 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5256 &cp->addr, sizeof(cp->addr));
5258 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record (source must
 * be 0x0000-0x0002 per the DI profile) and refresh the EIR data so the new
 * DID is advertised.
 */
5263 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5266 struct mgmt_cp_set_device_id *cp = data;
5267 struct hci_request req;
5271 bt_dev_dbg(hdev, "sock %p", sk);
5273 source = __le16_to_cpu(cp->source);
/* 0x0000 = disabled, 0x0001 = Bluetooth SIG, 0x0002 = USB IF */
5275 if (source > 0x0002)
5276 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5277 MGMT_STATUS_INVALID_PARAMS);
5281 hdev->devid_source = source;
5282 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5283 hdev->devid_product = __le16_to_cpu(cp->product);
5284 hdev->devid_version = __le16_to_cpu(cp->version);
5286 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5289 hci_req_init(&req, hdev);
5290 __hci_req_update_eir(&req);
5291 hci_req_run(&req, NULL);
5293 hci_dev_unlock(hdev);
/* Debug-only completion callback for re-enabling instance advertising. */
5298 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5301 bt_dev_dbg(hdev, "status %d", status);
/* HCI completion for SET_ADVERTISING: sync HCI_ADVERTISING to the actual
 * LE adv state, answer all pending SET_ADVERTISING commands, signal the
 * suspend pause/unpause waiters, and — when the global setting was just
 * turned off while adv instances exist — reschedule instance advertising.
 * NOTE(review): extraction is missing goto labels and blank separators.
 */
5304 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5307 struct cmd_lookup match = { NULL, hdev };
5308 struct hci_request req;
5310 struct adv_info *adv_instance;
5316 u8 mgmt_err = mgmt_status(status);
5318 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5319 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's actual advertising state into the mgmt flag */
5323 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5324 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5326 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5328 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5331 new_settings(hdev, match.sk);
5336 /* Handle suspend notifier */
5337 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5338 hdev->suspend_tasks)) {
5339 bt_dev_dbg(hdev, "Paused advertising");
5340 wake_up(&hdev->suspend_wait_q);
5341 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5342 hdev->suspend_tasks)) {
5343 bt_dev_dbg(hdev, "Unpaused advertising");
5344 wake_up(&hdev->suspend_wait_q);
5347 /* If "Set Advertising" was just disabled and instance advertising was
5348 * set up earlier, then re-enable multi-instance advertising.
5350 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5351 list_empty(&hdev->adv_instances))
5354 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first configured one */
5356 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5357 struct adv_info, list);
5361 instance = adv_instance->instance;
5364 hci_req_init(&req, hdev);
5366 err = __hci_req_schedule_adv_instance(&req, instance, true);
5369 err = hci_req_run(&req, enable_advertising_instance);
5372 bt_dev_err(hdev, "failed to re-configure advertising");
5375 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler. val 0x00 = off, 0x01 = on,
 * 0x02 = on + connectable. When the controller cannot be touched (powered
 * off, setting already matches, LE links up, or active scan running) only
 * the flags are toggled and the reply is immediate; otherwise an HCI
 * request is built and the reply comes from set_advertising_complete().
 * Rejected while LL privacy is enabled or advertising is suspend-paused.
 * NOTE(review): extraction is missing goto labels and several braces.
 */
5378 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5381 struct mgmt_mode *cp = data;
5382 struct mgmt_pending_cmd *cmd;
5383 struct hci_request req;
5387 bt_dev_dbg(hdev, "sock %p", sk);
5389 status = mgmt_le_support(hdev);
5391 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5394 /* Enabling the experimental LL Privay support disables support for
5397 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5398 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5399 MGMT_STATUS_NOT_SUPPORTED);
5401 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5402 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5403 MGMT_STATUS_INVALID_PARAMS);
5405 if (hdev->advertising_paused)
5406 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5413 /* The following conditions are ones which mean that we should
5414 * not do any HCI communication but directly send a mgmt
5415 * response to user space (after toggling the flag if
5418 if (!hdev_is_powered(hdev) ||
5419 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5420 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5421 hci_conn_num(hdev, LE_LINK) > 0 ||
5422 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5423 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5427 hdev->cur_adv_instance = 0x00;
5428 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5429 if (cp->val == 0x02)
5430 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5432 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5434 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5435 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5438 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
/* Only announce new settings when a flag actually flipped */
5443 err = new_settings(hdev, sk);
5448 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5449 pending_find(MGMT_OP_SET_LE, hdev)) {
5450 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5455 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5461 hci_req_init(&req, hdev);
5463 if (cp->val == 0x02)
5464 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5466 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5468 cancel_adv_timeout(hdev);
5471 /* Switch to instance "0" for the Set Advertising setting.
5472 * We cannot use update_[adv|scan_rsp]_data() here as the
5473 * HCI_ADVERTISING flag is not yet set.
5475 hdev->cur_adv_instance = 0x00;
5477 if (ext_adv_capable(hdev)) {
5478 __hci_req_start_ext_adv(&req, 0x00);
5480 __hci_req_update_adv_data(&req, 0x00);
5481 __hci_req_update_scan_rsp_data(&req, 0x00);
5482 __hci_req_enable_advertising(&req);
5485 __hci_req_disable_advertising(&req);
5488 err = hci_req_run(&req, set_advertising_complete);
5490 mgmt_pending_remove(cmd);
5493 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only allowed on an LE-capable,
 * powered-off adapter. A non-zero address must not be BDADDR_NONE and
 * must have the two most significant bits set (Core Spec static random
 * address format); BDADDR_ANY clears the static address.
 */
5497 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5498 void *data, u16 len)
5500 struct mgmt_cp_set_static_address *cp = data;
5503 bt_dev_dbg(hdev, "sock %p", sk);
5505 if (!lmp_le_capable(hdev))
5506 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5507 MGMT_STATUS_NOT_SUPPORTED);
5509 if (hdev_is_powered(hdev))
5510 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5511 MGMT_STATUS_REJECTED);
5513 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5514 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5515 return mgmt_cmd_status(sk, hdev->id,
5516 MGMT_OP_SET_STATIC_ADDRESS,
5517 MGMT_STATUS_INVALID_PARAMS,
5519 /* Two most significant bits shall be set */
5520 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5521 return mgmt_cmd_status(sk, hdev->id,
5522 MGMT_OP_SET_STATIC_ADDRESS,
5523 MGMT_STATUS_INVALID_PARAMS);
5528 bacpy(&hdev->static_addr, &cp->bdaddr);
5530 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5534 err = new_settings(hdev, sk);
5537 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validate and store LE scan interval and
 * window (each 0x0004-0x4000, window <= interval, per HCI spec), then
 * restart passive background scanning so the new parameters take effect —
 * but only when no user-initiated discovery is in progress.
 */
5541 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5542 void *data, u16 len)
5544 struct mgmt_cp_set_scan_params *cp = data;
5545 __u16 interval, window;
5548 bt_dev_dbg(hdev, "sock %p", sk);
5550 if (!lmp_le_capable(hdev))
5551 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5552 MGMT_STATUS_NOT_SUPPORTED);
5554 interval = __le16_to_cpu(cp->interval);
5556 if (interval < 0x0004 || interval > 0x4000)
5557 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5558 MGMT_STATUS_INVALID_PARAMS);
5560 window = __le16_to_cpu(cp->window);
5562 if (window < 0x0004 || window > 0x4000)
5563 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5564 MGMT_STATUS_INVALID_PARAMS);
5566 if (window > interval)
5567 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5568 MGMT_STATUS_INVALID_PARAMS);
5572 hdev->le_scan_interval = interval;
5573 hdev->le_scan_window = window;
5575 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5578 /* If background scan is running, restart it so new parameters are
5581 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5582 hdev->discovery.state == DISCOVERY_STOPPED) {
5583 struct hci_request req;
5585 hci_req_init(&req, hdev);
5587 hci_req_add_le_scan_disable(&req, false);
5588 hci_req_add_le_passive_scan(&req);
5590 hci_req_run(&req, NULL);
5593 hci_dev_unlock(hdev);
/* HCI request completion handler for Set Fast Connectable. On HCI
 * failure, reports the mapped mgmt status to the command's socket;
 * on success, updates the HCI_FAST_CONNECTABLE flag from the pending
 * command's requested mode, replies with the current settings and
 * broadcasts New Settings.
 */
5598 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5601 struct mgmt_pending_cmd *cmd;
5603 bt_dev_dbg(hdev, "status 0x%02x", status);
/* Look up the pending mgmt command this completion belongs to */
5607 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5612 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5613 mgmt_status(status));
5615 struct mgmt_mode *cp = cmd->param;
/* Mirror the requested mode into the device flag */
5618 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5620 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5622 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5623 new_settings(hdev, cmd->sk);
5626 mgmt_pending_remove(cmd);
5629 hci_dev_unlock(hdev);
/* MGMT Set Fast Connectable: toggle fast-connectable page scan
 * parameters on BR/EDR controllers (requires Bluetooth >= 1.2).
 * If the device is powered off the flag is just toggled and settings
 * are reported; otherwise an HCI request is issued and completion is
 * handled by fast_connectable_complete().
 */
5632 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5633 void *data, u16 len)
5635 struct mgmt_mode *cp = data;
5636 struct mgmt_pending_cmd *cmd;
5637 struct hci_request req;
5640 bt_dev_dbg(hdev, "sock %p", sk);
/* Needs BR/EDR enabled and a controller newer than Bluetooth 1.1 */
5642 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5643 hdev->hci_ver < BLUETOOTH_VER_1_2)
5644 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5645 MGMT_STATUS_NOT_SUPPORTED);
5647 if (cp->val != 0x00 && cp->val != 0x01)
5648 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5649 MGMT_STATUS_INVALID_PARAMS);
/* Only one Set Fast Connectable command may be in flight */
5653 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5654 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested mode matches the current flag */
5659 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5660 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
/* Powered off: just toggle the flag, no HCI traffic needed */
5665 if (!hdev_is_powered(hdev)) {
5666 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5667 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5669 new_settings(hdev, sk);
5673 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
/* Powered on: write the page scan parameters to the controller */
5680 hci_req_init(&req, hdev);
5682 __hci_req_write_fast_connectable(&req, cp->val);
5684 err = hci_req_run(&req, fast_connectable_complete);
5686 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5687 MGMT_STATUS_FAILED);
5688 mgmt_pending_remove(cmd);
5692 hci_dev_unlock(hdev);
/* HCI request completion handler for Set BR/EDR. On failure the
 * HCI_BREDR_ENABLED flag (flipped optimistically in set_bredr()) is
 * restored and an error status is sent; on success the settings
 * response and New Settings event are emitted.
 */
5697 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5699 struct mgmt_pending_cmd *cmd;
5701 bt_dev_dbg(hdev, "status 0x%02x", status);
5705 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5710 u8 mgmt_err = mgmt_status(status);
5712 /* We need to restore the flag if related HCI commands
5715 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5717 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5719 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5720 new_settings(hdev, cmd->sk);
5723 mgmt_pending_remove(cmd);
5726 hci_dev_unlock(hdev);
/* MGMT Set BR/EDR: enable or disable BR/EDR support on a dual-mode
 * (BR/EDR + LE) controller. Disabling while powered is rejected, as
 * is re-enabling when a static address or secure connections is in
 * use (see the detailed rationale comments below). When powered on,
 * enabling flips HCI_BREDR_ENABLED up front and then updates fast
 * connectable, scan and advertising data via an HCI request completed
 * by set_bredr_complete().
 */
5729 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5731 struct mgmt_mode *cp = data;
5732 struct mgmt_pending_cmd *cmd;
5733 struct hci_request req;
5736 bt_dev_dbg(hdev, "sock %p", sk);
/* Only meaningful on dual-mode controllers */
5738 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5739 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5740 MGMT_STATUS_NOT_SUPPORTED);
/* LE must stay enabled; BR/EDR-only operation is configured by
 * disabling LE, not the other way around.
 */
5742 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5743 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5744 MGMT_STATUS_REJECTED);
5746 if (cp->val != 0x00 && cp->val != 0x01)
5747 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5748 MGMT_STATUS_INVALID_PARAMS);
/* No change requested: just report current settings */
5752 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5753 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: toggle the flag and clear all BR/EDR-only state
 * when disabling.
 */
5757 if (!hdev_is_powered(hdev)) {
5759 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5760 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5761 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5762 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5763 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5766 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5768 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5772 err = new_settings(hdev, sk);
5776 /* Reject disabling when powered on */
5778 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5779 MGMT_STATUS_REJECTED);
5782 /* When configuring a dual-mode controller to operate
5783 * with LE only and using a static address, then switching
5784 * BR/EDR back on is not allowed.
5786 * Dual-mode controllers shall operate with the public
5787 * address as its identity address for BR/EDR and LE. So
5788 * reject the attempt to create an invalid configuration.
5790 * The same restrictions applies when secure connections
5791 * has been enabled. For BR/EDR this is a controller feature
5792 * while for LE it is a host stack feature. This means that
5793 * switching BR/EDR back on when secure connections has been
5794 * enabled is not a supported transaction.
5796 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5797 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5798 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5799 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5800 MGMT_STATUS_REJECTED);
5805 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5806 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5811 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5817 /* We need to flip the bit already here so that
5818 * hci_req_update_adv_data generates the correct flags.
5820 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5822 hci_req_init(&req, hdev);
5824 __hci_req_write_fast_connectable(&req, false);
5825 __hci_req_update_scan(&req);
5827 /* Since only the advertising data flags will change, there
5828 * is no need to update the scan response data.
5830 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5832 err = hci_req_run(&req, set_bredr_complete);
5834 mgmt_pending_remove(cmd);
5837 hci_dev_unlock(hdev);
/* HCI request completion handler for Set Secure Connections. On
 * failure, reports the mapped status; on success, updates the
 * HCI_SC_ENABLED/HCI_SC_ONLY flag pair according to the mode the
 * pending command requested (off / enabled / SC-only), then sends the
 * settings response and New Settings event.
 */
5841 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5843 struct mgmt_pending_cmd *cmd;
5844 struct mgmt_mode *cp;
5846 bt_dev_dbg(hdev, "status %u", status);
5850 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5855 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5856 mgmt_status(status));
/* Mode 0x00: SC disabled entirely */
5864 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5865 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Mode 0x01: SC enabled, legacy pairing still allowed */
5868 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5869 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Mode 0x02: SC-only mode */
5872 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5873 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5877 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5878 new_settings(hdev, cmd->sk);
5881 mgmt_pending_remove(cmd);
5883 hci_dev_unlock(hdev);
/* MGMT Set Secure Connections: configure SC support. val 0x00
 * disables, 0x01 enables, 0x02 selects SC-only mode. When the
 * controller is not powered/SC-capable or BR/EDR is disabled, only
 * the host-side flags are toggled; otherwise an HCI Write Secure
 * Connections Host Support command is issued and the flags are
 * updated in sc_enable_complete().
 */
5886 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5887 void *data, u16 len)
5889 struct mgmt_mode *cp = data;
5890 struct mgmt_pending_cmd *cmd;
5891 struct hci_request req;
5895 bt_dev_dbg(hdev, "sock %p", sk);
/* Needs either controller SC support or LE enabled (LE SC is a
 * host stack feature).
 */
5897 if (!lmp_sc_capable(hdev) &&
5898 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5899 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5900 MGMT_STATUS_NOT_SUPPORTED);
/* On BR/EDR, SC requires SSP to be enabled first */
5902 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5903 lmp_sc_capable(hdev) &&
5904 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5905 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5906 MGMT_STATUS_REJECTED);
5908 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5910 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no HCI command can or needs to be sent */
5914 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5915 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5919 changed = !hci_dev_test_and_set_flag(hdev,
5921 if (cp->val == 0x02)
5922 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5924 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5926 changed = hci_dev_test_and_clear_flag(hdev,
5928 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5931 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5936 err = new_settings(hdev, sk);
5941 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5942 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Requested state already matches both flags: nothing to do */
5949 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5950 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5951 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5955 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5961 hci_req_init(&req, hdev);
5962 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5963 err = hci_req_run(&req, sc_enable_complete);
5965 mgmt_pending_remove(cmd);
5970 hci_dev_unlock(hdev);
/* MGMT Set Debug Keys: control handling of SSP debug keys. val 0x00
 * discards debug keys, 0x01 keeps them (HCI_KEEP_DEBUG_KEYS), 0x02
 * additionally makes the controller use debug keys for pairing
 * (HCI_USE_DEBUG_KEYS). If usage changed while powered with SSP
 * enabled, the controller's SSP debug mode is updated via HCI.
 */
5974 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5975 void *data, u16 len)
5977 struct mgmt_mode *cp = data;
5978 bool changed, use_changed;
5981 bt_dev_dbg(hdev, "sock %p", sk);
5983 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5984 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5985 MGMT_STATUS_INVALID_PARAMS);
/* Track whether the "keep" flag actually changed */
5990 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5992 changed = hci_dev_test_and_clear_flag(hdev,
5993 HCI_KEEP_DEBUG_KEYS);
/* Only mode 0x02 turns on active debug key usage */
5995 if (cp->val == 0x02)
5996 use_changed = !hci_dev_test_and_set_flag(hdev,
5997 HCI_USE_DEBUG_KEYS);
5999 use_changed = hci_dev_test_and_clear_flag(hdev,
6000 HCI_USE_DEBUG_KEYS);
/* Push the new debug mode to the controller when it matters */
6002 if (hdev_is_powered(hdev) && use_changed &&
6003 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6004 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6005 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6006 sizeof(mode), &mode);
6009 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6014 err = new_settings(hdev, sk);
6017 hci_dev_unlock(hdev);
/* MGMT Set Privacy: enable/disable LE privacy (RPA usage). privacy
 * 0x00 disables, 0x01 enables, 0x02 enables limited privacy. Only
 * allowed while powered off. Enabling stores the provided IRK and
 * marks the RPA as expired so a fresh one is generated; disabling
 * wipes the IRK and clears the related flags.
 */
6021 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6024 struct mgmt_cp_set_privacy *cp = cp_data;
6028 bt_dev_dbg(hdev, "sock %p", sk);
6030 if (!lmp_le_capable(hdev))
6031 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6032 MGMT_STATUS_NOT_SUPPORTED);
6034 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6035 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6036 MGMT_STATUS_INVALID_PARAMS);
/* Privacy configuration is only allowed while powered off */
6038 if (hdev_is_powered(hdev))
6039 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6040 MGMT_STATUS_REJECTED);
6044 /* If user space supports this command it is also expected to
6045 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6047 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6050 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6051 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a new RPA for hdev and all adv instances */
6052 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6053 hci_adv_instances_set_rpa_expired(hdev, true);
6054 if (cp->privacy == 0x02)
6055 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6057 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disabling: clear flags and wipe the stored IRK */
6059 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6060 memset(hdev->irk, 0, sizeof(hdev->irk));
6061 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6062 hci_adv_instances_set_rpa_expired(hdev, false);
6063 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6066 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6071 err = new_settings(hdev, sk);
6074 hci_dev_unlock(hdev);
/* Validate a single IRK entry from Load IRKs: the address must be an
 * LE identity address — public, or static random (two most
 * significant address bits set).
 */
6078 static bool irk_is_valid(struct mgmt_irk_info *irk)
6080 switch (irk->addr.type) {
6081 case BDADDR_LE_PUBLIC:
6084 case BDADDR_LE_RANDOM:
6085 /* Two most significant bits shall be set */
6086 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load IRKs: replace the kernel's list of identity resolving
 * keys with the ones supplied by user space. The payload length must
 * exactly match the declared irk_count, every entry must pass
 * irk_is_valid(), and blocked keys are skipped with a warning.
 * Loading IRKs also enables RPA resolving.
 */
6094 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
/* Upper bound on entries that can fit in a u16-sized payload */
6097 struct mgmt_cp_load_irks *cp = cp_data;
6098 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6099 sizeof(struct mgmt_irk_info));
6100 u16 irk_count, expected_len;
6103 bt_dev_dbg(hdev, "sock %p", sk);
6105 if (!lmp_le_capable(hdev))
6106 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6107 MGMT_STATUS_NOT_SUPPORTED);
6109 irk_count = __le16_to_cpu(cp->irk_count);
6110 if (irk_count > max_irk_count) {
6111 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6113 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6114 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload size */
6117 expected_len = struct_size(cp, irks, irk_count);
6118 if (expected_len != len) {
6119 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6121 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6122 MGMT_STATUS_INVALID_PARAMS);
6125 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate all entries before mutating any state */
6127 for (i = 0; i < irk_count; i++) {
6128 struct mgmt_irk_info *key = &cp->irks[i];
6130 if (!irk_is_valid(key))
6131 return mgmt_cmd_status(sk, hdev->id,
6133 MGMT_STATUS_INVALID_PARAMS);
/* This is a full replace, not a merge: drop existing IRKs first */
6138 hci_smp_irks_clear(hdev);
6140 for (i = 0; i < irk_count; i++) {
6141 struct mgmt_irk_info *irk = &cp->irks[i];
/* Skip keys on the administratively blocked-key list */
6143 if (hci_is_blocked_key(hdev,
6144 HCI_BLOCKED_KEY_TYPE_IRK,
6146 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6151 hci_add_irk(hdev, &irk->addr.bdaddr,
6152 le_addr_type(irk->addr.type), irk->val,
6156 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6158 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6160 hci_dev_unlock(hdev);
/* Validate a single LTK entry from Load Long Term Keys: the master
 * field must be 0 or 1 and the address must be an LE identity
 * address — public, or static random (two most significant bits set).
 */
6165 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6167 if (key->master != 0x00 && key->master != 0x01)
6170 switch (key->addr.type) {
6171 case BDADDR_LE_PUBLIC:
6174 case BDADDR_LE_RANDOM:
6175 /* Two most significant bits shall be set */
6176 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load Long Term Keys: replace the kernel's SMP LTK store with
 * the keys supplied by user space. The payload length must exactly
 * match key_count and every entry must pass ltk_is_valid(). Blocked
 * keys are skipped; each remaining key's mgmt type is mapped onto the
 * SMP key type plus an authenticated flag before being added.
 */
6184 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6185 void *cp_data, u16 len)
6187 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound on entries that can fit in a u16-sized payload */
6188 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6189 sizeof(struct mgmt_ltk_info));
6190 u16 key_count, expected_len;
6193 bt_dev_dbg(hdev, "sock %p", sk);
6195 if (!lmp_le_capable(hdev))
6196 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6197 MGMT_STATUS_NOT_SUPPORTED);
6199 key_count = __le16_to_cpu(cp->key_count);
6200 if (key_count > max_key_count) {
6201 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6203 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6204 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload size */
6207 expected_len = struct_size(cp, keys, key_count);
6208 if (expected_len != len) {
6209 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6211 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6212 MGMT_STATUS_INVALID_PARAMS);
6215 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Validate all entries before mutating any state */
6217 for (i = 0; i < key_count; i++) {
6218 struct mgmt_ltk_info *key = &cp->keys[i];
6220 if (!ltk_is_valid(key))
6221 return mgmt_cmd_status(sk, hdev->id,
6222 MGMT_OP_LOAD_LONG_TERM_KEYS,
6223 MGMT_STATUS_INVALID_PARAMS);
/* This is a full replace, not a merge: drop existing LTKs first */
6228 hci_smp_ltks_clear(hdev);
6230 for (i = 0; i < key_count; i++) {
6231 struct mgmt_ltk_info *key = &cp->keys[i];
6232 u8 type, authenticated;
/* Skip keys on the administratively blocked-key list */
6234 if (hci_is_blocked_key(hdev,
6235 HCI_BLOCKED_KEY_TYPE_LTK,
6237 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map the mgmt key type onto SMP key type + authenticated flag;
 * for legacy keys the master bit selects master vs slave LTK.
 */
6242 switch (key->type) {
6243 case MGMT_LTK_UNAUTHENTICATED:
6244 authenticated = 0x00;
6245 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6247 case MGMT_LTK_AUTHENTICATED:
6248 authenticated = 0x01;
6249 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6251 case MGMT_LTK_P256_UNAUTH:
6252 authenticated = 0x00;
6253 type = SMP_LTK_P256;
6255 case MGMT_LTK_P256_AUTH:
6256 authenticated = 0x01;
6257 type = SMP_LTK_P256;
6259 case MGMT_LTK_P256_DEBUG:
6260 authenticated = 0x00;
6261 type = SMP_LTK_P256_DEBUG;
6267 hci_add_ltk(hdev, &key->addr.bdaddr,
6268 le_addr_type(key->addr.type), type, authenticated,
6269 key->val, key->enc_size, key->ediv, key->rand);
6272 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6275 hci_dev_unlock(hdev);
/* Completion callback for a pending Get Connection Information
 * command. On success, fills the reply from the connection's cached
 * RSSI/TX power values; otherwise reports the invalid sentinel
 * values. Drops the connection reference taken when the command was
 * queued.
 */
6280 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6282 struct hci_conn *conn = cmd->user_data;
6283 struct mgmt_rp_get_conn_info rp;
/* cmd->param starts with the mgmt address the caller supplied */
6286 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6288 if (status == MGMT_STATUS_SUCCESS) {
6289 rp.rssi = conn->rssi;
6290 rp.tx_power = conn->tx_power;
6291 rp.max_tx_power = conn->max_tx_power;
/* Failure: report "unknown" sentinels instead of stale data */
6293 rp.rssi = HCI_RSSI_INVALID;
6294 rp.tx_power = HCI_TX_POWER_INVALID;
6295 rp.max_tx_power = HCI_TX_POWER_INVALID;
6298 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6299 status, &rp, sizeof(rp));
6301 hci_conn_drop(conn);
/* HCI request completion for the RSSI/TX-power refresh issued by
 * get_conn_info(). Recovers the connection handle from the last sent
 * HCI command, looks up the connection and the matching pending mgmt
 * command, and finishes it via its cmd_complete callback.
 */
6307 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
6310 struct hci_cp_read_rssi *cp;
6311 struct mgmt_pending_cmd *cmd;
6312 struct hci_conn *conn;
6316 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
6320 /* Commands sent in request are either Read RSSI or Read Transmit Power
6321 * Level so we check which one was last sent to retrieve connection
6322 * handle. Both commands have handle as first parameter so it's safe to
6323 * cast data on the same command struct.
6325 * First command sent is always Read RSSI and we fail only if it fails.
6326 * In other case we simply override error to indicate success as we
6327 * already remembered if TX power value is actually valid.
6329 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
6331 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
/* Read TX Power failure is deliberately ignored; see above */
6332 status = MGMT_STATUS_SUCCESS;
6334 status = mgmt_status(hci_status);
6338 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
6342 handle = __le16_to_cpu(cp->handle);
6343 conn = hci_conn_hash_lookup_handle(hdev, handle);
6345 bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
/* Find the pending command keyed by this connection */
6350 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
6354 cmd->cmd_complete(cmd, status);
6355 mgmt_pending_remove(cmd);
6358 hci_dev_unlock(hdev);
/* MGMT Get Connection Information: report RSSI and TX power for an
 * active connection. If the cached values are recent enough they are
 * returned directly; otherwise an HCI request (Read RSSI, plus Read
 * TX Power where still needed) refreshes them and the reply is sent
 * from conn_info_refresh_complete()/conn_info_cmd_complete().
 */
6361 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6364 struct mgmt_cp_get_conn_info *cp = data;
6365 struct mgmt_rp_get_conn_info rp;
6366 struct hci_conn *conn;
6367 unsigned long conn_info_age;
6370 bt_dev_dbg(hdev, "sock %p", sk);
/* The reply always echoes the requested address */
6372 memset(&rp, 0, sizeof(rp));
6373 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6374 rp.addr.type = cp->addr.type;
6376 if (!bdaddr_type_is_valid(cp->addr.type))
6377 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6378 MGMT_STATUS_INVALID_PARAMS,
6383 if (!hdev_is_powered(hdev)) {
6384 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6385 MGMT_STATUS_NOT_POWERED, &rp,
/* Look up the connection on the matching transport */
6390 if (cp->addr.type == BDADDR_BREDR)
6391 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6394 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6396 if (!conn || conn->state != BT_CONNECTED) {
6397 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6398 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one outstanding query per connection */
6403 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
6404 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6405 MGMT_STATUS_BUSY, &rp, sizeof(rp));
6409 /* To avoid client trying to guess when to poll again for information we
6410 * calculate conn info age as random value between min/max set in hdev.
6412 conn_info_age = hdev->conn_info_min_age +
6413 prandom_u32_max(hdev->conn_info_max_age -
6414 hdev->conn_info_min_age);
6416 /* Query controller to refresh cached values if they are too old or were
/* timestamp 0 means the values were never queried */
6419 if (time_after(jiffies, conn->conn_info_timestamp +
6420 msecs_to_jiffies(conn_info_age)) ||
6421 !conn->conn_info_timestamp) {
6422 struct hci_request req;
6423 struct hci_cp_read_tx_power req_txp_cp;
6424 struct hci_cp_read_rssi req_rssi_cp;
6425 struct mgmt_pending_cmd *cmd;
6427 hci_req_init(&req, hdev);
6428 req_rssi_cp.handle = cpu_to_le16(conn->handle);
6429 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
6432 /* For LE links TX power does not change thus we don't need to
6433 * query for it once value is known.
6435 if (!bdaddr_type_is_le(cp->addr.type) ||
6436 conn->tx_power == HCI_TX_POWER_INVALID) {
6437 req_txp_cp.handle = cpu_to_le16(conn->handle);
6438 req_txp_cp.type = 0x00;
6439 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6440 sizeof(req_txp_cp), &req_txp_cp);
6443 /* Max TX power needs to be read only once per connection */
6444 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
6445 req_txp_cp.handle = cpu_to_le16(conn->handle);
6446 req_txp_cp.type = 0x01;
6447 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6448 sizeof(req_txp_cp), &req_txp_cp);
6451 err = hci_req_run(&req, conn_info_refresh_complete);
6455 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Keep the connection alive until the command completes;
 * conn_info_cmd_complete() drops this reference.
 */
6462 hci_conn_hold(conn);
6463 cmd->user_data = hci_conn_get(conn);
6464 cmd->cmd_complete = conn_info_cmd_complete;
6466 conn->conn_info_timestamp = jiffies;
6468 /* Cache is valid, just reply with values cached in hci_conn */
6469 rp.rssi = conn->rssi;
6470 rp.tx_power = conn->tx_power;
6471 rp.max_tx_power = conn->max_tx_power;
6473 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6474 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6478 hci_dev_unlock(hdev);
/* Completion callback for a pending Get Clock Information command.
 * Fills the reply with the local clock from the hci_dev and, when a
 * connection was involved, its piconet clock and accuracy, then sends
 * the mgmt reply and drops the held connection reference.
 */
6482 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6484 struct hci_conn *conn = cmd->user_data;
6485 struct mgmt_rp_get_clock_info rp;
6486 struct hci_dev *hdev;
6489 memset(&rp, 0, sizeof(rp));
/* cmd->param starts with the mgmt address the caller supplied */
6490 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6495 hdev = hci_dev_get(cmd->index);
6497 rp.local_clock = cpu_to_le32(hdev->clock);
6502 rp.piconet_clock = cpu_to_le32(conn->clock);
6503 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6507 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6511 hci_conn_drop(conn);
/* HCI request completion for Get Clock Information. Inspects the last
 * sent Read Clock command: "which" 0x01 means the piconet clock was
 * requested, so the connection is recovered from the handle. Then the
 * matching pending mgmt command is completed and removed.
 */
6518 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6520 struct hci_cp_read_clock *hci_cp;
6521 struct mgmt_pending_cmd *cmd;
6522 struct hci_conn *conn;
6524 bt_dev_dbg(hdev, "status %u", status);
6528 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0: piconet clock read, handle identifies the link */
6532 if (hci_cp->which) {
6533 u16 handle = __le16_to_cpu(hci_cp->handle);
6534 conn = hci_conn_hash_lookup_handle(hdev, handle);
6539 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
6543 cmd->cmd_complete(cmd, mgmt_status(status));
6544 mgmt_pending_remove(cmd);
6547 hci_dev_unlock(hdev);
/* MGMT Get Clock Information: read the local controller clock and,
 * for a non-ANY BR/EDR address with an active connection, the piconet
 * clock of that link. Issues one or two HCI Read Clock commands and
 * completes via get_clock_info_complete()/clock_info_cmd_complete().
 * Only BR/EDR addresses are accepted.
 */
6550 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
6553 struct mgmt_cp_get_clock_info *cp = data;
6554 struct mgmt_rp_get_clock_info rp;
6555 struct hci_cp_read_clock hci_cp;
6556 struct mgmt_pending_cmd *cmd;
6557 struct hci_request req;
6558 struct hci_conn *conn;
6561 bt_dev_dbg(hdev, "sock %p", sk);
/* The reply always echoes the requested address */
6563 memset(&rp, 0, sizeof(rp));
6564 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6565 rp.addr.type = cp->addr.type;
6567 if (cp->addr.type != BDADDR_BREDR)
6568 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6569 MGMT_STATUS_INVALID_PARAMS,
6574 if (!hdev_is_powered(hdev)) {
6575 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6576 MGMT_STATUS_NOT_POWERED, &rp,
/* Non-ANY address: the piconet clock of that connection is also
 * requested, so it must be connected.
 */
6581 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6582 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6584 if (!conn || conn->state != BT_CONNECTED) {
6585 err = mgmt_cmd_complete(sk, hdev->id,
6586 MGMT_OP_GET_CLOCK_INFO,
6587 MGMT_STATUS_NOT_CONNECTED,
6595 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
6601 cmd->cmd_complete = clock_info_cmd_complete;
6603 hci_req_init(&req, hdev);
/* First Read Clock: local clock (which = 0x00 via memset) */
6605 memset(&hci_cp, 0, sizeof(hci_cp));
6606 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection until clock_info_cmd_complete() drops it */
6609 hci_conn_hold(conn);
6610 cmd->user_data = hci_conn_get(conn);
6612 hci_cp.handle = cpu_to_le16(conn->handle);
6613 hci_cp.which = 0x01; /* Piconet clock */
6614 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
6617 err = hci_req_run(&req, get_clock_info_complete);
6619 mgmt_pending_remove(cmd);
6622 hci_dev_unlock(hdev);
/* Check whether an LE connection to @addr with the given destination
 * address @type exists and is in the BT_CONNECTED state.
 */
6626 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6628 struct hci_conn *conn;
6630 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6634 if (conn->dst_type != type)
6637 if (conn->state != BT_CONNECTED)
6643 /* This function requires the caller holds hdev->lock */
/* Create (or reuse) the connection parameters entry for the address
 * and set its auto_connect policy, moving the entry onto the matching
 * pending-connections or pending-reports list.
 */
6644 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
6645 u8 addr_type, u8 auto_connect)
6647 struct hci_conn_params *params;
6649 params = hci_conn_params_add(hdev, addr, addr_type)
6653 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry is currently on */
6656 list_del_init(&params->action);
6658 switch (auto_connect) {
6659 case HCI_AUTO_CONN_DISABLED:
6660 case HCI_AUTO_CONN_LINK_LOSS:
6661 /* If auto connect is being disabled when we're trying to
6662 * connect to device, keep connecting.
6664 if (params->explicit_connect)
6665 list_add(&params->action, &hdev->pend_le_conns);
6667 case HCI_AUTO_CONN_REPORT:
/* An in-progress explicit connect overrides report-only */
6668 if (params->explicit_connect)
6669 list_add(&params->action, &hdev->pend_le_conns)
6671 list_add(&params->action, &hdev->pend_le_reports);
6673 case HCI_AUTO_CONN_DIRECT:
6674 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a connect if not already connected */
6675 if (!is_connected(hdev, addr, addr_type))
6676 list_add(&params->action, &hdev->pend_le_conns);
6680 params->auto_connect = auto_connect;
6682 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
6683 addr, addr_type, auto_connect);
/* Emit a Device Added mgmt event for @bdaddr to all sockets except
 * the originating one (@sk is passed as the skip socket).
 */
6688 static void device_added(struct sock *sk, struct hci_dev *hdev,
6689 bdaddr_t *bdaddr, u8 type, u8 action)
6691 struct mgmt_ev_device_added ev;
6693 bacpy(&ev.addr.bdaddr, bdaddr);
6694 ev.addr.type = type;
6697 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT Add Device: add a device to the auto-connect / accept
 * infrastructure. For BR/EDR addresses only action 0x01 (allow
 * incoming connections) is supported and the device goes onto the
 * whitelist. For LE identity addresses the action selects the
 * auto_connect policy (0x02 always, 0x01 direct, else report) and
 * connection parameters are created via hci_conn_params_set().
 */
6700 static int add_device(struct sock *sk, struct hci_dev *hdev,
6701 void *data, u16 len)
6703 struct mgmt_cp_add_device *cp = data;
6704 u8 auto_conn, addr_type;
6705 struct hci_conn_params *params;
6707 u32 current_flags = 0;
6709 bt_dev_dbg(hdev, "sock %p", sk);
6711 if (!bdaddr_type_is_valid(cp->addr.type) ||
6712 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6713 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6714 MGMT_STATUS_INVALID_PARAMS,
6715 &cp->addr, sizeof(cp->addr));
6717 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6718 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6719 MGMT_STATUS_INVALID_PARAMS,
6720 &cp->addr, sizeof(cp->addr));
6724 if (cp->addr.type == BDADDR_BREDR) {
6725 /* Only incoming connections action is supported for now */
6726 if (cp->action != 0x01) {
6727 err = mgmt_cmd_complete(sk, hdev->id,
6729 MGMT_STATUS_INVALID_PARAMS,
6730 &cp->addr, sizeof(cp->addr));
6734 err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
/* Page scan settings may need updating for the new entry */
6740 hci_req_update_scan(hdev);
6745 addr_type = le_addr_type(cp->addr.type);
/* Map the mgmt action onto the LE auto_connect policy */
6747 if (cp->action == 0x02)
6748 auto_conn = HCI_AUTO_CONN_ALWAYS;
6749 else if (cp->action == 0x01)
6750 auto_conn = HCI_AUTO_CONN_DIRECT;
6752 auto_conn = HCI_AUTO_CONN_REPORT;
6754 /* Kernel internally uses conn_params with resolvable private
6755 * address, but Add Device allows only identity addresses.
6756 * Make sure it is enforced before calling
6757 * hci_conn_params_lookup.
6759 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6760 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6761 MGMT_STATUS_INVALID_PARAMS,
6762 &cp->addr, sizeof(cp->addr));
6766 /* If the connection parameters don't exist for this device,
6767 * they will be created and configured with defaults.
6769 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
6771 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6772 MGMT_STATUS_FAILED, &cp->addr,
6776 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6779 current_flags = params->current_flags;
/* Re-evaluate passive scanning with the new pending entry */
6782 hci_update_background_scan(hdev);
6785 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6786 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
6787 SUPPORTED_DEVICE_FLAGS(), current_flags);
6789 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6790 MGMT_STATUS_SUCCESS, &cp->addr,
6794 hci_dev_unlock(hdev);
/* Emit a Device Removed mgmt event for @bdaddr to all sockets except
 * the originating one (@sk is passed as the skip socket).
 */
6798 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6799 bdaddr_t *bdaddr, u8 type)
6801 struct mgmt_ev_device_removed ev;
6803 bacpy(&ev.addr.bdaddr, bdaddr);
6804 ev.addr.type = type;
6806 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT Remove Device: undo Add Device. A specific BR/EDR address is
 * removed from the whitelist; a specific LE identity address has its
 * connection parameters deleted (entries that were never added via
 * Add Device, i.e. DISABLED/EXPLICIT, are rejected). BDADDR_ANY with
 * type 0 removes all whitelist entries and all removable LE
 * connection parameters in one go.
 */
6809 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6810 void *data, u16 len)
6812 struct mgmt_cp_remove_device *cp = data;
6815 bt_dev_dbg(hdev, "sock %p", sk);
/* Non-ANY address: remove one specific device */
6819 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6820 struct hci_conn_params *params;
6823 if (!bdaddr_type_is_valid(cp->addr.type)) {
6824 err = mgmt_cmd_complete(sk, hdev->id,
6825 MGMT_OP_REMOVE_DEVICE,
6826 MGMT_STATUS_INVALID_PARAMS,
6827 &cp->addr, sizeof(cp->addr));
6831 if (cp->addr.type == BDADDR_BREDR) {
6832 err = hci_bdaddr_list_del(&hdev->whitelist,
6836 err = mgmt_cmd_complete(sk, hdev->id,
6837 MGMT_OP_REMOVE_DEVICE,
6838 MGMT_STATUS_INVALID_PARAMS,
6844 hci_req_update_scan(hdev);
6846 device_removed(sk, hdev, &cp->addr.bdaddr,
6851 addr_type = le_addr_type(cp->addr.type);
6853 /* Kernel internally uses conn_params with resolvable private
6854 * address, but Remove Device allows only identity addresses.
6855 * Make sure it is enforced before calling
6856 * hci_conn_params_lookup.
6858 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6859 err = mgmt_cmd_complete(sk, hdev->id,
6860 MGMT_OP_REMOVE_DEVICE,
6861 MGMT_STATUS_INVALID_PARAMS,
6862 &cp->addr, sizeof(cp->addr));
6866 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6869 err = mgmt_cmd_complete(sk, hdev->id,
6870 MGMT_OP_REMOVE_DEVICE,
6871 MGMT_STATUS_INVALID_PARAMS,
6872 &cp->addr, sizeof(cp->addr));
/* DISABLED/EXPLICIT entries were not created by Add Device,
 * so removing them via mgmt is not allowed.
 */
6876 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
6877 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
6878 err = mgmt_cmd_complete(sk, hdev->id,
6879 MGMT_OP_REMOVE_DEVICE,
6880 MGMT_STATUS_INVALID_PARAMS,
6881 &cp->addr, sizeof(cp->addr));
6885 list_del(&params->action);
6886 list_del(&params->list);
6888 hci_update_background_scan(hdev);
6890 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wholesale removal of all devices */
6892 struct hci_conn_params *p, *tmp;
6893 struct bdaddr_list *b, *btmp;
/* Only type 0 is valid together with BDADDR_ANY */
6895 if (cp->addr.type) {
6896 err = mgmt_cmd_complete(sk, hdev->id,
6897 MGMT_OP_REMOVE_DEVICE,
6898 MGMT_STATUS_INVALID_PARAMS,
6899 &cp->addr, sizeof(cp->addr));
6903 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6904 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6909 hci_req_update_scan(hdev);
6911 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6912 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6914 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Preserve entries with a pending explicit connect, only
 * downgrading their policy.
 */
6915 if (p->explicit_connect) {
6916 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
6919 list_del(&p->action);
6924 bt_dev_dbg(hdev, "All LE connection parameters were removed");
6926 hci_update_background_scan(hdev);
6930 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
6931 MGMT_STATUS_SUCCESS, &cp->addr,
6934 hci_dev_unlock(hdev);
/* MGMT Load Connection Parameters: replace stored LE connection
 * parameter entries with those supplied by user space. The payload
 * size must match param_count exactly. Disabled entries are cleared
 * first; individual entries with an invalid address type or failing
 * hci_check_conn_params() are skipped with an error log rather than
 * failing the whole command.
 */
6938 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6941 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound on entries that can fit in a u16-sized payload */
6942 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6943 sizeof(struct mgmt_conn_param));
6944 u16 param_count, expected_len;
6947 if (!lmp_le_capable(hdev))
6948 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6949 MGMT_STATUS_NOT_SUPPORTED);
6951 param_count = __le16_to_cpu(cp->param_count);
6952 if (param_count > max_param_count) {
6953 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
6955 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6956 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload size */
6959 expected_len = struct_size(cp, params, param_count);
6960 if (expected_len != len) {
6961 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
6963 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6964 MGMT_STATUS_INVALID_PARAMS);
6967 bt_dev_dbg(hdev, "param_count %u", param_count);
/* Drop previously stored entries that are disabled */
6971 hci_conn_params_clear_disabled(hdev);
6973 for (i = 0; i < param_count; i++) {
6974 struct mgmt_conn_param *param = &cp->params[i];
6975 struct hci_conn_params *hci_param;
6976 u16 min, max, latency, timeout;
6979 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
/* Only LE public/random address types are acceptable */
6982 if (param->addr.type == BDADDR_LE_PUBLIC) {
6983 addr_type = ADDR_LE_DEV_PUBLIC;
6984 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6985 addr_type = ADDR_LE_DEV_RANDOM;
6987 bt_dev_err(hdev, "ignoring invalid connection parameters");
6991 min = le16_to_cpu(param->min_interval);
6992 max = le16_to_cpu(param->max_interval);
6993 latency = le16_to_cpu(param->latency);
6994 timeout = le16_to_cpu(param->timeout);
6996 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6997 min, max, latency, timeout);
6999 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7000 bt_dev_err(hdev, "ignoring invalid connection parameters");
7004 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7007 bt_dev_err(hdev, "failed to add connection parameters");
7011 hci_param->conn_min_interval = min;
7012 hci_param->conn_max_interval = max;
7013 hci_param->conn_latency = latency;
7014 hci_param->supervision_timeout = timeout;
7017 hci_dev_unlock(hdev);
7019 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: toggle whether controller
 * configuration is performed by an external entity.  Only permitted
 * while powered off and when HCI_QUIRK_EXTERNAL_CONFIG is set.
 */
7023 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7024 void *data, u16 len)
7026 struct mgmt_cp_set_external_config *cp = data;
7030 bt_dev_dbg(hdev, "sock %p", sk);
/* Changing the configuration source on a powered controller is
 * not allowed.
 */
7032 if (hdev_is_powered(hdev))
7033 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7034 MGMT_STATUS_REJECTED);
/* The config parameter is a strict boolean (0x00 or 0x01). */
7036 if (cp->config != 0x00 && cp->config != 0x01)
7037 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7038 MGMT_STATUS_INVALID_PARAMS);
7040 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7041 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7042 MGMT_STATUS_NOT_SUPPORTED);
/* changed records whether the flag actually flipped state. */
7047 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7049 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7051 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7058 err = new_options(hdev, sk);
/* If the configured state changed, migrate the controller between
 * the configured and unconfigured index lists.
 */
7060 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7061 mgmt_index_removed(hdev);
7063 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7064 hci_dev_set_flag(hdev, HCI_CONFIG);
7065 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7067 queue_work(hdev->req_workqueue, &hdev->power_on);
7069 set_bit(HCI_RAW, &hdev->flags);
7070 mgmt_index_added(hdev);
7075 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: program a public bdaddr on
 * controllers whose driver provides a set_bdaddr callback.  Only
 * permitted while powered off.
 */
7079 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7080 void *data, u16 len)
7082 struct mgmt_cp_set_public_address *cp = data;
7086 bt_dev_dbg(hdev, "sock %p", sk);
7088 if (hdev_is_powered(hdev))
7089 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7090 MGMT_STATUS_REJECTED);
/* BDADDR_ANY (all zeroes) is not a valid public address. */
7092 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7093 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7094 MGMT_STATUS_INVALID_PARAMS);
/* Without a driver hook there is no way to program the address. */
7096 if (!hdev->set_bdaddr)
7097 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7098 MGMT_STATUS_NOT_SUPPORTED);
7102 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7103 bacpy(&hdev->public_addr, &cp->bdaddr);
7105 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7112 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7113 err = new_options(hdev, sk);
/* Once the controller becomes fully configured, re-register its
 * index and schedule a power-on to apply the address.
 */
7115 if (is_configured(hdev)) {
7116 mgmt_index_removed(hdev);
7118 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7120 hci_dev_set_flag(hdev, HCI_CONFIG);
7121 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7123 queue_work(hdev->req_workqueue, &hdev->power_on);
7127 hci_dev_unlock(hdev);
/* Completion callback for the HCI OOB-data read issued by
 * read_local_ssp_oob_req().  Packages the controller's hash and
 * randomizer values as EIR fields, completes the pending mgmt command
 * and broadcasts the updated OOB data to other mgmt sockets.
 */
7131 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
7132 u16 opcode, struct sk_buff *skb)
7134 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7135 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7136 u8 *h192, *r192, *h256, *r256;
7137 struct mgmt_pending_cmd *cmd;
7141 bt_dev_dbg(hdev, "status %u", status);
7143 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
7147 mgmt_cp = cmd->param;
7150 status = mgmt_status(status);
/* Legacy (non-SC) controllers only return the C192/R192 pair. */
7157 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
7158 struct hci_rp_read_local_oob_data *rp;
7160 if (skb->len != sizeof(*rp)) {
7161 status = MGMT_STATUS_FAILED;
7164 status = MGMT_STATUS_SUCCESS;
7165 rp = (void *)skb->data;
/* 5 bytes class-of-device EIR + two 18-byte hash/rand fields. */
7167 eir_len = 5 + 18 + 18;
7174 struct hci_rp_read_local_oob_ext_data *rp;
7176 if (skb->len != sizeof(*rp)) {
7177 status = MGMT_STATUS_FAILED;
7180 status = MGMT_STATUS_SUCCESS;
7181 rp = (void *)skb->data;
/* In SC-only mode, expose only the 256-bit pair. */
7183 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7184 eir_len = 5 + 18 + 18;
7188 eir_len = 5 + 18 + 18 + 18 + 18;
7198 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7205 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7206 hdev->dev_class, 3);
7209 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7210 EIR_SSP_HASH_C192, h192, 16);
7211 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7212 EIR_SSP_RAND_R192, r192, 16);
7216 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7217 EIR_SSP_HASH_C256, h256, 16);
7218 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7219 EIR_SSP_RAND_R256, r256, 16);
7223 mgmt_rp->type = mgmt_cp->type;
7224 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7226 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7227 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7228 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7229 if (err < 0 || status)
/* Notify other sockets interested in OOB data; the requester is
 * excluded since it already got the command response.
 */
7232 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7234 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7235 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7236 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7239 mgmt_pending_remove(cmd);
/* Queue an HCI request to read the controller's local OOB data for
 * BR/EDR SSP, using the extended variant when Secure Connections is
 * enabled.  Completion is handled by read_local_oob_ext_data_complete().
 */
7242 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7243 struct mgmt_cp_read_local_oob_ext_data *cp)
7245 struct mgmt_pending_cmd *cmd;
7246 struct hci_request req;
/* Track the in-flight command so the completion can find it. */
7249 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7254 hci_req_init(&req, hdev);
7256 if (bredr_sc_enabled(hdev))
7257 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7259 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7261 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
/* On submission failure, drop the pending entry again. */
7263 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler: return local out-of-band
 * pairing data as EIR-encoded fields.  BR/EDR data is read from the
 * controller asynchronously (read_local_ssp_oob_req); LE data is
 * generated directly via SMP.
 */
7270 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7271 void *data, u16 data_len)
7273 struct mgmt_cp_read_local_oob_ext_data *cp = data;
7274 struct mgmt_rp_read_local_oob_ext_data *rp;
7277 u8 status, flags, role, addr[7], hash[16], rand[16];
7280 bt_dev_dbg(hdev, "sock %p", sk);
/* Only the BR/EDR and LE (public|random) address type combinations
 * are valid request types.
 */
7282 if (hdev_is_powered(hdev)) {
7284 case BIT(BDADDR_BREDR):
7285 status = mgmt_bredr_support(hdev);
7291 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7292 status = mgmt_le_support(hdev);
/* bdaddr + role + confirm + random + flags EIR fields. */
7296 eir_len = 9 + 3 + 18 + 18 + 3;
7299 status = MGMT_STATUS_INVALID_PARAMS;
7304 status = MGMT_STATUS_NOT_POWERED;
7308 rp_len = sizeof(*rp) + eir_len;
7309 rp = kmalloc(rp_len, GFP_ATOMIC);
7320 case BIT(BDADDR_BREDR):
/* With SSP enabled, the data must come from the controller;
 * complete asynchronously.
 */
7321 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7322 err = read_local_ssp_oob_req(hdev, sk, cp);
7323 hci_dev_unlock(hdev);
7327 status = MGMT_STATUS_FAILED;
7330 eir_len = eir_append_data(rp->eir, eir_len,
7332 hdev->dev_class, 3);
7335 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7336 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7337 smp_generate_oob(hdev, hash, rand) < 0) {
7338 hci_dev_unlock(hdev);
7339 status = MGMT_STATUS_FAILED;
7343 /* This should return the active RPA, but since the RPA
7344 * is only programmed on demand, it is really hard to fill
7345 * this in at the moment. For now disallow retrieving
7346 * local out-of-band data when privacy is in use.
7348 * Returning the identity address will not help here since
7349 * pairing happens before the identity resolving key is
7350 * known and thus the connection establishment happens
7351 * based on the RPA and not the identity address.
7353 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7354 hci_dev_unlock(hdev);
7355 status = MGMT_STATUS_REJECTED;
/* Choose static vs public address with the same rules used
 * for advertising.
 */
7359 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7360 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7361 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7362 bacmp(&hdev->static_addr, BDADDR_ANY))) {
7363 memcpy(addr, &hdev->static_addr, 6);
7366 memcpy(addr, &hdev->bdaddr, 6);
7370 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7371 addr, sizeof(addr));
7373 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7378 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
7379 &role, sizeof(role));
/* Confirm/random values only exist with Secure Connections. */
7381 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
7382 eir_len = eir_append_data(rp->eir, eir_len,
7384 hash, sizeof(hash));
7386 eir_len = eir_append_data(rp->eir, eir_len,
7388 rand, sizeof(rand));
7391 flags = mgmt_get_adv_discov_flags(hdev);
7393 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
7394 flags |= LE_AD_NO_BREDR;
7396 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
7397 &flags, sizeof(flags));
7401 hci_dev_unlock(hdev);
/* The requester implicitly opts in to OOB data update events. */
7403 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
7405 status = MGMT_STATUS_SUCCESS;
7408 rp->type = cp->type;
7409 rp->eir_len = cpu_to_le16(eir_len);
7411 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
7412 status, rp, sizeof(*rp) + eir_len);
7413 if (err < 0 || status)
7416 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7417 rp, sizeof(*rp) + eir_len,
7418 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of advertising flags this controller supports.
 * Extended advertising adds TX-power control, HW offload and the
 * secondary-PHY options (2M/Coded depend on le_features).
 */
7426 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7430 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7431 flags |= MGMT_ADV_FLAG_DISCOV;
7432 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7433 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7434 flags |= MGMT_ADV_FLAG_APPEARANCE;
7435 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7436 flags |= MGMT_ADV_PARAM_DURATION;
7437 flags |= MGMT_ADV_PARAM_TIMEOUT;
7438 flags |= MGMT_ADV_PARAM_INTERVALS;
7439 flags |= MGMT_ADV_PARAM_TX_POWER;
7440 flags |= MGMT_ADV_PARAM_SCAN_RSP;
7442 /* In extended adv TX_POWER returned from Set Adv Param
7443 * will be always valid.
7445 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7446 ext_adv_capable(hdev))
7447 flags |= MGMT_ADV_FLAG_TX_POWER;
7449 if (ext_adv_capable(hdev)) {
7450 flags |= MGMT_ADV_FLAG_SEC_1M;
7451 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7452 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
/* Secondary PHYs are reported only if the controller's LE
 * feature bits advertise them.
 */
7454 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7455 flags |= MGMT_ADV_FLAG_SEC_2M;
7457 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7458 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT_OP_READ_ADV_FEATURES handler: report supported advertising
 * flags, size limits, instance limits and the list of currently
 * registered instance numbers.
 */
7464 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7465 void *data, u16 data_len)
7467 struct mgmt_rp_read_adv_features *rp;
7470 struct adv_info *adv_instance;
7471 u32 supported_flags;
7474 bt_dev_dbg(hdev, "sock %p", sk);
7476 if (!lmp_le_capable(hdev))
7477 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7478 MGMT_STATUS_REJECTED);
7480 /* Enabling the experimental LL Privacy support disables support for
7483 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7484 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7485 MGMT_STATUS_NOT_SUPPORTED);
/* One trailing byte per registered instance number. */
7489 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7490 rp = kmalloc(rp_len, GFP_ATOMIC);
7492 hci_dev_unlock(hdev);
7496 supported_flags = get_supported_adv_flags(hdev);
7498 rp->supported_flags = cpu_to_le32(supported_flags);
7499 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7500 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7501 rp->max_instances = hdev->le_num_of_adv_sets;
7502 rp->num_instances = hdev->adv_instance_cnt;
/* Copy the instance numbers into the variable-length tail. */
7504 instance = rp->instance;
7505 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7506 *instance = adv_instance->instance;
7510 hci_dev_unlock(hdev);
7512 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7513 MGMT_STATUS_SUCCESS, rp, rp_len);
7520 static u8 calculate_name_len(struct hci_dev *hdev)
7522 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7524 return append_local_name(hdev, buf, 0);
/* Remaining advertising / scan-response payload capacity after the
 * fields the kernel appends itself (flags, TX power, local name,
 * appearance) are accounted for, based on the instance's adv_flags.
 */
7527 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7530 u8 max_len = HCI_MAX_AD_LENGTH;
/* Kernel-managed Flags field consumes space in the adv data. */
7533 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7534 MGMT_ADV_FLAG_LIMITED_DISCOV |
7535 MGMT_ADV_FLAG_MANAGED_FLAGS))
7538 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
/* Name length varies; compute it from the current local name. */
7541 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7542 max_len -= calculate_name_len(hdev)
7544 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7551 static bool flags_managed(u32 adv_flags)
7553 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7554 MGMT_ADV_FLAG_LIMITED_DISCOV |
7555 MGMT_ADV_FLAG_MANAGED_FLAGS);
7558 static bool tx_power_managed(u32 adv_flags)
7560 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7563 static bool name_managed(u32 adv_flags)
7565 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7568 static bool appearance_managed(u32 adv_flags)
7570 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate userspace-supplied advertising or scan-response TLV data:
 * total length within the computed budget, well-formed length octets,
 * and no fields that the kernel manages itself for these adv_flags.
 */
7573 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7574 u8 len, bool is_adv_data)
7579 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7584 /* Make sure that the data is correctly formatted. */
7585 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* Reject fields the kernel generates on its own. */
7588 if (data[i + 1] == EIR_FLAGS &&
7589 (!is_adv_data || flags_managed(adv_flags)))
7592 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7595 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7598 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7601 if (data[i + 1] == EIR_APPEARANCE &&
7602 appearance_managed(adv_flags))
7605 /* If the current field length would exceed the total data
7606 * length, then it's invalid.
7608 if (i + cur_len >= len)
/* Check a requested advertising flag set: every bit must be supported
 * and at most one secondary-PHY (SEC_*) flag may be set.
 */
7615 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7617 u32 supported_flags, phy_flags;
7619 /* The current implementation only supports a subset of the specified
7620 * flags. Also need to check mutual exclusiveness of sec flags.
7622 supported_flags = get_supported_adv_flags(hdev);
7623 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
/* phy_flags ^ (phy_flags & -phy_flags) is non-zero when more than
 * one SEC_* bit is set (clears the lowest set bit and checks the
 * remainder).
 */
7624 if (adv_flags & ~supported_flags ||
7625 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7631 static bool adv_busy(struct hci_dev *hdev)
7633 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7634 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7635 pending_find(MGMT_OP_SET_LE, hdev) ||
7636 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7637 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
/* hci_request completion shared by add_advertising and
 * add_ext_adv_data: on failure, remove any instances still marked
 * pending and emit Advertising Removed events, then complete the
 * pending mgmt command with the mapped status.
 */
7640 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
7643 struct mgmt_pending_cmd *cmd;
7644 struct mgmt_cp_add_advertising *cp;
7645 struct mgmt_rp_add_advertising rp;
7646 struct adv_info *adv_instance, *n;
7649 bt_dev_dbg(hdev, "status %d", status);
/* Either opcode may own this completion; try both. */
7653 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
7655 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
7657 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
7658 if (!adv_instance->pending)
/* On success the instance is now fully registered. */
7662 adv_instance->pending = false;
7666 instance = adv_instance->instance;
/* Stop the rotation timer before dropping the active instance. */
7668 if (hdev->cur_adv_instance == instance)
7669 cancel_adv_timeout(hdev);
7671 hci_remove_adv_instance(hdev, instance);
7672 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
7679 rp.instance = cp->instance;
7682 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7683 mgmt_status(status));
7685 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7686 mgmt_status(status), &rp, sizeof(rp));
7688 mgmt_pending_remove(cmd);
7691 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_ADVERTISING handler: validate and register an
 * advertising instance (flags, adv data, scan response) and schedule
 * it for advertising when appropriate.
 *
 * Fix: the LL-privacy rejection below previously replied with
 * MGMT_OP_SET_ADVERTISING; a command-status response must carry the
 * opcode of the command being answered (MGMT_OP_ADD_ADVERTISING),
 * otherwise userspace cannot match the response to its request.
 */
7694 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7695 void *data, u16 data_len)
7697 struct mgmt_cp_add_advertising *cp = data;
7698 struct mgmt_rp_add_advertising rp;
7701 u16 timeout, duration;
7702 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7703 u8 schedule_instance = 0;
7704 struct adv_info *next_instance;
7706 struct mgmt_pending_cmd *cmd;
7707 struct hci_request req;
7709 bt_dev_dbg(hdev, "sock %p", sk);
7711 status = mgmt_le_support(hdev);
7713 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7716 /* Enabling the experimental LL Privacy support disables support for
7719 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7720 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7721 MGMT_STATUS_NOT_SUPPORTED);
/* Instance numbers are 1-based and bounded by the adv set count. */
7723 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7724 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7725 MGMT_STATUS_INVALID_PARAMS);
7727 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7728 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7729 MGMT_STATUS_INVALID_PARAMS);
7731 flags = __le32_to_cpu(cp->flags);
7732 timeout = __le16_to_cpu(cp->timeout);
7733 duration = __le16_to_cpu(cp->duration);
7735 if (!requested_adv_flags_are_valid(hdev, flags))
7736 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7737 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running timer, which requires power. */
7741 if (timeout && !hdev_is_powered(hdev)) {
7742 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7743 MGMT_STATUS_REJECTED);
7747 if (adv_busy(hdev)) {
7748 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Both adv data and scan response TLVs must be well formed. */
7753 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7754 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7755 cp->scan_rsp_len, false)) {
7756 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7757 MGMT_STATUS_INVALID_PARAMS);
7761 err = hci_add_adv_instance(hdev, cp->instance, flags,
7762 cp->adv_data_len, cp->data,
7764 cp->data + cp->adv_data_len,
7766 HCI_ADV_TX_POWER_NO_PREFERENCE,
7767 hdev->le_adv_min_interval,
7768 hdev->le_adv_max_interval);
7770 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7771 MGMT_STATUS_FAILED);
7775 /* Only trigger an advertising added event if a new instance was
7778 if (hdev->adv_instance_cnt > prev_instance_cnt)
7779 mgmt_advertising_added(sk, hdev, cp->instance);
7781 if (hdev->cur_adv_instance == cp->instance) {
7782 /* If the currently advertised instance is being changed then
7783 * cancel the current advertising and schedule the next
7784 * instance. If there is only one instance then the overridden
7785 * advertising data will be visible right away.
7787 cancel_adv_timeout(hdev);
7789 next_instance = hci_get_next_instance(hdev, cp->instance);
7791 schedule_instance = next_instance->instance;
7792 } else if (!hdev->adv_instance_timeout) {
7793 /* Immediately advertise the new instance if no other
7794 * instance is currently being advertised.
7796 schedule_instance = cp->instance;
7799 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
7800 * there is no instance to be advertised then we have no HCI
7801 * communication to make. Simply return.
7803 if (!hdev_is_powered(hdev) ||
7804 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7805 !schedule_instance) {
7806 rp.instance = cp->instance;
7807 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7808 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7812 /* We're good to go, update advertising data, parameters, and start
7815 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7822 hci_req_init(&req, hdev);
7824 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
7827 err = hci_req_run(&req, add_advertising_complete);
7830 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7831 MGMT_STATUS_FAILED);
7832 mgmt_pending_remove(cmd);
7836 hci_dev_unlock(hdev);
/* hci_request completion for add_ext_adv_params: on success report the
 * instance's TX power and remaining data budgets; on failure tear the
 * instance down again (emitting Advertising Removed if it had been
 * active) and return an error status.
 */
7841 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
7844 struct mgmt_pending_cmd *cmd;
7845 struct mgmt_cp_add_ext_adv_params *cp;
7846 struct mgmt_rp_add_ext_adv_params rp;
7847 struct adv_info *adv_instance;
7850 BT_DBG("%s", hdev->name);
7854 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
7859 adv_instance = hci_find_adv_instance(hdev, cp->instance);
7863 rp.instance = cp->instance;
7864 rp.tx_power = adv_instance->tx_power;
7866 /* While we're at it, inform userspace of the available space for this
7867 * advertisement, given the flags that will be used.
7869 flags = __le32_to_cpu(cp->flags);
7870 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7871 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7874 /* If this advertisement was previously advertising and we
7875 * failed to update it, we signal that it has been removed and
7876 * delete its structure
7878 if (!adv_instance->pending)
7879 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
7881 hci_remove_adv_instance(hdev, cp->instance);
7883 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7884 mgmt_status(status));
7887 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7888 mgmt_status(status), &rp, sizeof(rp));
7893 mgmt_pending_remove(cmd);
7895 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: first half of the two-stage
 * extended-advertising registration.  Creates an instance with the
 * requested parameters (no data yet) and, on ext-adv hardware,
 * programs the controller's advertising parameters.
 *
 * Fix: the minimum-size rejection below previously replied with
 * MGMT_OP_ADD_ADVERTISING; a command-status response must carry the
 * opcode of the command being answered (MGMT_OP_ADD_EXT_ADV_PARAMS),
 * matching every other error path in this function.
 */
7898 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
7899 void *data, u16 data_len)
7901 struct mgmt_cp_add_ext_adv_params *cp = data;
7902 struct mgmt_rp_add_ext_adv_params rp;
7903 struct mgmt_pending_cmd *cmd = NULL;
7904 struct adv_info *adv_instance;
7905 struct hci_request req;
7906 u32 flags, min_interval, max_interval;
7907 u16 timeout, duration;
7912 BT_DBG("%s", hdev->name);
7914 status = mgmt_le_support(hdev);
7916 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7919 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7920 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7921 MGMT_STATUS_INVALID_PARAMS);
7923 /* The purpose of breaking add_advertising into two separate MGMT calls
7924 * for params and data is to allow more parameters to be added to this
7925 * structure in the future. For this reason, we verify that we have the
7926 * bare minimum structure we know of when the interface was defined. Any
7927 * extra parameters we don't know about will be ignored in this request.
7929 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
7930 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7931 MGMT_STATUS_INVALID_PARAMS);
7933 flags = __le32_to_cpu(cp->flags);
7935 if (!requested_adv_flags_are_valid(hdev, flags))
7936 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7937 MGMT_STATUS_INVALID_PARAMS);
7941 /* In new interface, we require that we are powered to register */
7942 if (!hdev_is_powered(hdev)) {
7943 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7944 MGMT_STATUS_REJECTED);
7948 if (adv_busy(hdev)) {
7949 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7954 /* Parse defined parameters from request, use defaults otherwise */
7955 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
7956 __le16_to_cpu(cp->timeout) : 0;
7958 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
7959 __le16_to_cpu(cp->duration) :
7960 hdev->def_multi_adv_rotation_duration;
7962 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7963 __le32_to_cpu(cp->min_interval) :
7964 hdev->le_adv_min_interval;
7966 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7967 __le32_to_cpu(cp->max_interval) :
7968 hdev->le_adv_max_interval;
7970 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
7972 HCI_ADV_TX_POWER_NO_PREFERENCE;
7974 /* Create advertising instance with no advertising or response data */
7975 err = hci_add_adv_instance(hdev, cp->instance, flags,
7976 0, NULL, 0, NULL, timeout, duration,
7977 tx_power, min_interval, max_interval);
7980 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7981 MGMT_STATUS_FAILED);
7985 /* Submit request for advertising params if ext adv available */
7986 if (ext_adv_capable(hdev)) {
7987 hci_req_init(&req, hdev);
7988 adv_instance = hci_find_adv_instance(hdev, cp->instance);
7990 /* Updating parameters of an active instance will return a
7991 * Command Disallowed error, so we must first disable the
7992 * instance if it is active.
7994 if (!adv_instance->pending)
7995 __hci_req_disable_ext_adv_instance(&req, cp->instance);
7997 __hci_req_setup_ext_adv_instance(&req, cp->instance);
7999 err = hci_req_run(&req, add_ext_adv_params_complete);
8002 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
8003 hdev, data, data_len);
/* Could not track the command: roll the instance back. */
8006 hci_remove_adv_instance(hdev, cp->instance);
/* Legacy advertising: complete immediately with defaults. */
8011 rp.instance = cp->instance;
8012 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8013 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8014 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8015 err = mgmt_cmd_complete(sk, hdev->id,
8016 MGMT_OP_ADD_EXT_ADV_PARAMS,
8017 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8021 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_EXT_ADV_DATA handler: second half of the two-stage
 * extended-advertising registration.  Validates and installs the adv
 * and scan-response data on an instance previously created by
 * add_ext_adv_params, then schedules it for advertising.
 */
8026 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8029 struct mgmt_cp_add_ext_adv_data *cp = data;
8030 struct mgmt_rp_add_ext_adv_data rp;
8031 u8 schedule_instance = 0;
8032 struct adv_info *next_instance;
8033 struct adv_info *adv_instance;
8035 struct mgmt_pending_cmd *cmd;
8036 struct hci_request req;
8038 BT_DBG("%s", hdev->name);
/* The instance must have been created by add_ext_adv_params first. */
8042 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8044 if (!adv_instance) {
8045 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8046 MGMT_STATUS_INVALID_PARAMS);
8050 /* In new interface, we require that we are powered to register */
8051 if (!hdev_is_powered(hdev)) {
8052 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8053 MGMT_STATUS_REJECTED);
8054 goto clear_new_instance;
8057 if (adv_busy(hdev)) {
8058 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8060 goto clear_new_instance;
8063 /* Validate new data */
8064 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8065 cp->adv_data_len, true) ||
8066 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8067 cp->adv_data_len, cp->scan_rsp_len, false)) {
8068 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8069 MGMT_STATUS_INVALID_PARAMS);
8070 goto clear_new_instance;
8073 /* Set the data in the advertising instance */
8074 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8075 cp->data, cp->scan_rsp_len,
8076 cp->data + cp->adv_data_len);
8078 /* We're good to go, update advertising data, parameters, and start
8082 hci_req_init(&req, hdev);
8084 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
/* Extended advertising updates the controller directly. */
8086 if (ext_adv_capable(hdev)) {
8087 __hci_req_update_adv_data(&req, cp->instance);
8088 __hci_req_update_scan_rsp_data(&req, cp->instance);
8089 __hci_req_enable_ext_advertising(&req, cp->instance);
8092 /* If using software rotation, determine next instance to use */
8094 if (hdev->cur_adv_instance == cp->instance) {
8095 /* If the currently advertised instance is being changed
8096 * then cancel the current advertising and schedule the
8097 * next instance. If there is only one instance then the
8098 * overridden advertising data will be visible right
8101 cancel_adv_timeout(hdev);
8103 next_instance = hci_get_next_instance(hdev,
8106 schedule_instance = next_instance->instance;
8107 } else if (!hdev->adv_instance_timeout) {
8108 /* Immediately advertise the new instance if no other
8109 * instance is currently being advertised.
8111 schedule_instance = cp->instance;
8114 /* If the HCI_ADVERTISING flag is set or there is no instance to
8115 * be advertised then we have no HCI communication to make.
8118 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8119 !schedule_instance) {
8120 if (adv_instance->pending) {
8121 mgmt_advertising_added(sk, hdev, cp->instance);
8122 adv_instance->pending = false;
8124 rp.instance = cp->instance;
8125 err = mgmt_cmd_complete(sk, hdev->id,
8126 MGMT_OP_ADD_EXT_ADV_DATA,
8127 MGMT_STATUS_SUCCESS, &rp,
8132 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
8136 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
8140 goto clear_new_instance;
8144 err = hci_req_run(&req, add_advertising_complete);
8147 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8148 MGMT_STATUS_FAILED);
8149 mgmt_pending_remove(cmd);
8150 goto clear_new_instance;
8153 /* We were successful in updating data, so trigger advertising_added
8154 * event if this is an instance that wasn't previously advertising. If
8155 * a failure occurs in the requests we initiated, we will remove the
8156 * instance again in add_advertising_complete
8158 if (adv_instance->pending)
8159 mgmt_advertising_added(sk, hdev, cp->instance);
/* clear_new_instance: undo the params-stage registration on error. */
8164 hci_remove_adv_instance(hdev, cp->instance);
8167 hci_dev_unlock(hdev);
/* hci_request completion for remove_advertising: the instance itself
 * has already been removed, so the pending command is completed with
 * success regardless of the HCI status.
 */
8172 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
8175 struct mgmt_pending_cmd *cmd;
8176 struct mgmt_cp_remove_advertising *cp;
8177 struct mgmt_rp_remove_advertising rp;
8179 bt_dev_dbg(hdev, "status %d", status);
8183 /* A failure status here only means that we failed to disable
8184 * advertising. Otherwise, the advertising instance has been removed,
8185 * so report success.
8187 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
8192 rp.instance = cp->instance;
8194 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
8196 mgmt_pending_remove(cmd);
8199 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_ADVERTISING handler: remove one advertising instance
 * (or all, when instance is 0) and disable advertising when no
 * instance remains.
 *
 * Fix: the LL-privacy rejection below previously replied with
 * MGMT_OP_SET_ADVERTISING; a command-status response must carry the
 * opcode of the command being answered (MGMT_OP_REMOVE_ADVERTISING),
 * otherwise userspace cannot match the response to its request.
 */
8202 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8203 void *data, u16 data_len)
8205 struct mgmt_cp_remove_advertising *cp = data;
8206 struct mgmt_rp_remove_advertising rp;
8207 struct mgmt_pending_cmd *cmd;
8208 struct hci_request req;
8211 bt_dev_dbg(hdev, "sock %p", sk);
8213 /* Enabling the experimental LL Privacy support disables support for
8216 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
8217 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8218 MGMT_STATUS_NOT_SUPPORTED);
/* A non-zero instance must actually exist. */
8222 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8223 err = mgmt_cmd_status(sk, hdev->id,
8224 MGMT_OP_REMOVE_ADVERTISING,
8225 MGMT_STATUS_INVALID_PARAMS);
8229 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8230 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8231 pending_find(MGMT_OP_SET_LE, hdev)) {
8232 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8237 if (list_empty(&hdev->adv_instances)) {
8238 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8239 MGMT_STATUS_INVALID_PARAMS);
8243 hci_req_init(&req, hdev);
8245 /* If we use extended advertising, instance is disabled and removed */
8246 if (ext_adv_capable(hdev)) {
8247 __hci_req_disable_ext_adv_instance(&req, cp->instance);
8248 __hci_req_remove_ext_adv_instance(&req, cp->instance);
8251 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
8253 if (list_empty(&hdev->adv_instances))
8254 __hci_req_disable_advertising(&req);
8256 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
8257 * flag is set or the device isn't powered then we have no HCI
8258 * communication to make. Simply return.
8260 if (skb_queue_empty(&req.cmd_q) ||
8261 !hdev_is_powered(hdev) ||
8262 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
8263 hci_req_purge(&req);
8264 rp.instance = cp->instance;
8265 err = mgmt_cmd_complete(sk, hdev->id,
8266 MGMT_OP_REMOVE_ADVERTISING,
8267 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8271 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8278 err = hci_req_run(&req, remove_advertising_complete);
8280 mgmt_pending_remove(cmd);
8283 hci_dev_unlock(hdev);
/* MGMT_OP_GET_ADV_SIZE_INFO handler: report, for a prospective
 * instance and flag set, how many bytes of adv data and scan-response
 * data userspace may supply.
 */
8288 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8289 void *data, u16 data_len)
8291 struct mgmt_cp_get_adv_size_info *cp = data;
8292 struct mgmt_rp_get_adv_size_info rp;
8293 u32 flags, supported_flags;
8296 bt_dev_dbg(hdev, "sock %p", sk);
8298 if (!lmp_le_capable(hdev))
8299 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8300 MGMT_STATUS_REJECTED);
/* Instance numbers are 1-based and bounded by the adv set count. */
8302 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8303 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8304 MGMT_STATUS_INVALID_PARAMS);
8306 flags = __le32_to_cpu(cp->flags);
8308 /* The current implementation only supports a subset of the specified
8311 supported_flags = get_supported_adv_flags(hdev);
8312 if (flags & ~supported_flags)
8313 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8314 MGMT_STATUS_INVALID_PARAMS);
8316 rp.instance = cp->instance;
8317 rp.flags = cp->flags;
8318 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8319 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8321 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8322 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table mapping each MGMT opcode (by array index) to its handler
 * function, the fixed/minimum command size, and HCI_MGMT_* flags
 * (UNTRUSTED = allowed on untrusted sockets, UNCONFIGURED = allowed while
 * the controller is unconfigured, HDEV_OPTIONAL = no index required).
 * NOTE(review): this extract is missing interior lines, including several
 * flag continuation lines and the closing `};` of the array.
 */
8327 static const struct hci_mgmt_handler mgmt_handlers[] = {
8328 { NULL }, /* 0x0000 (no command) */
8329 { read_version, MGMT_READ_VERSION_SIZE,
8331 HCI_MGMT_UNTRUSTED },
8332 { read_commands, MGMT_READ_COMMANDS_SIZE,
8334 HCI_MGMT_UNTRUSTED },
8335 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8337 HCI_MGMT_UNTRUSTED },
8338 { read_controller_info, MGMT_READ_INFO_SIZE,
8339 HCI_MGMT_UNTRUSTED },
8340 { set_powered, MGMT_SETTING_SIZE },
8341 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8342 { set_connectable, MGMT_SETTING_SIZE },
8343 { set_fast_connectable, MGMT_SETTING_SIZE },
8344 { set_bondable, MGMT_SETTING_SIZE },
8345 { set_link_security, MGMT_SETTING_SIZE },
8346 { set_ssp, MGMT_SETTING_SIZE },
8347 { set_hs, MGMT_SETTING_SIZE },
8348 { set_le, MGMT_SETTING_SIZE },
8349 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8350 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8351 { add_uuid, MGMT_ADD_UUID_SIZE },
8352 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8353 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8355 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8357 { disconnect, MGMT_DISCONNECT_SIZE },
8358 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8359 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8360 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8361 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8362 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8363 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8364 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8365 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8366 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8367 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8368 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8369 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8370 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8372 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8373 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8374 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8375 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8376 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8377 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8378 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8379 { set_advertising, MGMT_SETTING_SIZE },
8380 { set_bredr, MGMT_SETTING_SIZE },
8381 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8382 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8383 { set_secure_conn, MGMT_SETTING_SIZE },
8384 { set_debug_keys, MGMT_SETTING_SIZE },
8385 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8386 { load_irks, MGMT_LOAD_IRKS_SIZE,
8388 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8389 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8390 { add_device, MGMT_ADD_DEVICE_SIZE },
8391 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8392 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8394 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8396 HCI_MGMT_UNTRUSTED },
8397 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8398 HCI_MGMT_UNCONFIGURED |
8399 HCI_MGMT_UNTRUSTED },
8400 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8401 HCI_MGMT_UNCONFIGURED },
8402 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8403 HCI_MGMT_UNCONFIGURED },
8404 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8406 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8407 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8409 HCI_MGMT_UNTRUSTED },
8410 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8411 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8413 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8414 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8415 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8416 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8417 HCI_MGMT_UNTRUSTED },
8418 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8419 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8420 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
8421 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
8423 { set_wideband_speech, MGMT_SETTING_SIZE },
8424 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
8425 HCI_MGMT_UNTRUSTED },
8426 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
8427 HCI_MGMT_UNTRUSTED |
8428 HCI_MGMT_HDEV_OPTIONAL },
8429 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
8431 HCI_MGMT_HDEV_OPTIONAL },
8432 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
8433 HCI_MGMT_UNTRUSTED },
8434 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
8436 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
8437 HCI_MGMT_UNTRUSTED },
8438 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
8440 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
8441 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
8442 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
8443 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
8445 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
8446 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
8448 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
8450 { add_adv_patterns_monitor_rssi,
8451 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
/* Announce a newly registered controller index to mgmt listeners: an
 * UNCONF_INDEX_ADDED or INDEX_ADDED event depending on configuration state,
 * plus an EXT_INDEX_ADDED event for extended-index subscribers. Raw-quirk
 * devices are not announced at all.
 * NOTE(review): interior lines (case labels, `ev` field assignments,
 * returns, braces) are missing from this extract.
 */
8455 void mgmt_index_added(struct hci_dev *hdev)
8457 struct mgmt_ev_ext_index ev;
/* Raw devices are not exposed through the management interface. */
8459 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8462 switch (hdev->dev_type) {
8464 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8465 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8466 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8469 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8470 HCI_MGMT_INDEX_EVENTS);
8483 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8484 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller removal: fail all pending mgmt commands with
 * INVALID_INDEX, then emit the appropriate (UNCONF_)INDEX_REMOVED and
 * EXT_INDEX_REMOVED events. Mirror image of mgmt_index_added().
 * NOTE(review): interior lines are missing from this extract.
 */
8487 void mgmt_index_removed(struct hci_dev *hdev)
8489 struct mgmt_ev_ext_index ev;
8490 u8 status = MGMT_STATUS_INVALID_INDEX;
/* Raw devices were never announced, so nothing to remove. */
8492 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8495 switch (hdev->dev_type) {
/* Opcode 0 matches every pending command; all get INVALID_INDEX. */
8497 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8499 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8500 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8501 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8504 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8505 HCI_MGMT_INDEX_EVENTS);
8518 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8519 HCI_MGMT_EXT_INDEX_EVENTS);
/* Re-queue every stored LE connection parameter entry onto the proper
 * pending-action list (pend_le_conns or pend_le_reports) based on its
 * auto_connect policy, after first detaching it from any stale list.
 * NOTE(review): `break` statements and closing braces are missing from
 * this extract.
 */
8522 /* This function requires the caller holds hdev->lock */
8523 static void restart_le_actions(struct hci_dev *hdev)
8525 struct hci_conn_params *p;
8527 list_for_each_entry(p, &hdev->le_conn_params, list) {
8528 /* Needed for AUTO_OFF case where might not "really"
8529 * have been powered off.
8531 list_del_init(&p->action);
8533 switch (p->auto_connect) {
8534 case HCI_AUTO_CONN_DIRECT:
8535 case HCI_AUTO_CONN_ALWAYS:
8536 list_add(&p->action, &hdev->pend_le_conns);
8538 case HCI_AUTO_CONN_REPORT:
8539 list_add(&p->action, &hdev->pend_le_reports);
/* Completion hook for powering on a controller: restore LE auto-connect
 * actions, kick background scanning, answer pending SET_POWERED commands
 * and broadcast the new settings.
 * NOTE(review): interior lines (braces, the error check guarding the
 * success path, hci_dev_lock) are missing from this extract.
 */
8547 void mgmt_power_on(struct hci_dev *hdev, int err)
8549 struct cmd_lookup match = { NULL, hdev };
8551 bt_dev_dbg(hdev, "err %d", err);
8556 restart_le_actions(hdev);
8557 hci_update_background_scan(hdev);
8560 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8562 new_settings(hdev, match.sk);
8567 hci_dev_unlock(hdev);
/* Power-off notification path: answer pending SET_POWERED commands, fail
 * every remaining pending command with NOT_POWERED (or INVALID_INDEX if
 * the device is being unregistered), broadcast a zeroed class-of-device
 * if one was set, and announce the new settings.
 * NOTE(review): interior lines are missing from this extract.
 */
8570 void __mgmt_power_off(struct hci_dev *hdev)
8572 struct cmd_lookup match = { NULL, hdev };
8573 u8 status, zero_cod[] = { 0, 0, 0 };
8575 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8577 /* If the power off is because of hdev unregistration let
8578 * use the appropriate INVALID_INDEX status. Otherwise use
8579 * NOT_POWERED. We cover both scenarios here since later in
8580 * mgmt_index_removed() any hci_conn callbacks will have already
8581 * been triggered, potentially causing misleading DISCONNECTED
8584 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8585 status = MGMT_STATUS_INVALID_INDEX;
8587 status = MGMT_STATUS_NOT_POWERED;
/* Opcode 0 matches every pending command. */
8589 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Only emit CLASS_OF_DEV_CHANGED if the class was actually non-zero. */
8591 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8592 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8593 zero_cod, sizeof(zero_cod),
8594 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8595 ext_info_changed(hdev, NULL);
8598 new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command: map -ERFKILL to RFKILLED and
 * everything else to FAILED, send the status reply and drop the pending
 * entry. No-op (via a return missing from this extract) if no SET_POWERED
 * command is pending.
 */
8604 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8606 struct mgmt_pending_cmd *cmd;
8609 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8613 if (err == -ERFKILL)
8614 status = MGMT_STATUS_RFKILLED;
8616 status = MGMT_STATUS_FAILED;
8618 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8620 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key,
 * letting userspace decide whether to persist it (store_hint).
 * NOTE(review): the `bool persistent` continuation line of the signature
 * is missing from this extract.
 */
8623 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8626 struct mgmt_ev_new_link_key ev;
8628 memset(&ev, 0, sizeof(ev));
8630 ev.store_hint = persistent;
8631 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8632 ev.key.addr.type = BDADDR_BREDR;
8633 ev.key.type = key->type;
8634 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8635 ev.key.pin_len = key->pin_len;
8637 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Translate an SMP long-term-key type plus its authenticated flag into
 * the corresponding MGMT_LTK_* constant reported to userspace. Unknown
 * types fall through to MGMT_LTK_UNAUTHENTICATED.
 * NOTE(review): the case labels for the first two groups are missing
 * from this extract.
 */
8640 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8642 switch (ltk->type) {
8645 if (ltk->authenticated)
8646 return MGMT_LTK_AUTHENTICATED;
8647 return MGMT_LTK_UNAUTHENTICATED;
8649 if (ltk->authenticated)
8650 return MGMT_LTK_P256_AUTH;
8651 return MGMT_LTK_P256_UNAUTH;
8652 case SMP_LTK_P256_DEBUG:
8653 return MGMT_LTK_P256_DEBUG;
8656 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new LE long-term key. The store
 * hint is forced to 0 for non-identity (resolvable/non-resolvable random)
 * addresses, since keys for changing addresses are useless to persist.
 * Only the significant enc_size bytes of the key value are copied; the
 * remainder of the value field is zeroed.
 */
8659 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8661 struct mgmt_ev_new_long_term_key ev;
8663 memset(&ev, 0, sizeof(ev));
8665 /* Devices using resolvable or non-resolvable random addresses
8666 * without providing an identity resolving key don't require
8667 * to store long term keys. Their addresses will change the
8670 * Only when a remote device provides an identity address
8671 * make sure the long term key is stored. If the remote
8672 * identity is known, the long term keys are internally
8673 * mapped to the identity address. So allow static random
8674 * and public addresses here.
/* Top two address bits != 0b11 means not a static random address. */
8676 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8677 (key->bdaddr.b[5] & 0xc0) != 0xc0)
8678 ev.store_hint = 0x00;
8680 ev.store_hint = persistent;
8682 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8683 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8684 ev.key.type = mgmt_ltk_type(key);
8685 ev.key.enc_size = key->enc_size;
8686 ev.key.ediv = key->ediv;
8687 ev.key.rand = key->rand;
/* NOTE(review): the body of this SMP_LTK branch is missing from this
 * extract.
 */
8689 if (key->type == SMP_LTK)
8692 /* Make sure we copy only the significant bytes based on the
8693 * encryption key size, and set the rest of the value to zeroes.
8695 memcpy(ev.key.val, key->val, key->enc_size);
8696 memset(ev.key.val + key->enc_size, 0,
8697 sizeof(ev.key.val) - key->enc_size);
8699 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a new identity resolving key, carrying both
 * the current RPA and the identity address/IRK value.
 */
8702 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8704 struct mgmt_ev_new_irk ev;
8706 memset(&ev, 0, sizeof(ev));
8708 ev.store_hint = persistent;
8710 bacpy(&ev.rpa, &irk->rpa);
8711 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8712 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8713 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8715 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new signature resolving key. As with LTKs,
 * the store hint is suppressed for non-identity random addresses.
 * NOTE(review): the `bool persistent` continuation line of the signature
 * is missing from this extract.
 */
8718 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
8721 struct mgmt_ev_new_csrk ev;
8723 memset(&ev, 0, sizeof(ev));
8725 /* Devices using resolvable or non-resolvable random addresses
8726 * without providing an identity resolving key don't require
8727 * to store signature resolving keys. Their addresses will change
8728 * the next time around.
8730 * Only when a remote device provides an identity address
8731 * make sure the signature resolving key is stored. So allow
8732 * static random and public addresses here.
/* Top two address bits != 0b11 means not a static random address. */
8734 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8735 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
8736 ev.store_hint = 0x00;
8738 ev.store_hint = persistent;
8740 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
8741 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
8742 ev.key.type = csrk->type;
8743 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
8745 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM with updated LE connection parameters for
 * a peer. Silently drops (early return, brace missing from this extract)
 * peers whose address is not an identity address, since parameters for
 * changing addresses cannot be stored.
 */
8748 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8749 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8750 u16 max_interval, u16 latency, u16 timeout)
8752 struct mgmt_ev_new_conn_param ev;
8754 if (!hci_is_identity_address(bdaddr, bdaddr_type))
8757 memset(&ev, 0, sizeof(ev));
8758 bacpy(&ev.addr.bdaddr, bdaddr);
8759 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8760 ev.store_hint = store_hint;
/* All interval/latency/timeout fields go on the wire little-endian. */
8761 ev.min_interval = cpu_to_le16(min_interval);
8762 ev.max_interval = cpu_to_le16(max_interval);
8763 ev.latency = cpu_to_le16(latency);
8764 ev.timeout = cpu_to_le16(timeout);
8766 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED with the peer address, connection flags
 * and an EIR blob built from either the LE advertising data or the
 * remote name plus class of device.
 * NOTE(review): interior lines are missing from this extract, including
 * the `buf`/`eir_len` declarations and the else-branch structure around
 * the name/CoD append path.
 */
8769 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
8770 u32 flags, u8 *name, u8 name_len)
8773 struct mgmt_ev_device_connected *ev = (void *) buf;
8776 bacpy(&ev->addr.bdaddr, &conn->dst);
8777 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
8779 ev->flags = __cpu_to_le32(flags);
8781 /* We must ensure that the EIR Data fields are ordered and
8782 * unique. Keep it simple for now and avoid the problem by not
8783 * adding any BR/EDR data to the LE adv.
8785 if (conn->le_adv_data_len > 0) {
8786 memcpy(&ev->eir[eir_len],
8787 conn->le_adv_data, conn->le_adv_data_len);
8788 eir_len = conn->le_adv_data_len;
8791 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* A zero class of device means "not set"; don't advertise it. */
8794 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
8795 eir_len = eir_append_data(ev->eir, eir_len,
8797 conn->dev_class, 3);
8800 ev->eir_len = cpu_to_le16(eir_len);
8802 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
8803 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback for MGMT_OP_DISCONNECT: complete the
 * pending command with success and hand its socket back through *data so
 * the caller can attribute the subsequent DISCONNECTED event.
 * NOTE(review): the lines storing cmd->sk into *sk are missing from this
 * extract.
 */
8806 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8808 struct sock **sk = data;
8810 cmd->cmd_complete(cmd, 0);
8815 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback for MGMT_OP_UNPAIR_DEVICE: emit the
 * DEVICE_UNPAIRED event for the command's target address, then complete
 * and drop the pending command.
 */
8818 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
8820 struct hci_dev *hdev = data;
8821 struct mgmt_cp_unpair_device *cp = cmd->param;
8823 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
8825 cmd->cmd_complete(cmd, 0);
8826 mgmt_pending_remove(cmd);
/* Return whether a pending SET_POWERED(off) command indicates the
 * controller is being powered down.
 * NOTE(review): the body after pending_find() — the NULL check and the
 * inspection of cp->val — is missing from this extract.
 */
8829 bool mgmt_powering_down(struct hci_dev *hdev)
8831 struct mgmt_pending_cmd *cmd;
8832 struct mgmt_mode *cp;
8834 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED for a closed link, answering any
 * pending DISCONNECT commands first so the event carries the right
 * originating socket. Also fast-tracks the deferred power-off work when
 * this was the last connection during a power-down.
 * NOTE(review): interior lines (the `unlock` label, the reason
 * assignment, closing braces) are missing from this extract.
 */
8845 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
8846 u8 link_type, u8 addr_type, u8 reason,
8847 bool mgmt_connected)
8849 struct mgmt_ev_device_disconnected ev;
8850 struct sock *sk = NULL;
8852 /* The connection is still in hci_conn_hash so test for 1
8853 * instead of 0 to know if this is the last one.
8855 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8856 cancel_delayed_work(&hdev->power_off);
8857 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Only report links userspace was told about, and only ACL/LE. */
8860 if (!mgmt_connected)
8863 if (link_type != ACL_LINK && link_type != LE_LINK)
8866 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
8868 bacpy(&ev.addr.bdaddr, bdaddr);
8869 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8872 /* Report disconnects due to suspend */
8873 if (hdev->suspended)
8874 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
8876 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
8881 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Complete a pending DISCONNECT command with the (mapped) HCI failure
 * status, after flushing any pending UNPAIR_DEVICE commands. The pending
 * command is only completed when its target address and type match the
 * failing link.
 * NOTE(review): interior lines (NULL check on cmd, cp assignment,
 * returns) are missing from this extract.
 */
8885 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8886 u8 link_type, u8 addr_type, u8 status)
8888 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
8889 struct mgmt_cp_disconnect *cp;
8890 struct mgmt_pending_cmd *cmd;
8892 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8895 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
8901 if (bacmp(bdaddr, &cp->addr.bdaddr))
8904 if (cp->addr.type != bdaddr_type)
8907 cmd->cmd_complete(cmd, mgmt_status(status));
8908 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED with the mapped HCI status; also
 * fast-tracks the deferred power-off work when the failing connection
 * was the last one during a power-down.
 */
8911 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8912 u8 addr_type, u8 status)
8914 struct mgmt_ev_connect_failed ev;
8916 /* The connection is still in hci_conn_hash so test for 1
8917 * instead of 0 to know if this is the last one.
8919 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8920 cancel_delayed_work(&hdev->power_off);
8921 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8924 bacpy(&ev.addr.bdaddr, bdaddr);
8925 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8926 ev.status = mgmt_status(status);
8928 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_PIN_CODE_REQUEST asking userspace for a BR/EDR PIN code.
 * NOTE(review): the line setting ev.secure is missing from this extract.
 */
8931 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8933 struct mgmt_ev_pin_code_request ev;
8935 bacpy(&ev.addr.bdaddr, bdaddr);
8936 ev.addr.type = BDADDR_BREDR;
8939 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the mapped HCI status.
 * NOTE(review): the status parameter line of the signature and the NULL
 * check on cmd are missing from this extract.
 */
8942 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8945 struct mgmt_pending_cmd *cmd;
8947 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8951 cmd->cmd_complete(cmd, mgmt_status(status));
8952 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the mapped HCI
 * status. Mirrors mgmt_pin_code_reply_complete().
 */
8955 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8958 struct mgmt_pending_cmd *cmd;
8960 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
8964 cmd->cmd_complete(cmd, mgmt_status(status));
8965 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_USER_CONFIRM_REQUEST asking userspace to confirm a
 * numeric-comparison value during pairing.
 * NOTE(review): the confirm_hint parameter line of the signature is
 * missing from this extract.
 */
8968 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8969 u8 link_type, u8 addr_type, u32 value,
8972 struct mgmt_ev_user_confirm_request ev;
8974 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8976 bacpy(&ev.addr.bdaddr, bdaddr);
8977 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8978 ev.confirm_hint = confirm_hint;
8979 ev.value = cpu_to_le32(value);
8981 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_USER_PASSKEY_REQUEST asking userspace to enter a passkey
 * during pairing.
 */
8985 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8986 u8 link_type, u8 addr_type)
8988 struct mgmt_ev_user_passkey_request ev;
8990 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8992 bacpy(&ev.addr.bdaddr, bdaddr);
8993 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8995 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared helper for the four user confirm/passkey (neg-)reply completion
 * functions below: find the pending command for @opcode, complete it with
 * the mapped HCI status and drop it.
 * NOTE(review): the opcode parameter line of the signature, the NULL
 * check and the returns are missing from this extract.
 */
8999 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9000 u8 link_type, u8 addr_type, u8 status,
9003 struct mgmt_pending_cmd *cmd;
9005 cmd = pending_find(opcode, hdev);
9009 cmd->cmd_complete(cmd, mgmt_status(status));
9010 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY command. */
9015 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9016 u8 link_type, u8 addr_type, u8 status)
9018 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9019 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY command. */
9022 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9023 u8 link_type, u8 addr_type, u8 status)
9025 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9027 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY command. */
9030 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9031 u8 link_type, u8 addr_type, u8 status)
9033 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9034 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY command. */
9037 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9038 u8 link_type, u8 addr_type, u8 status)
9040 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9042 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey and
 * how many digits the remote side has entered so far.
 * NOTE(review): the entered parameter line of the signature is missing
 * from this extract.
 */
9045 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9046 u8 link_type, u8 addr_type, u32 passkey,
9049 struct mgmt_ev_passkey_notify ev;
9051 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9053 bacpy(&ev.addr.bdaddr, bdaddr);
9054 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9055 ev.passkey = __cpu_to_le32(passkey);
9056 ev.entered = entered;
9058 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_AUTH_FAILED for a failed authentication, attributing it
 * to the socket of any in-flight pairing command, and complete that
 * pending command with the failure status.
 * NOTE(review): the ev.status assignment and the if-guard around the
 * cmd completion are missing from this extract.
 */
9061 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9063 struct mgmt_ev_auth_failed ev;
9064 struct mgmt_pending_cmd *cmd;
9065 u8 status = mgmt_status(hci_status);
9067 bacpy(&ev.addr.bdaddr, &conn->dst);
9068 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9071 cmd = find_pairing(conn);
9073 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9074 cmd ? cmd->sk : NULL);
9077 cmd->cmd_complete(cmd, status);
9078 mgmt_pending_remove(cmd);
/* Completion hook for enabling/disabling link-level authentication: on
 * failure, report the mapped status to pending SET_LINK_SECURITY
 * commands; on success, sync HCI_LINK_SECURITY with the HCI_AUTH flag,
 * answer pending commands and broadcast new settings if anything changed.
 * NOTE(review): interior lines (the `changed` declaration, if/else
 * structure, returns) are missing from this extract.
 */
9082 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9084 struct cmd_lookup match = { NULL, hdev };
9088 u8 mgmt_err = mgmt_status(status);
9089 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9090 cmd_status_rsp, &mgmt_err);
9094 if (test_bit(HCI_AUTH, &hdev->flags))
9095 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9097 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9099 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9103 new_settings(hdev, match.sk);
/* Queue an HCI Write Extended Inquiry Response command that clears the
 * controller's EIR data, also zeroing the cached copy in hdev->eir.
 * No-op on controllers without extended inquiry support (the early
 * return's body is missing from this extract).
 */
9109 static void clear_eir(struct hci_request *req)
9111 struct hci_dev *hdev = req->hdev;
9112 struct hci_cp_write_eir cp;
9114 if (!lmp_ext_inq_capable(hdev))
9117 memset(hdev->eir, 0, sizeof(hdev->eir));
9119 memset(&cp, 0, sizeof(cp));
9121 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion hook for toggling Simple Secure Pairing: on failure, roll
 * back HCI_SSP_ENABLED/HCI_HS_ENABLED and report the error to pending
 * SET_SSP commands; on success, sync the flags, answer pending commands,
 * broadcast settings changes, and queue follow-up HCI commands (SSP
 * debug mode, EIR update or clear).
 * NOTE(review): interior lines (branch structure, returns, the flag
 * names split across missing lines, the else/clear_eir path) are
 * missing from this extract.
 */
9124 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
9126 struct cmd_lookup match = { NULL, hdev };
9127 struct hci_request req;
9128 bool changed = false;
9131 u8 mgmt_err = mgmt_status(status);
9133 if (enable && hci_dev_test_and_clear_flag(hdev,
9135 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9136 new_settings(hdev, NULL);
9139 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
9145 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
9147 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
9149 changed = hci_dev_test_and_clear_flag(hdev,
9152 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9155 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
9158 new_settings(hdev, match.sk);
9163 hci_req_init(&req, hdev);
9165 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9166 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
9167 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
9168 sizeof(enable), &enable);
9169 __hci_req_update_eir(&req);
9174 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture the first pending command's
 * socket into the cmd_lookup match (taking a reference), so later events
 * can be attributed to it.
 */
9177 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9179 struct cmd_lookup *match = data;
9181 if (match->sk == NULL) {
9182 match->sk = cmd->sk;
9183 sock_hold(match->sk);
/* Completion hook for class-of-device updates: find the socket of any
 * pending SET_DEV_CLASS/ADD_UUID/REMOVE_UUID command, then broadcast the
 * CLASS_OF_DEV_CHANGED event and extended-info change.
 * NOTE(review): the status parameter line of the signature, the status
 * check guarding the event, and the sock_put release are missing from
 * this extract.
 */
9187 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9190 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9192 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9193 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9194 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9197 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9198 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9199 ext_info_changed(hdev, NULL);
/* Completion hook for a local-name change: cache the new name, then
 * broadcast LOCAL_NAME_CHANGED and extended-info-changed events —
 * unless the change came from the power-on sequence, in which case no
 * events are sent.
 * NOTE(review): interior lines (status check, branch braces) are missing
 * from this extract.
 */
9206 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9208 struct mgmt_cp_set_local_name ev;
9209 struct mgmt_pending_cmd *cmd;
9214 memset(&ev, 0, sizeof(ev));
9215 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9216 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9218 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9220 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9222 /* If this is a HCI command related to powering on the
9223 * HCI dev don't send any mgmt signals.
9225 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9229 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9230 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9231 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* Return whether the 128-bit @uuid appears in the @uuids array of
 * @uuid_count entries (byte-wise comparison).
 * NOTE(review): the loop-variable declaration, the `return true`, the
 * final `return false` and closing braces are missing from this extract.
 */
9234 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9238 for (i = 0; i < uuid_count; i++) {
9239 if (!memcmp(uuid, uuids[i], 16))
/* Walk an EIR/advertising data blob and return whether any contained
 * UUID (16-, 32- or 128-bit, expanded against the Bluetooth base UUID)
 * matches an entry in the discovery filter's @uuids list. 16/32-bit
 * UUIDs are little-endian in EIR, hence the reversed byte placement
 * into the big-endian-ordered 128-bit buffer.
 * NOTE(review): interior lines (local declarations, the switch header,
 * bounds checks, `return true/false` lines, `break`s) are missing from
 * this extract.
 */
9246 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9250 while (parsed < eir_len) {
9251 u8 field_len = eir[0];
/* Guard against a field length that overruns the remaining data. */
9258 if (eir_len - parsed < field_len + 1)
9262 case EIR_UUID16_ALL:
9263 case EIR_UUID16_SOME:
9264 for (i = 0; i + 3 <= field_len; i += 2) {
9265 memcpy(uuid, bluetooth_base_uuid, 16);
9266 uuid[13] = eir[i + 3];
9267 uuid[12] = eir[i + 2];
9268 if (has_uuid(uuid, uuid_count, uuids))
9272 case EIR_UUID32_ALL:
9273 case EIR_UUID32_SOME:
9274 for (i = 0; i + 5 <= field_len; i += 4) {
9275 memcpy(uuid, bluetooth_base_uuid, 16);
9276 uuid[15] = eir[i + 5];
9277 uuid[14] = eir[i + 4];
9278 uuid[13] = eir[i + 3];
9279 uuid[12] = eir[i + 2];
9280 if (has_uuid(uuid, uuid_count, uuids))
9284 case EIR_UUID128_ALL:
9285 case EIR_UUID128_SOME:
9286 for (i = 0; i + 17 <= field_len; i += 16) {
9287 memcpy(uuid, eir + i + 2, 16);
9288 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field: length byte plus field_len data bytes. */
9294 parsed += field_len + 1;
9295 eir += field_len + 1;
/* Schedule a delayed LE scan restart so duplicate filtering is reset and
 * fresh RSSI values are reported — but only while actively scanning, and
 * only if the current scan window has enough time left to be worth
 * restarting.
 */
9301 static void restart_le_scan(struct hci_dev *hdev)
9303 /* If controller is not scanning we are done. */
9304 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9307 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9308 hdev->discovery.scan_start +
9309 hdev->discovery.scan_duration))
9312 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9313 DISCOV_LE_RESTART_DELAY);
/* Apply the active service-discovery filter to a scan result: drop
 * results below the RSSI threshold (unless the strict-duplicate-filter
 * quirk requires letting them through for a rescan), drop results whose
 * EIR and scan-response data contain none of the filter UUIDs, and
 * restart scanning on strict-filter controllers to refresh RSSI.
 * NOTE(review): the `return false`/`return true` lines and closing
 * braces are missing from this extract.
 */
9316 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9317 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9319 /* If a RSSI threshold has been specified, and
9320 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9321 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9322 * is set, let it through for further processing, as we might need to
9325 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9326 * the results are also dropped.
9328 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9329 (rssi == HCI_RSSI_INVALID ||
9330 (rssi < hdev->discovery.rssi &&
9331 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9334 if (hdev->discovery.uuid_count != 0) {
9335 /* If a list of UUIDs is provided in filter, results with no
9336 * matching UUID should be dropped.
9338 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9339 hdev->discovery.uuids) &&
9340 !eir_has_uuids(scan_rsp, scan_rsp_len,
9341 hdev->discovery.uuid_count,
9342 hdev->discovery.uuids))
9346 /* If duplicate filtering does not report RSSI changes, then restart
9347 * scanning to ensure updated result with updated RSSI values.
9349 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9350 restart_le_scan(hdev);
9352 /* Validate RSSI value against the RSSI threshold once more. */
9353 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9354 rssi < hdev->discovery.rssi)
/* Build and emit MGMT_EV_DEVICE_FOUND for an inquiry/scan result:
 * suppress events for non-kernel-initiated discovery (except passive
 * scanning / advertisement monitoring), apply the service-discovery and
 * limited-discovery filters, then pack address, RSSI, flags, EIR data,
 * an appended class-of-device field (if not already in the EIR) and any
 * scan-response data into one event.
 * NOTE(review): interior lines (the `buf`/`ev_size`/`eir_len`
 * declarations, several returns and braces, the RSSI normalisation
 * assignment, the ev->rssi store) are missing from this extract.
 */
9361 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9362 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9363 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9366 struct mgmt_ev_device_found *ev = (void *)buf;
9369 /* Don't send events for a non-kernel initiated discovery. With
9370 * LE one exception is if we have pend_le_reports > 0 in which
9371 * case we're doing passive scanning and want these events.
9373 if (!hci_discovery_active(hdev)) {
9374 if (link_type == ACL_LINK)
9376 if (link_type == LE_LINK &&
9377 list_empty(&hdev->pend_le_reports) &&
9378 !hci_is_adv_monitoring(hdev)) {
9383 if (hdev->discovery.result_filtering) {
9384 /* We are using service discovery */
9385 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
9390 if (hdev->discovery.limited) {
9391 /* Check for limited discoverable bit */
/* BR/EDR: bit 5 of the CoD minor byte is "limited discoverable". */
9393 if (!(dev_class[1] & 0x20))
/* LE: look for the Limited Discoverable flag in the AD Flags field. */
9396 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
9397 if (!flags || !(flags[0] & LE_AD_LIMITED))
9402 /* Make sure that the buffer is big enough. The 5 extra bytes
9403 * are for the potential CoD field.
9405 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9408 memset(buf, 0, sizeof(buf));
9410 /* In case of device discovery with BR/EDR devices (pre 1.2), the
9411 * RSSI value was reported as 0 when not available. This behavior
9412 * is kept when using device discovery. This is required for full
9413 * backwards compatibility with the API.
9415 * However when using service discovery, the value 127 will be
9416 * returned when the RSSI is not available.
9418 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9419 link_type == ACL_LINK)
9422 bacpy(&ev->addr.bdaddr, bdaddr);
9423 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9425 ev->flags = cpu_to_le32(flags);
9428 /* Copy EIR or advertising data into event */
9429 memcpy(ev->eir, eir, eir_len);
9431 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9433 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9436 if (scan_rsp_len > 0)
9437 /* Append scan response data to event */
9438 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9440 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9441 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9443 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit MGMT_EV_DEVICE_FOUND carrying only the peer's resolved remote
 * name (as an EIR_NAME_COMPLETE field), used when a name lookup
 * completes after discovery.
 * NOTE(review): the eir_len declaration and the ev->rssi assignment are
 * missing from this extract.
 */
9446 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9447 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9449 struct mgmt_ev_device_found *ev;
9450 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9453 ev = (struct mgmt_ev_device_found *) buf;
9455 memset(buf, 0, sizeof(buf));
9457 bacpy(&ev->addr.bdaddr, bdaddr);
9458 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9461 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9464 ev->eir_len = cpu_to_le16(eir_len);
9466 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Emit MGMT_EV_DISCOVERING announcing a change in discovery state,
 * together with the active discovery type.
 */
9469 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9471 struct mgmt_ev_discovering ev;
9473 bt_dev_dbg(hdev, "discovering %u", discovering);
9475 memset(&ev, 0, sizeof(ev));
9476 ev.type = hdev->discovery.type;
9477 ev.discovering = discovering;
9479 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_CONTROLLER_SUSPEND announcing the controller's suspend
 * state to userspace.
 */
9482 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9484 struct mgmt_ev_controller_suspend ev;
9486 ev.suspend_state = state;
9487 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_CONTROLLER_RESUME with the wake reason and — depending
 * on a branch whose condition lines are missing from this extract —
 * either the waking peer's address or a zeroed address.
 * NOTE(review): the addr_type parameter line of the signature and the
 * if/else structure around the address handling are missing here.
 */
9490 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9493 struct mgmt_ev_controller_resume ev;
9495 ev.wake_reason = reason;
9497 bacpy(&ev.addr.bdaddr, bdaddr);
9498 ev.addr.type = addr_type;
9500 memset(&ev.addr, 0, sizeof(ev.addr));
9503 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
/* Registration record for the HCI control channel: binds the handler
 * table above to HCI_CHANNEL_CONTROL with per-hdev init via
 * mgmt_init_hdev.
 * NOTE(review): the closing `};` of this struct and the signature of the
 * mgmt_init() function (whose lone `return` statement follows) are
 * missing from this extract.
 */
9506 static struct hci_mgmt_chan chan = {
9507 .channel = HCI_CHANNEL_CONTROL,
9508 .handler_count = ARRAY_SIZE(mgmt_handlers),
9509 .handlers = mgmt_handlers,
9510 .hdev_init = mgmt_init_hdev,
/* Body of mgmt_init(): register the control channel with the HCI core. */
9515 return hci_mgmt_chan_register(&chan);
9518 void mgmt_exit(void)
9520 hci_mgmt_chan_unregister(&chan);