2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
/* Version/revision of the management interface reported to user space
 * via MGMT_OP_READ_VERSION; MGMT_REVISION is bumped with each API
 * addition.
 */
42 #define MGMT_VERSION 1
43 #define MGMT_REVISION 19
/* Opcodes that a trusted (privileged) management socket may issue.
 * NOTE(review): the embedded original-line numbers in this extract are
 * not contiguous (e.g. 47-48, 52, 54-56, 59-60 absent), so several
 * entries and the closing "};" of this table appear to have been lost
 * during extraction — verify against the upstream table before
 * relying on this list being complete.
 */
45 static const u16 mgmt_commands[] = {
46 MGMT_OP_READ_INDEX_LIST,
49 MGMT_OP_SET_DISCOVERABLE,
50 MGMT_OP_SET_CONNECTABLE,
51 MGMT_OP_SET_FAST_CONNECTABLE,
53 MGMT_OP_SET_LINK_SECURITY,
57 MGMT_OP_SET_DEV_CLASS,
58 MGMT_OP_SET_LOCAL_NAME,
61 MGMT_OP_LOAD_LINK_KEYS,
62 MGMT_OP_LOAD_LONG_TERM_KEYS,
64 MGMT_OP_GET_CONNECTIONS,
65 MGMT_OP_PIN_CODE_REPLY,
66 MGMT_OP_PIN_CODE_NEG_REPLY,
67 MGMT_OP_SET_IO_CAPABILITY,
69 MGMT_OP_CANCEL_PAIR_DEVICE,
70 MGMT_OP_UNPAIR_DEVICE,
71 MGMT_OP_USER_CONFIRM_REPLY,
72 MGMT_OP_USER_CONFIRM_NEG_REPLY,
73 MGMT_OP_USER_PASSKEY_REPLY,
74 MGMT_OP_USER_PASSKEY_NEG_REPLY,
75 MGMT_OP_READ_LOCAL_OOB_DATA,
76 MGMT_OP_ADD_REMOTE_OOB_DATA,
77 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
78 MGMT_OP_START_DISCOVERY,
79 MGMT_OP_STOP_DISCOVERY,
82 MGMT_OP_UNBLOCK_DEVICE,
83 MGMT_OP_SET_DEVICE_ID,
84 MGMT_OP_SET_ADVERTISING,
86 MGMT_OP_SET_STATIC_ADDRESS,
87 MGMT_OP_SET_SCAN_PARAMS,
88 MGMT_OP_SET_SECURE_CONN,
89 MGMT_OP_SET_DEBUG_KEYS,
92 MGMT_OP_GET_CONN_INFO,
93 MGMT_OP_GET_CLOCK_INFO,
95 MGMT_OP_REMOVE_DEVICE,
96 MGMT_OP_LOAD_CONN_PARAM,
97 MGMT_OP_READ_UNCONF_INDEX_LIST,
98 MGMT_OP_READ_CONFIG_INFO,
99 MGMT_OP_SET_EXTERNAL_CONFIG,
100 MGMT_OP_SET_PUBLIC_ADDRESS,
101 MGMT_OP_START_SERVICE_DISCOVERY,
102 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
103 MGMT_OP_READ_EXT_INDEX_LIST,
104 MGMT_OP_READ_ADV_FEATURES,
105 MGMT_OP_ADD_ADVERTISING,
106 MGMT_OP_REMOVE_ADVERTISING,
107 MGMT_OP_GET_ADV_SIZE_INFO,
108 MGMT_OP_START_LIMITED_DISCOVERY,
109 MGMT_OP_READ_EXT_INFO,
110 MGMT_OP_SET_APPEARANCE,
111 MGMT_OP_SET_BLOCKED_KEYS,
112 MGMT_OP_SET_WIDEBAND_SPEECH,
113 MGMT_OP_READ_CONTROLLER_CAP,
114 MGMT_OP_READ_EXP_FEATURES_INFO,
115 MGMT_OP_SET_EXP_FEATURE,
116 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
117 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
118 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
119 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
120 MGMT_OP_GET_DEVICE_FLAGS,
121 MGMT_OP_SET_DEVICE_FLAGS,
122 MGMT_OP_READ_ADV_MONITOR_FEATURES,
123 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
124 MGMT_OP_REMOVE_ADV_MONITOR,
125 MGMT_OP_ADD_EXT_ADV_PARAMS,
126 MGMT_OP_ADD_EXT_ADV_DATA,
127 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
/* Events delivered to trusted management sockets.
 * NOTE(review): gaps in the embedded numbering (132, 145, 147, 152-153)
 * suggest some entries and the closing "};" were dropped in
 * extraction — verify upstream.
 */
130 static const u16 mgmt_events[] = {
131 MGMT_EV_CONTROLLER_ERROR,
133 MGMT_EV_INDEX_REMOVED,
134 MGMT_EV_NEW_SETTINGS,
135 MGMT_EV_CLASS_OF_DEV_CHANGED,
136 MGMT_EV_LOCAL_NAME_CHANGED,
137 MGMT_EV_NEW_LINK_KEY,
138 MGMT_EV_NEW_LONG_TERM_KEY,
139 MGMT_EV_DEVICE_CONNECTED,
140 MGMT_EV_DEVICE_DISCONNECTED,
141 MGMT_EV_CONNECT_FAILED,
142 MGMT_EV_PIN_CODE_REQUEST,
143 MGMT_EV_USER_CONFIRM_REQUEST,
144 MGMT_EV_USER_PASSKEY_REQUEST,
146 MGMT_EV_DEVICE_FOUND,
148 MGMT_EV_DEVICE_BLOCKED,
149 MGMT_EV_DEVICE_UNBLOCKED,
150 MGMT_EV_DEVICE_UNPAIRED,
151 MGMT_EV_PASSKEY_NOTIFY,
154 MGMT_EV_DEVICE_ADDED,
155 MGMT_EV_DEVICE_REMOVED,
156 MGMT_EV_NEW_CONN_PARAM,
157 MGMT_EV_UNCONF_INDEX_ADDED,
158 MGMT_EV_UNCONF_INDEX_REMOVED,
159 MGMT_EV_NEW_CONFIG_OPTIONS,
160 MGMT_EV_EXT_INDEX_ADDED,
161 MGMT_EV_EXT_INDEX_REMOVED,
162 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
163 MGMT_EV_ADVERTISING_ADDED,
164 MGMT_EV_ADVERTISING_REMOVED,
165 MGMT_EV_EXT_INFO_CHANGED,
166 MGMT_EV_PHY_CONFIGURATION_CHANGED,
167 MGMT_EV_EXP_FEATURE_CHANGED,
168 MGMT_EV_DEVICE_FLAGS_CHANGED,
169 MGMT_EV_CONTROLLER_SUSPEND,
170 MGMT_EV_CONTROLLER_RESUME,
/* Read-only opcodes an *untrusted* (unprivileged) socket may issue.
 * NOTE(review): number gap at 175 and missing closing "};" — some
 * content lost in extraction; verify upstream.
 */
173 static const u16 mgmt_untrusted_commands[] = {
174 MGMT_OP_READ_INDEX_LIST,
176 MGMT_OP_READ_UNCONF_INDEX_LIST,
177 MGMT_OP_READ_CONFIG_INFO,
178 MGMT_OP_READ_EXT_INDEX_LIST,
179 MGMT_OP_READ_EXT_INFO,
180 MGMT_OP_READ_CONTROLLER_CAP,
181 MGMT_OP_READ_EXP_FEATURES_INFO,
182 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
183 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Events delivered even to untrusted sockets (no per-device secrets).
 * NOTE(review): number gap at 187 and missing closing "};" — some
 * content lost in extraction; verify upstream.
 */
186 static const u16 mgmt_untrusted_events[] = {
188 MGMT_EV_INDEX_REMOVED,
189 MGMT_EV_NEW_SETTINGS,
190 MGMT_EV_CLASS_OF_DEV_CHANGED,
191 MGMT_EV_LOCAL_NAME_CHANGED,
192 MGMT_EV_UNCONF_INDEX_ADDED,
193 MGMT_EV_UNCONF_INDEX_REMOVED,
194 MGMT_EV_NEW_CONFIG_OPTIONS,
195 MGMT_EV_EXT_INDEX_ADDED,
196 MGMT_EV_EXT_INDEX_REMOVED,
197 MGMT_EV_EXT_INFO_CHANGED,
198 MGMT_EV_EXP_FEATURE_CHANGED,
199 MGMT_EV_ADV_MONITOR_ADDED,
200 MGMT_EV_ADV_MONITOR_REMOVED,
/* How long the EIR/class "service cache" stays dirty before being
 * flushed to the controller (2 seconds, in jiffies).
 */
158: 203 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* 16 bytes of zeros, used to detect all-zero (invalid) link keys. */
205 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
206 "\x00\x00\x00\x00\x00\x00\x00\x00"
208 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status byte; see mgmt_status() below.
 * NOTE(review): the "0x00" success entry at original line 210 and the
 * closing "};" are absent from this extract — verify upstream.
 */
209 static const u8 mgmt_status_table[] = {
211 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
212 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
213 MGMT_STATUS_FAILED, /* Hardware Failure */
214 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
215 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
216 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
217 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
218 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
219 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
220 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
221 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
222 MGMT_STATUS_BUSY, /* Command Disallowed */
223 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
224 MGMT_STATUS_REJECTED, /* Rejected Security */
225 MGMT_STATUS_REJECTED, /* Rejected Personal */
226 MGMT_STATUS_TIMEOUT, /* Host Timeout */
227 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
228 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
229 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
230 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
231 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
232 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
233 MGMT_STATUS_BUSY, /* Repeated Attempts */
234 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
235 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
236 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
237 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
238 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
239 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
240 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
241 MGMT_STATUS_FAILED, /* Unspecified Error */
242 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
243 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
244 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
245 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
246 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
247 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
248 MGMT_STATUS_FAILED, /* Unit Link Key Used */
249 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
250 MGMT_STATUS_TIMEOUT, /* Instant Passed */
251 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
252 MGMT_STATUS_FAILED, /* Transaction Collision */
253 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
254 MGMT_STATUS_REJECTED, /* QoS Rejected */
255 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
256 MGMT_STATUS_REJECTED, /* Insufficient Security */
257 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
258 MGMT_STATUS_BUSY, /* Role Switch Pending */
259 MGMT_STATUS_FAILED, /* Slot Violation */
260 MGMT_STATUS_FAILED, /* Role Switch Failed */
261 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
262 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
263 MGMT_STATUS_BUSY, /* Host Busy Pairing */
264 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
265 MGMT_STATUS_BUSY, /* Controller Busy */
266 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
267 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
268 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
269 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
270 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
273 static u8 mgmt_status(u8 hci_status)
275 if (hci_status < ARRAY_SIZE(mgmt_status_table))
276 return mgmt_status_table[hci_status];
278 return MGMT_STATUS_FAILED;
281 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
284 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
288 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
289 u16 len, int flag, struct sock *skip_sk)
291 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
295 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
296 struct sock *skip_sk)
298 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
299 HCI_SOCK_TRUSTED, skip_sk);
302 static u8 le_addr_type(u8 mgmt_addr_type)
304 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
305 return ADDR_LE_DEV_PUBLIC;
307 return ADDR_LE_DEV_RANDOM;
310 void mgmt_fill_version_info(void *ver)
312 struct mgmt_rp_read_version *rp = ver;
314 rp->version = MGMT_VERSION;
315 rp->revision = cpu_to_le16(MGMT_REVISION);
318 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
321 struct mgmt_rp_read_version rp;
323 bt_dev_dbg(hdev, "sock %p", sk);
325 mgmt_fill_version_info(&rp);
327 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
331 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
334 struct mgmt_rp_read_commands *rp;
335 u16 num_commands, num_events;
339 bt_dev_dbg(hdev, "sock %p", sk);
341 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
342 num_commands = ARRAY_SIZE(mgmt_commands);
343 num_events = ARRAY_SIZE(mgmt_events);
345 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
346 num_events = ARRAY_SIZE(mgmt_untrusted_events);
349 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
351 rp = kmalloc(rp_size, GFP_KERNEL);
355 rp->num_commands = cpu_to_le16(num_commands);
356 rp->num_events = cpu_to_le16(num_events);
358 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
359 __le16 *opcode = rp->opcodes;
361 for (i = 0; i < num_commands; i++, opcode++)
362 put_unaligned_le16(mgmt_commands[i], opcode);
364 for (i = 0; i < num_events; i++, opcode++)
365 put_unaligned_le16(mgmt_events[i], opcode);
367 __le16 *opcode = rp->opcodes;
369 for (i = 0; i < num_commands; i++, opcode++)
370 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
372 for (i = 0; i < num_events; i++, opcode++)
373 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
376 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indexes of all
 * configured HCI_PRIMARY controllers. Counts under hci_dev_list_lock,
 * allocates with GFP_ATOMIC (lock held), then fills the reply while
 * skipping controllers still in setup/config or claimed by a user
 * channel, and raw-only devices.
 * NOTE(review): this extract is missing interior lines (loop braces,
 * local declarations, the NULL check after kmalloc, kfree/return) —
 * do not treat the body below as complete; verify upstream.
 */
383 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
386 struct mgmt_rp_read_index_list *rp;
392 bt_dev_dbg(hdev, "sock %p", sk);
394 read_lock(&hci_dev_list_lock);
397 list_for_each_entry(d, &hci_dev_list, list) {
398 if (d->dev_type == HCI_PRIMARY &&
399 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
403 rp_len = sizeof(*rp) + (2 * count);
404 rp = kmalloc(rp_len, GFP_ATOMIC);
406 read_unlock(&hci_dev_list_lock);
411 list_for_each_entry(d, &hci_dev_list, list) {
412 if (hci_dev_test_flag(d, HCI_SETUP) ||
413 hci_dev_test_flag(d, HCI_CONFIG) ||
414 hci_dev_test_flag(d, HCI_USER_CHANNEL))
417 /* Devices marked as raw-only are neither configured
418 * nor unconfigured controllers.
420 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
423 if (d->dev_type == HCI_PRIMARY &&
424 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
425 rp->index[count++] = cpu_to_le16(d->id);
426 bt_dev_dbg(hdev, "Added hci%u", d->id);
430 rp->num_controllers = cpu_to_le16(count);
431 rp_len = sizeof(*rp) + (2 * count);
433 read_unlock(&hci_dev_list_lock);
435 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * returns only controllers with HCI_UNCONFIGURED set (still needing
 * e.g. a public address or external configuration).
 * NOTE(review): interior lines (braces, declarations, NULL check,
 * kfree/return) were lost in extraction — verify upstream.
 */
443 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
444 void *data, u16 data_len)
446 struct mgmt_rp_read_unconf_index_list *rp;
452 bt_dev_dbg(hdev, "sock %p", sk);
454 read_lock(&hci_dev_list_lock);
457 list_for_each_entry(d, &hci_dev_list, list) {
458 if (d->dev_type == HCI_PRIMARY &&
459 hci_dev_test_flag(d, HCI_UNCONFIGURED))
463 rp_len = sizeof(*rp) + (2 * count);
464 rp = kmalloc(rp_len, GFP_ATOMIC);
466 read_unlock(&hci_dev_list_lock);
471 list_for_each_entry(d, &hci_dev_list, list) {
472 if (hci_dev_test_flag(d, HCI_SETUP) ||
473 hci_dev_test_flag(d, HCI_CONFIG) ||
474 hci_dev_test_flag(d, HCI_USER_CHANNEL))
477 /* Devices marked as raw-only are neither configured
478 * nor unconfigured controllers.
480 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
483 if (d->dev_type == HCI_PRIMARY &&
484 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
485 rp->index[count++] = cpu_to_le16(d->id);
486 bt_dev_dbg(hdev, "Added hci%u", d->id);
490 rp->num_controllers = cpu_to_le16(count);
491 rp_len = sizeof(*rp) + (2 * count);
493 read_unlock(&hci_dev_list_lock);
495 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
496 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with all PRIMARY and AMP
 * controllers, tagging each entry with a type (0x00 configured
 * primary, 0x01 unconfigured primary, 0x02 AMP) plus its bus. Also
 * switches this socket from the legacy index events over to extended
 * index events. Uses struct_size() for the flexible-array allocation.
 * NOTE(review): interior lines (braces, declarations, NULL check,
 * the "else"/"continue" branches, kfree/return) were lost in
 * extraction — verify upstream.
 */
503 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
504 void *data, u16 data_len)
506 struct mgmt_rp_read_ext_index_list *rp;
511 bt_dev_dbg(hdev, "sock %p", sk);
513 read_lock(&hci_dev_list_lock);
516 list_for_each_entry(d, &hci_dev_list, list) {
517 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
521 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
523 read_unlock(&hci_dev_list_lock);
528 list_for_each_entry(d, &hci_dev_list, list) {
529 if (hci_dev_test_flag(d, HCI_SETUP) ||
530 hci_dev_test_flag(d, HCI_CONFIG) ||
531 hci_dev_test_flag(d, HCI_USER_CHANNEL))
534 /* Devices marked as raw-only are neither configured
535 * nor unconfigured controllers.
537 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
540 if (d->dev_type == HCI_PRIMARY) {
541 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
542 rp->entry[count].type = 0x01;
544 rp->entry[count].type = 0x00;
545 } else if (d->dev_type == HCI_AMP) {
546 rp->entry[count].type = 0x02;
551 rp->entry[count].bus = d->bus;
552 rp->entry[count++].index = cpu_to_le16(d->id);
553 bt_dev_dbg(hdev, "Added hci%u", d->id);
556 rp->num_controllers = cpu_to_le16(count);
558 read_unlock(&hci_dev_list_lock);
560 /* If this command is called at least once, then all the
561 * default index and unconfigured index events are disabled
562 * and from now on only extended index events are used.
564 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
565 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
566 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
568 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
569 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
570 struct_size(rp, entry, count));
577 static bool is_configured(struct hci_dev *hdev)
579 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
580 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
583 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
584 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
585 !bacmp(&hdev->public_addr, BDADDR_ANY))
591 static __le32 get_missing_options(struct hci_dev *hdev)
595 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
596 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
597 options |= MGMT_OPTION_EXTERNAL_CONFIG;
599 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
600 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
601 !bacmp(&hdev->public_addr, BDADDR_ANY))
602 options |= MGMT_OPTION_PUBLIC_ADDRESS;
604 return cpu_to_le32(options);
607 static int new_options(struct hci_dev *hdev, struct sock *skip)
609 __le32 options = get_missing_options(hdev);
611 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
612 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
615 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
617 __le32 options = get_missing_options(hdev);
619 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
623 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
624 void *data, u16 data_len)
626 struct mgmt_rp_read_config_info rp;
629 bt_dev_dbg(hdev, "sock %p", sk);
633 memset(&rp, 0, sizeof(rp));
634 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
636 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
637 options |= MGMT_OPTION_EXTERNAL_CONFIG;
639 if (hdev->set_bdaddr)
640 options |= MGMT_OPTION_PUBLIC_ADDRESS;
642 rp.supported_options = cpu_to_le32(options);
643 rp.missing_options = get_missing_options(hdev);
645 hci_dev_unlock(hdev);
647 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the bitmask of PHYs the controller supports: BR/EDR packet
 * types derived from LMP feature bits, LE 1M always when LE-capable,
 * LE 2M/Coded from the LE feature page.
 * NOTE(review): closing braces and some structural lines were lost in
 * extraction — verify upstream before relying on nesting.
 */
651 static u32 get_supported_phys(struct hci_dev *hdev)
653 u32 supported_phys = 0;
655 if (lmp_bredr_capable(hdev)) {
656 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
658 if (hdev->features[0][0] & LMP_3SLOT)
659 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
661 if (hdev->features[0][0] & LMP_5SLOT)
662 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
664 if (lmp_edr_2m_capable(hdev)) {
665 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
667 if (lmp_edr_3slot_capable(hdev))
668 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
670 if (lmp_edr_5slot_capable(hdev))
671 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
673 if (lmp_edr_3m_capable(hdev)) {
674 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
676 if (lmp_edr_3slot_capable(hdev))
677 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
679 if (lmp_edr_5slot_capable(hdev))
680 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
685 if (lmp_le_capable(hdev)) {
686 supported_phys |= MGMT_PHY_LE_1M_TX;
687 supported_phys |= MGMT_PHY_LE_1M_RX;
689 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
690 supported_phys |= MGMT_PHY_LE_2M_TX;
691 supported_phys |= MGMT_PHY_LE_2M_RX;
694 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
695 supported_phys |= MGMT_PHY_LE_CODED_TX;
696 supported_phys |= MGMT_PHY_LE_CODED_RX;
700 return supported_phys;
/* Build the bitmask of currently *selected* PHYs. For BR/EDR the
 * selection is derived from hdev->pkt_type — note EDR bits are
 * inverted there (a set HCI_2DHx/HCI_3DHx bit means "do not use"),
 * hence the !(pkt_type & ...) tests. LE selection comes from the
 * default TX/RX PHY preferences.
 * NOTE(review): closing braces were lost in extraction — verify
 * nesting upstream.
 */
703 static u32 get_selected_phys(struct hci_dev *hdev)
705 u32 selected_phys = 0;
707 if (lmp_bredr_capable(hdev)) {
708 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
710 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
711 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
713 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
714 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
716 if (lmp_edr_2m_capable(hdev)) {
717 if (!(hdev->pkt_type & HCI_2DH1))
718 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
720 if (lmp_edr_3slot_capable(hdev) &&
721 !(hdev->pkt_type & HCI_2DH3))
722 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
724 if (lmp_edr_5slot_capable(hdev) &&
725 !(hdev->pkt_type & HCI_2DH5))
726 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
728 if (lmp_edr_3m_capable(hdev)) {
729 if (!(hdev->pkt_type & HCI_3DH1))
730 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
732 if (lmp_edr_3slot_capable(hdev) &&
733 !(hdev->pkt_type & HCI_3DH3))
734 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
736 if (lmp_edr_5slot_capable(hdev) &&
737 !(hdev->pkt_type & HCI_3DH5))
738 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
743 if (lmp_le_capable(hdev)) {
744 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
745 selected_phys |= MGMT_PHY_LE_1M_TX;
747 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
748 selected_phys |= MGMT_PHY_LE_1M_RX;
750 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
751 selected_phys |= MGMT_PHY_LE_2M_TX;
753 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
754 selected_phys |= MGMT_PHY_LE_2M_RX;
756 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
757 selected_phys |= MGMT_PHY_LE_CODED_TX;
759 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
760 selected_phys |= MGMT_PHY_LE_CODED_RX;
763 return selected_phys;
766 static u32 get_configurable_phys(struct hci_dev *hdev)
768 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
769 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Build the bitmask of settings this controller can support,
 * advertised via READ_INFO: the always-available basics, plus
 * BR/EDR-, SSP-, SC- and LE-dependent ones.
 * NOTE(review): the "u32 settings = 0;" declaration, some closing
 * braces and the final "return settings;" were lost in extraction —
 * verify upstream.
 */
772 static u32 get_supported_settings(struct hci_dev *hdev)
776 settings |= MGMT_SETTING_POWERED;
777 settings |= MGMT_SETTING_BONDABLE;
778 settings |= MGMT_SETTING_DEBUG_KEYS;
779 settings |= MGMT_SETTING_CONNECTABLE;
780 settings |= MGMT_SETTING_DISCOVERABLE;
782 if (lmp_bredr_capable(hdev)) {
783 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
784 settings |= MGMT_SETTING_FAST_CONNECTABLE;
785 settings |= MGMT_SETTING_BREDR;
786 settings |= MGMT_SETTING_LINK_SECURITY;
788 if (lmp_ssp_capable(hdev)) {
789 settings |= MGMT_SETTING_SSP;
790 if (IS_ENABLED(CONFIG_BT_HS))
791 settings |= MGMT_SETTING_HS;
794 if (lmp_sc_capable(hdev))
795 settings |= MGMT_SETTING_SECURE_CONN;
797 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
799 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
802 if (lmp_le_capable(hdev)) {
803 settings |= MGMT_SETTING_LE;
804 settings |= MGMT_SETTING_SECURE_CONN;
805 settings |= MGMT_SETTING_PRIVACY;
806 settings |= MGMT_SETTING_STATIC_ADDRESS;
808 /* When the experimental feature for LL Privacy support is
809 * enabled, then advertising is no longer supported.
811 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
812 settings |= MGMT_SETTING_ADVERTISING;
815 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
817 settings |= MGMT_SETTING_CONFIGURATION;
819 settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Build the bitmask of settings currently *active* on @hdev by
 * sampling the corresponding hdev flags one by one.
 * NOTE(review): the "u32 settings = 0;" declaration and the final
 * "return settings;" were lost in extraction — verify upstream.
 */
824 static u32 get_current_settings(struct hci_dev *hdev)
828 if (hdev_is_powered(hdev))
829 settings |= MGMT_SETTING_POWERED;
831 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
832 settings |= MGMT_SETTING_CONNECTABLE;
834 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
835 settings |= MGMT_SETTING_FAST_CONNECTABLE;
837 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
838 settings |= MGMT_SETTING_DISCOVERABLE;
840 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
841 settings |= MGMT_SETTING_BONDABLE;
843 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
844 settings |= MGMT_SETTING_BREDR;
846 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
847 settings |= MGMT_SETTING_LE;
849 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
850 settings |= MGMT_SETTING_LINK_SECURITY;
852 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
853 settings |= MGMT_SETTING_SSP;
855 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
856 settings |= MGMT_SETTING_HS;
858 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
859 settings |= MGMT_SETTING_ADVERTISING;
861 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
862 settings |= MGMT_SETTING_SECURE_CONN;
864 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
865 settings |= MGMT_SETTING_DEBUG_KEYS;
867 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
868 settings |= MGMT_SETTING_PRIVACY;
870 /* The current setting for static address has two purposes. The
871 * first is to indicate if the static address will be used and
872 * the second is to indicate if it is actually set.
874 * This means if the static address is not configured, this flag
875 * will never be set. If the address is configured, then if the
876 * address is actually used decides if the flag is set or not.
878 * For single mode LE only controllers and dual-mode controllers
879 * with BR/EDR disabled, the existence of the static address will
882 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
883 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
884 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
885 if (bacmp(&hdev->static_addr, BDADDR_ANY))
886 settings |= MGMT_SETTING_STATIC_ADDRESS;
889 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
890 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
895 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
897 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
900 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
901 struct hci_dev *hdev,
904 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
907 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
909 struct mgmt_pending_cmd *cmd;
911 /* If there's a pending mgmt command the flags will not yet have
912 * their final values, so check for this first.
914 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
916 struct mgmt_mode *cp = cmd->param;
918 return LE_AD_GENERAL;
919 else if (cp->val == 0x02)
920 return LE_AD_LIMITED;
922 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
923 return LE_AD_LIMITED;
924 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
925 return LE_AD_GENERAL;
931 bool mgmt_get_connectable(struct hci_dev *hdev)
933 struct mgmt_pending_cmd *cmd;
935 /* If there's a pending mgmt command the flag will not yet have
936 * it's final value, so check for this first.
938 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
940 struct mgmt_mode *cp = cmd->param;
945 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
948 static void service_cache_off(struct work_struct *work)
950 struct hci_dev *hdev = container_of(work, struct hci_dev,
952 struct hci_request req;
954 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
957 hci_req_init(&req, hdev);
961 __hci_req_update_eir(&req);
962 __hci_req_update_class(&req);
964 hci_dev_unlock(hdev);
966 hci_req_run(&req, NULL);
969 static void rpa_expired(struct work_struct *work)
971 struct hci_dev *hdev = container_of(work, struct hci_dev,
973 struct hci_request req;
975 bt_dev_dbg(hdev, "");
977 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
979 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
982 /* The generation of a new RPA and programming it into the
983 * controller happens in the hci_req_enable_advertising()
986 hci_req_init(&req, hdev);
987 if (ext_adv_capable(hdev))
988 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
990 __hci_req_enable_advertising(&req);
991 hci_req_run(&req, NULL);
994 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
996 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
999 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1000 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1002 /* Non-mgmt controlled devices get this bit set
1003 * implicitly so that pairing works for them, however
1004 * for mgmt we require user-space to explicitly enable
1007 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1010 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1011 void *data, u16 data_len)
1013 struct mgmt_rp_read_info rp;
1015 bt_dev_dbg(hdev, "sock %p", sk);
1019 memset(&rp, 0, sizeof(rp));
1021 bacpy(&rp.bdaddr, &hdev->bdaddr);
1023 rp.version = hdev->hci_ver;
1024 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1026 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1027 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1029 memcpy(rp.dev_class, hdev->dev_class, 3);
1031 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1032 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1034 hci_dev_unlock(hdev);
1036 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1040 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1045 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1046 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1047 hdev->dev_class, 3);
1049 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1050 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1053 name_len = strlen(hdev->dev_name);
1054 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1055 hdev->dev_name, name_len);
1057 name_len = strlen(hdev->short_name);
1058 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1059 hdev->short_name, name_len);
1064 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1065 void *data, u16 data_len)
1068 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1071 bt_dev_dbg(hdev, "sock %p", sk);
1073 memset(&buf, 0, sizeof(buf));
1077 bacpy(&rp->bdaddr, &hdev->bdaddr);
1079 rp->version = hdev->hci_ver;
1080 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1082 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1083 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1086 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1087 rp->eir_len = cpu_to_le16(eir_len);
1089 hci_dev_unlock(hdev);
1091 /* If this command is called at least once, then the events
1092 * for class of device and local name changes are disabled
1093 * and only the new extended controller information event
1096 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1097 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1098 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1100 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1101 sizeof(*rp) + eir_len);
1104 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1107 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1110 memset(buf, 0, sizeof(buf));
1112 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1113 ev->eir_len = cpu_to_le16(eir_len);
1115 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1116 sizeof(*ev) + eir_len,
1117 HCI_MGMT_EXT_INFO_EVENTS, skip);
1120 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1122 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1124 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1128 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1130 bt_dev_dbg(hdev, "status 0x%02x", status);
1132 if (hci_conn_count(hdev) == 0) {
1133 cancel_delayed_work(&hdev->power_off);
1134 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1138 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1140 struct mgmt_ev_advertising_added ev;
1142 ev.instance = instance;
1144 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1147 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1150 struct mgmt_ev_advertising_removed ev;
1152 ev.instance = instance;
1154 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1157 static void cancel_adv_timeout(struct hci_dev *hdev)
1159 if (hdev->adv_instance_timeout) {
1160 hdev->adv_instance_timeout = 0;
1161 cancel_delayed_work(&hdev->adv_instance_expire);
1165 static int clean_up_hci_state(struct hci_dev *hdev)
1167 struct hci_request req;
1168 struct hci_conn *conn;
1169 bool discov_stopped;
1172 hci_req_init(&req, hdev);
1174 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1175 test_bit(HCI_PSCAN, &hdev->flags)) {
1177 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1180 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1182 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1183 __hci_req_disable_advertising(&req);
1185 discov_stopped = hci_req_stop_discovery(&req);
1187 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1188 /* 0x15 == Terminated due to Power Off */
1189 __hci_abort_conn(&req, conn, 0x15);
1192 err = hci_req_run(&req, clean_up_hci_complete);
1193 if (!err && discov_stopped)
1194 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1199 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1202 struct mgmt_mode *cp = data;
1203 struct mgmt_pending_cmd *cmd;
1206 bt_dev_dbg(hdev, "sock %p", sk);
1208 if (cp->val != 0x00 && cp->val != 0x01)
1209 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1210 MGMT_STATUS_INVALID_PARAMS);
1214 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1215 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1220 if (!!cp->val == hdev_is_powered(hdev)) {
1221 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1225 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1232 queue_work(hdev->req_workqueue, &hdev->power_on);
1235 /* Disconnect connections, stop scans, etc */
1236 err = clean_up_hci_state(hdev);
1238 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1239 HCI_POWER_OFF_TIMEOUT);
1241 /* ENODATA means there were no HCI commands queued */
1242 if (err == -ENODATA) {
1243 cancel_delayed_work(&hdev->power_off);
1244 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1250 hci_dev_unlock(hdev);
1254 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1256 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1258 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1259 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1262 int mgmt_new_settings(struct hci_dev *hdev)
1264 return new_settings(hdev, NULL);
1269 struct hci_dev *hdev;
1273 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1275 struct cmd_lookup *match = data;
1277 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1279 list_del(&cmd->list);
1281 if (match->sk == NULL) {
1282 match->sk = cmd->sk;
1283 sock_hold(match->sk);
1286 mgmt_pending_free(cmd);
1289 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1293 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1294 mgmt_pending_remove(cmd);
1297 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1299 if (cmd->cmd_complete) {
1302 cmd->cmd_complete(cmd, *status);
1303 mgmt_pending_remove(cmd);
1308 cmd_status_rsp(cmd, data);
1311 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1313 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1314 cmd->param, cmd->param_len);
1317 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1319 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1320 cmd->param, sizeof(struct mgmt_addr_info));
1323 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1325 if (!lmp_bredr_capable(hdev))
1326 return MGMT_STATUS_NOT_SUPPORTED;
1327 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1328 return MGMT_STATUS_REJECTED;
1330 return MGMT_STATUS_SUCCESS;
1333 static u8 mgmt_le_support(struct hci_dev *hdev)
1335 if (!lmp_le_capable(hdev))
1336 return MGMT_STATUS_NOT_SUPPORTED;
1337 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1338 return MGMT_STATUS_REJECTED;
1340 return MGMT_STATUS_SUCCESS;
1343 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1345 struct mgmt_pending_cmd *cmd;
1347 bt_dev_dbg(hdev, "status 0x%02x", status);
1351 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1356 u8 mgmt_err = mgmt_status(status);
1357 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1358 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1362 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1363 hdev->discov_timeout > 0) {
1364 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1365 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1368 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1369 new_settings(hdev, cmd->sk);
1372 mgmt_pending_remove(cmd);
1375 hci_dev_unlock(hdev);
1378 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1381 struct mgmt_cp_set_discoverable *cp = data;
1382 struct mgmt_pending_cmd *cmd;
1386 bt_dev_dbg(hdev, "sock %p", sk);
1388 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1389 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1390 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1391 MGMT_STATUS_REJECTED);
1393 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1394 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1395 MGMT_STATUS_INVALID_PARAMS);
1397 timeout = __le16_to_cpu(cp->timeout);
1399 /* Disabling discoverable requires that no timeout is set,
1400 * and enabling limited discoverable requires a timeout.
1402 if ((cp->val == 0x00 && timeout > 0) ||
1403 (cp->val == 0x02 && timeout == 0))
1404 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1405 MGMT_STATUS_INVALID_PARAMS);
1409 if (!hdev_is_powered(hdev) && timeout > 0) {
1410 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1411 MGMT_STATUS_NOT_POWERED);
1415 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1416 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1417 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1422 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1423 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1424 MGMT_STATUS_REJECTED);
1428 if (hdev->advertising_paused) {
1429 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1434 if (!hdev_is_powered(hdev)) {
1435 bool changed = false;
1437 /* Setting limited discoverable when powered off is
1438 * not a valid operation since it requires a timeout
1439 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1441 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1442 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1446 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1451 err = new_settings(hdev, sk);
1456 /* If the current mode is the same, then just update the timeout
1457 * value with the new value. And if only the timeout gets updated,
1458 * then no need for any HCI transactions.
1460 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1461 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1462 HCI_LIMITED_DISCOVERABLE)) {
1463 cancel_delayed_work(&hdev->discov_off);
1464 hdev->discov_timeout = timeout;
1466 if (cp->val && hdev->discov_timeout > 0) {
1467 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1468 queue_delayed_work(hdev->req_workqueue,
1469 &hdev->discov_off, to);
1472 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1476 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1482 /* Cancel any potential discoverable timeout that might be
1483 * still active and store new timeout value. The arming of
1484 * the timeout happens in the complete handler.
1486 cancel_delayed_work(&hdev->discov_off);
1487 hdev->discov_timeout = timeout;
1490 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1492 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1494 /* Limited discoverable mode */
1495 if (cp->val == 0x02)
1496 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1498 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1500 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1504 hci_dev_unlock(hdev);
1508 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1510 struct mgmt_pending_cmd *cmd;
1512 bt_dev_dbg(hdev, "status 0x%02x", status);
1516 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1521 u8 mgmt_err = mgmt_status(status);
1522 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1526 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1527 new_settings(hdev, cmd->sk);
1530 mgmt_pending_remove(cmd);
1533 hci_dev_unlock(hdev);
1536 static int set_connectable_update_settings(struct hci_dev *hdev,
1537 struct sock *sk, u8 val)
1539 bool changed = false;
1542 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1546 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1548 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1549 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1552 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1557 hci_req_update_scan(hdev);
1558 hci_update_background_scan(hdev);
1559 return new_settings(hdev, sk);
1565 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1568 struct mgmt_mode *cp = data;
1569 struct mgmt_pending_cmd *cmd;
1572 bt_dev_dbg(hdev, "sock %p", sk);
1574 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1575 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1576 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1577 MGMT_STATUS_REJECTED);
1579 if (cp->val != 0x00 && cp->val != 0x01)
1580 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1581 MGMT_STATUS_INVALID_PARAMS);
1585 if (!hdev_is_powered(hdev)) {
1586 err = set_connectable_update_settings(hdev, sk, cp->val);
1590 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1591 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1597 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1604 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1606 if (hdev->discov_timeout > 0)
1607 cancel_delayed_work(&hdev->discov_off);
1609 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1610 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1611 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1614 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1618 hci_dev_unlock(hdev);
1622 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1625 struct mgmt_mode *cp = data;
1629 bt_dev_dbg(hdev, "sock %p", sk);
1631 if (cp->val != 0x00 && cp->val != 0x01)
1632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1633 MGMT_STATUS_INVALID_PARAMS);
1638 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1640 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1642 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1647 /* In limited privacy mode the change of bondable mode
1648 * may affect the local advertising address.
1650 if (hdev_is_powered(hdev) &&
1651 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1652 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1653 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1654 queue_work(hdev->req_workqueue,
1655 &hdev->discoverable_update);
1657 err = new_settings(hdev, sk);
1661 hci_dev_unlock(hdev);
1665 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1668 struct mgmt_mode *cp = data;
1669 struct mgmt_pending_cmd *cmd;
1673 bt_dev_dbg(hdev, "sock %p", sk);
1675 status = mgmt_bredr_support(hdev);
1677 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1680 if (cp->val != 0x00 && cp->val != 0x01)
1681 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1682 MGMT_STATUS_INVALID_PARAMS);
1686 if (!hdev_is_powered(hdev)) {
1687 bool changed = false;
1689 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1690 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1694 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1699 err = new_settings(hdev, sk);
1704 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1705 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1712 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1713 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1717 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1723 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1725 mgmt_pending_remove(cmd);
1730 hci_dev_unlock(hdev);
1734 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1736 struct mgmt_mode *cp = data;
1737 struct mgmt_pending_cmd *cmd;
1741 bt_dev_dbg(hdev, "sock %p", sk);
1743 status = mgmt_bredr_support(hdev);
1745 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1747 if (!lmp_ssp_capable(hdev))
1748 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1749 MGMT_STATUS_NOT_SUPPORTED);
1751 if (cp->val != 0x00 && cp->val != 0x01)
1752 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1753 MGMT_STATUS_INVALID_PARAMS);
1757 if (!hdev_is_powered(hdev)) {
1761 changed = !hci_dev_test_and_set_flag(hdev,
1764 changed = hci_dev_test_and_clear_flag(hdev,
1767 changed = hci_dev_test_and_clear_flag(hdev,
1770 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1773 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1778 err = new_settings(hdev, sk);
1783 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1784 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1789 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1790 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1794 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1800 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1801 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1802 sizeof(cp->val), &cp->val);
1804 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1806 mgmt_pending_remove(cmd);
1811 hci_dev_unlock(hdev);
1815 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1817 struct mgmt_mode *cp = data;
1822 bt_dev_dbg(hdev, "sock %p", sk);
1824 if (!IS_ENABLED(CONFIG_BT_HS))
1825 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1826 MGMT_STATUS_NOT_SUPPORTED);
1828 status = mgmt_bredr_support(hdev);
1830 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1832 if (!lmp_ssp_capable(hdev))
1833 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1834 MGMT_STATUS_NOT_SUPPORTED);
1836 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1837 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1838 MGMT_STATUS_REJECTED);
1840 if (cp->val != 0x00 && cp->val != 0x01)
1841 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1842 MGMT_STATUS_INVALID_PARAMS);
1846 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1847 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1853 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1855 if (hdev_is_powered(hdev)) {
1856 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1857 MGMT_STATUS_REJECTED);
1861 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1864 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1869 err = new_settings(hdev, sk);
1872 hci_dev_unlock(hdev);
1876 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1878 struct cmd_lookup match = { NULL, hdev };
1883 u8 mgmt_err = mgmt_status(status);
1885 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1890 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1892 new_settings(hdev, match.sk);
1897 /* Make sure the controller has a good default for
1898 * advertising data. Restrict the update to when LE
1899 * has actually been enabled. During power on, the
1900 * update in powered_update_hci will take care of it.
1902 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1903 struct hci_request req;
1904 hci_req_init(&req, hdev);
1905 if (ext_adv_capable(hdev)) {
1908 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1910 __hci_req_update_scan_rsp_data(&req, 0x00);
1912 __hci_req_update_adv_data(&req, 0x00);
1913 __hci_req_update_scan_rsp_data(&req, 0x00);
1915 hci_req_run(&req, NULL);
1916 hci_update_background_scan(hdev);
1920 hci_dev_unlock(hdev);
1923 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1925 struct mgmt_mode *cp = data;
1926 struct hci_cp_write_le_host_supported hci_cp;
1927 struct mgmt_pending_cmd *cmd;
1928 struct hci_request req;
1932 bt_dev_dbg(hdev, "sock %p", sk);
1934 if (!lmp_le_capable(hdev))
1935 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1936 MGMT_STATUS_NOT_SUPPORTED);
1938 if (cp->val != 0x00 && cp->val != 0x01)
1939 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1940 MGMT_STATUS_INVALID_PARAMS);
1942 /* Bluetooth single mode LE only controllers or dual-mode
1943 * controllers configured as LE only devices, do not allow
1944 * switching LE off. These have either LE enabled explicitly
1945 * or BR/EDR has been previously switched off.
1947 * When trying to enable an already enabled LE, then gracefully
1948 * send a positive response. Trying to disable it however will
1949 * result into rejection.
1951 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1952 if (cp->val == 0x01)
1953 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1955 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1956 MGMT_STATUS_REJECTED);
1962 enabled = lmp_host_le_capable(hdev);
1965 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1967 if (!hdev_is_powered(hdev) || val == enabled) {
1968 bool changed = false;
1970 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1971 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1975 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1976 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1980 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1985 err = new_settings(hdev, sk);
1990 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1991 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1992 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1997 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2003 hci_req_init(&req, hdev);
2005 memset(&hci_cp, 0, sizeof(hci_cp));
2009 hci_cp.simul = 0x00;
2011 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2012 __hci_req_disable_advertising(&req);
2014 if (ext_adv_capable(hdev))
2015 __hci_req_clear_ext_adv_sets(&req);
2018 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2021 err = hci_req_run(&req, le_enable_complete);
2023 mgmt_pending_remove(cmd);
2026 hci_dev_unlock(hdev);
2030 /* This is a helper function to test for pending mgmt commands that can
2031 * cause CoD or EIR HCI commands. We can only allow one such pending
2032 * mgmt command at a time since otherwise we cannot easily track what
2033 * the current values are, will be, and based on that calculate if a new
2034 * HCI command needs to be sent and if yes with what value.
2036 static bool pending_eir_or_class(struct hci_dev *hdev)
2038 struct mgmt_pending_cmd *cmd;
2040 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2041 switch (cmd->opcode) {
2042 case MGMT_OP_ADD_UUID:
2043 case MGMT_OP_REMOVE_UUID:
2044 case MGMT_OP_SET_DEV_CLASS:
2045 case MGMT_OP_SET_POWERED:
2053 static const u8 bluetooth_base_uuid[] = {
2054 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2055 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2058 static u8 get_uuid_size(const u8 *uuid)
2062 if (memcmp(uuid, bluetooth_base_uuid, 12))
2065 val = get_unaligned_le32(&uuid[12]);
2072 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2074 struct mgmt_pending_cmd *cmd;
2078 cmd = pending_find(mgmt_op, hdev);
2082 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2083 mgmt_status(status), hdev->dev_class, 3);
2085 mgmt_pending_remove(cmd);
2088 hci_dev_unlock(hdev);
2091 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2093 bt_dev_dbg(hdev, "status 0x%02x", status);
2095 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2098 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2100 struct mgmt_cp_add_uuid *cp = data;
2101 struct mgmt_pending_cmd *cmd;
2102 struct hci_request req;
2103 struct bt_uuid *uuid;
2106 bt_dev_dbg(hdev, "sock %p", sk);
2110 if (pending_eir_or_class(hdev)) {
2111 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2116 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2122 memcpy(uuid->uuid, cp->uuid, 16);
2123 uuid->svc_hint = cp->svc_hint;
2124 uuid->size = get_uuid_size(cp->uuid);
2126 list_add_tail(&uuid->list, &hdev->uuids);
2128 hci_req_init(&req, hdev);
2130 __hci_req_update_class(&req);
2131 __hci_req_update_eir(&req);
2133 err = hci_req_run(&req, add_uuid_complete);
2135 if (err != -ENODATA)
2138 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2139 hdev->dev_class, 3);
2143 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2152 hci_dev_unlock(hdev);
2156 static bool enable_service_cache(struct hci_dev *hdev)
2158 if (!hdev_is_powered(hdev))
2161 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2162 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2170 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2172 bt_dev_dbg(hdev, "status 0x%02x", status);
2174 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2177 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2180 struct mgmt_cp_remove_uuid *cp = data;
2181 struct mgmt_pending_cmd *cmd;
2182 struct bt_uuid *match, *tmp;
2183 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2184 struct hci_request req;
2187 bt_dev_dbg(hdev, "sock %p", sk);
2191 if (pending_eir_or_class(hdev)) {
2192 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2197 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2198 hci_uuids_clear(hdev);
2200 if (enable_service_cache(hdev)) {
2201 err = mgmt_cmd_complete(sk, hdev->id,
2202 MGMT_OP_REMOVE_UUID,
2203 0, hdev->dev_class, 3);
2212 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2213 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2216 list_del(&match->list);
2222 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2223 MGMT_STATUS_INVALID_PARAMS);
2228 hci_req_init(&req, hdev);
2230 __hci_req_update_class(&req);
2231 __hci_req_update_eir(&req);
2233 err = hci_req_run(&req, remove_uuid_complete);
2235 if (err != -ENODATA)
2238 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2239 hdev->dev_class, 3);
2243 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2252 hci_dev_unlock(hdev);
2256 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2258 bt_dev_dbg(hdev, "status 0x%02x", status);
2260 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2263 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2266 struct mgmt_cp_set_dev_class *cp = data;
2267 struct mgmt_pending_cmd *cmd;
2268 struct hci_request req;
2271 bt_dev_dbg(hdev, "sock %p", sk);
2273 if (!lmp_bredr_capable(hdev))
2274 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2275 MGMT_STATUS_NOT_SUPPORTED);
2279 if (pending_eir_or_class(hdev)) {
2280 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2285 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2286 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2287 MGMT_STATUS_INVALID_PARAMS);
2291 hdev->major_class = cp->major;
2292 hdev->minor_class = cp->minor;
2294 if (!hdev_is_powered(hdev)) {
2295 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2296 hdev->dev_class, 3);
2300 hci_req_init(&req, hdev);
2302 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2303 hci_dev_unlock(hdev);
2304 cancel_delayed_work_sync(&hdev->service_cache);
2306 __hci_req_update_eir(&req);
2309 __hci_req_update_class(&req);
2311 err = hci_req_run(&req, set_class_complete);
2313 if (err != -ENODATA)
2316 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2317 hdev->dev_class, 3);
2321 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2330 hci_dev_unlock(hdev);
2334 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2337 struct mgmt_cp_load_link_keys *cp = data;
2338 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2339 sizeof(struct mgmt_link_key_info));
2340 u16 key_count, expected_len;
2344 bt_dev_dbg(hdev, "sock %p", sk);
2346 if (!lmp_bredr_capable(hdev))
2347 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2348 MGMT_STATUS_NOT_SUPPORTED);
2350 key_count = __le16_to_cpu(cp->key_count);
2351 if (key_count > max_key_count) {
2352 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2354 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2355 MGMT_STATUS_INVALID_PARAMS);
2358 expected_len = struct_size(cp, keys, key_count);
2359 if (expected_len != len) {
2360 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2362 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2363 MGMT_STATUS_INVALID_PARAMS);
2366 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2367 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2368 MGMT_STATUS_INVALID_PARAMS);
2370 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2373 for (i = 0; i < key_count; i++) {
2374 struct mgmt_link_key_info *key = &cp->keys[i];
2376 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2377 return mgmt_cmd_status(sk, hdev->id,
2378 MGMT_OP_LOAD_LINK_KEYS,
2379 MGMT_STATUS_INVALID_PARAMS);
2384 hci_link_keys_clear(hdev);
2387 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2389 changed = hci_dev_test_and_clear_flag(hdev,
2390 HCI_KEEP_DEBUG_KEYS);
2393 new_settings(hdev, NULL);
2395 for (i = 0; i < key_count; i++) {
2396 struct mgmt_link_key_info *key = &cp->keys[i];
2398 if (hci_is_blocked_key(hdev,
2399 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2401 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2406 /* Always ignore debug keys and require a new pairing if
2407 * the user wants to use them.
2409 if (key->type == HCI_LK_DEBUG_COMBINATION)
2412 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2413 key->type, key->pin_len, NULL);
2416 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2418 hci_dev_unlock(hdev);
2423 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2424 u8 addr_type, struct sock *skip_sk)
2426 struct mgmt_ev_device_unpaired ev;
2428 bacpy(&ev.addr.bdaddr, bdaddr);
2429 ev.addr.type = addr_type;
2431 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2435 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2438 struct mgmt_cp_unpair_device *cp = data;
2439 struct mgmt_rp_unpair_device rp;
2440 struct hci_conn_params *params;
2441 struct mgmt_pending_cmd *cmd;
2442 struct hci_conn *conn;
2446 memset(&rp, 0, sizeof(rp));
2447 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2448 rp.addr.type = cp->addr.type;
2450 if (!bdaddr_type_is_valid(cp->addr.type))
2451 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2452 MGMT_STATUS_INVALID_PARAMS,
2455 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2456 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2457 MGMT_STATUS_INVALID_PARAMS,
2462 if (!hdev_is_powered(hdev)) {
2463 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2464 MGMT_STATUS_NOT_POWERED, &rp,
2469 if (cp->addr.type == BDADDR_BREDR) {
2470 /* If disconnection is requested, then look up the
2471 * connection. If the remote device is connected, it
2472 * will be later used to terminate the link.
2474 * Setting it to NULL explicitly will cause no
2475 * termination of the link.
2478 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2483 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2485 err = mgmt_cmd_complete(sk, hdev->id,
2486 MGMT_OP_UNPAIR_DEVICE,
2487 MGMT_STATUS_NOT_PAIRED, &rp,
2495 /* LE address type */
2496 addr_type = le_addr_type(cp->addr.type);
2498 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2499 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2501 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2502 MGMT_STATUS_NOT_PAIRED, &rp,
2507 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2509 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2514 /* Defer clearing up the connection parameters until closing to
2515 * give a chance of keeping them if a repairing happens.
2517 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2519 /* Disable auto-connection parameters if present */
2520 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2522 if (params->explicit_connect)
2523 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2525 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2528 /* If disconnection is not requested, then clear the connection
2529 * variable so that the link is not terminated.
2531 if (!cp->disconnect)
2535 /* If the connection variable is set, then termination of the
2536 * link is requested.
2539 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2541 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2545 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2552 cmd->cmd_complete = addr_cmd_complete;
2554 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2556 mgmt_pending_remove(cmd);
2559 hci_dev_unlock(hdev);
2563 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2566 struct mgmt_cp_disconnect *cp = data;
2567 struct mgmt_rp_disconnect rp;
2568 struct mgmt_pending_cmd *cmd;
2569 struct hci_conn *conn;
2572 bt_dev_dbg(hdev, "sock %p", sk);
2574 memset(&rp, 0, sizeof(rp));
2575 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2576 rp.addr.type = cp->addr.type;
2578 if (!bdaddr_type_is_valid(cp->addr.type))
2579 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2580 MGMT_STATUS_INVALID_PARAMS,
2585 if (!test_bit(HCI_UP, &hdev->flags)) {
2586 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2587 MGMT_STATUS_NOT_POWERED, &rp,
2592 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2593 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2594 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2598 if (cp->addr.type == BDADDR_BREDR)
2599 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2602 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2603 le_addr_type(cp->addr.type));
2605 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2606 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2607 MGMT_STATUS_NOT_CONNECTED, &rp,
2612 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2618 cmd->cmd_complete = generic_cmd_complete;
2620 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2622 mgmt_pending_remove(cmd);
2625 hci_dev_unlock(hdev);
2629 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2631 switch (link_type) {
2633 switch (addr_type) {
2634 case ADDR_LE_DEV_PUBLIC:
2635 return BDADDR_LE_PUBLIC;
2638 /* Fallback to LE Random address type */
2639 return BDADDR_LE_RANDOM;
2643 /* Fallback to BR/EDR type */
2644 return BDADDR_BREDR;
2648 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2651 struct mgmt_rp_get_connections *rp;
2656 bt_dev_dbg(hdev, "sock %p", sk);
2660 if (!hdev_is_powered(hdev)) {
2661 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2662 MGMT_STATUS_NOT_POWERED);
2667 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2668 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2672 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2679 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2680 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2682 bacpy(&rp->addr[i].bdaddr, &c->dst);
2683 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2684 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2689 rp->conn_count = cpu_to_le16(i);
2691 /* Recalculate length in case of filtered SCO connections, etc */
2692 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2693 struct_size(rp, addr, i));
2698 hci_dev_unlock(hdev);
2702 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2703 struct mgmt_cp_pin_code_neg_reply *cp)
2705 struct mgmt_pending_cmd *cmd;
2708 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2713 cmd->cmd_complete = addr_cmd_complete;
2715 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2716 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2718 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to
 * the controller for the BR/EDR connection identified by cp->addr.
 *
 * Fails with NOT_POWERED if the adapter is down and NOT_CONNECTED if no
 * ACL link to the peer exists. If the link requires a 16-digit PIN
 * (BT_SECURITY_HIGH) and the supplied one is shorter, the request is
 * converted into a negative reply instead.
 */
2723 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2726 struct hci_conn *conn;
2727 struct mgmt_cp_pin_code_reply *cp = data;
2728 struct hci_cp_pin_code_reply reply;
2729 struct mgmt_pending_cmd *cmd;
2732 bt_dev_dbg(hdev, "sock %p", sk);
2736 if (!hdev_is_powered(hdev)) {
2737 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2738 MGMT_STATUS_NOT_POWERED);
/* PIN pairing only applies to BR/EDR, hence the ACL_LINK lookup. */
2742 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2744 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2745 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a full 16-byte PIN; anything shorter is
 * rejected by negatively replying to the controller on the user's
 * behalf.
 */
2749 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2750 struct mgmt_cp_pin_code_neg_reply ncp;
2752 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2754 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2756 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2758 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2759 MGMT_STATUS_INVALID_PARAMS);
2764 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2770 cmd->cmd_complete = addr_cmd_complete;
/* Build the HCI payload: address, declared PIN length, PIN bytes. */
2772 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2773 reply.pin_len = cp->pin_len;
2774 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2776 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
/* Drop the pending entry if the HCI send failed. */
2778 mgmt_pending_remove(cmd);
2781 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_IO_CAPABILITY: record the adapter's IO
 * capability used during pairing. Purely local state — no HCI traffic —
 * so the command completes immediately. Values above
 * SMP_IO_KEYBOARD_DISPLAY are rejected as invalid.
 */
2785 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2788 struct mgmt_cp_set_io_capability *cp = data;
2790 bt_dev_dbg(hdev, "sock %p", sk);
2792 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2793 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2794 MGMT_STATUS_INVALID_PARAMS);
/* Stored under the hdev lock; later pairing code reads this value. */
2798 hdev->io_capability = cp->io_capability;
2800 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2802 hci_dev_unlock(hdev);
2804 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Look up the pending MGMT_OP_PAIR_DEVICE command (if any) whose
 * user_data refers to @conn. Returns the matching entry, or NULL when
 * no pairing is in flight for this connection (the return statements
 * are elided in this extract — TODO confirm).
 */
2808 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2810 struct hci_dev *hdev = conn->hdev;
2811 struct mgmt_pending_cmd *cmd;
2813 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2814 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2817 if (cmd->user_data != conn)
/* Completion routine for a Pair Device request: reply to the requester
 * with the peer address and @status, detach all pairing callbacks from
 * the connection so no further notifications arrive, and drop the
 * reference taken when pairing started.
 */
2826 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2828 struct mgmt_rp_pair_device rp;
2829 struct hci_conn *conn = cmd->user_data;
2832 bacpy(&rp.addr.bdaddr, &conn->dst);
2833 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2835 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2836 status, &rp, sizeof(rp));
2838 /* So we don't get further callbacks for this connection */
2839 conn->connect_cfm_cb = NULL;
2840 conn->security_cfm_cb = NULL;
2841 conn->disconn_cfm_cb = NULL;
2843 hci_conn_drop(conn);
2845 /* The device is paired so there is no need to remove
2846 * its connection parameters anymore.
2848 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called from the SMP layer when LE pairing finishes. If a Pair Device
 * command is pending for @conn, complete it with SUCCESS or FAILED
 * depending on @complete and remove the pending entry.
 */
2855 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2857 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2858 struct mgmt_pending_cmd *cmd;
2860 cmd = find_pairing(conn);
2862 cmd->cmd_complete(cmd, status);
2863 mgmt_pending_remove(cmd);
/* Connection-level callback used for BR/EDR pairing: translate the HCI
 * status into a mgmt status and complete the pending Pair Device
 * command, if one can still be found.
 */
2867 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2869 struct mgmt_pending_cmd *cmd;
2871 BT_DBG("status %u", status);
2873 cmd = find_pairing(conn);
2875 BT_DBG("Unable to find a pending command");
2879 cmd->cmd_complete(cmd, mgmt_status(status));
2880 mgmt_pending_remove(cmd);

/* LE variant of the callback above. For LE, a successful connect alone
 * does not complete pairing (SMP finishes it via mgmt_smp_complete());
 * the conditions guarding that distinction are elided in this extract —
 * TODO confirm against the full source.
 */
2883 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2885 struct mgmt_pending_cmd *cmd;
2887 BT_DBG("status %u", status);
2892 cmd = find_pairing(conn);
2894 BT_DBG("Unable to find a pending command");
2898 cmd->cmd_complete(cmd, mgmt_status(status));
2899 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_PAIR_DEVICE: initiate pairing with the device in
 * cp->addr using the requested IO capability.
 *
 * Validates the address type and io_cap, rejects when unpowered or
 * already paired, then creates either an ACL connection (BR/EDR) or an
 * LE connection via the connect-by-scan path. A pending command is
 * registered whose completion is driven by the connection callbacks
 * installed below (BR/EDR vs LE variants).
 */
2902 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2905 struct mgmt_cp_pair_device *cp = data;
2906 struct mgmt_rp_pair_device rp;
2907 struct mgmt_pending_cmd *cmd;
2908 u8 sec_level, auth_type;
2909 struct hci_conn *conn;
2912 bt_dev_dbg(hdev, "sock %p", sk);
/* The reply always echoes the requested address, even on failure. */
2914 memset(&rp, 0, sizeof(rp));
2915 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2916 rp.addr.type = cp->addr.type;
2918 if (!bdaddr_type_is_valid(cp->addr.type))
2919 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2920 MGMT_STATUS_INVALID_PARAMS,
2923 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2924 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2925 MGMT_STATUS_INVALID_PARAMS,
2930 if (!hdev_is_powered(hdev)) {
2931 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2932 MGMT_STATUS_NOT_POWERED, &rp,
2937 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2938 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2939 MGMT_STATUS_ALREADY_PAIRED, &rp,
/* Dedicated bonding at medium security is the fixed policy for
 * mgmt-initiated pairing.
 */
2944 sec_level = BT_SECURITY_MEDIUM;
2945 auth_type = HCI_AT_DEDICATED_BONDING;
2947 if (cp->addr.type == BDADDR_BREDR) {
2948 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2949 auth_type, CONN_REASON_PAIR_DEVICE);
2951 u8 addr_type = le_addr_type(cp->addr.type);
2952 struct hci_conn_params *p;
2954 /* When pairing a new device, it is expected to remember
2955 * this device for future connections. Adding the connection
2956 * parameter information ahead of time allows tracking
2957 * of the slave preferred values and will speed up any
2958 * further connection establishment.
2960 * If connection parameters already exist, then they
2961 * will be kept and this function does nothing.
2963 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2965 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2966 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2968 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2969 sec_level, HCI_LE_CONN_TIMEOUT,
2970 CONN_REASON_PAIR_DEVICE);
/* Map the connect error to the closest mgmt status code. */
2976 if (PTR_ERR(conn) == -EBUSY)
2977 status = MGMT_STATUS_BUSY;
2978 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2979 status = MGMT_STATUS_NOT_SUPPORTED;
2980 else if (PTR_ERR(conn) == -ECONNREFUSED)
2981 status = MGMT_STATUS_REJECTED;
2983 status = MGMT_STATUS_CONNECT_FAILED;
2985 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2986 status, &rp, sizeof(rp));
/* A non-NULL connect_cfm_cb means some other operation already owns
 * this connection's callbacks — refuse rather than clobber them.
 */
2990 if (conn->connect_cfm_cb) {
2991 hci_conn_drop(conn);
2992 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2993 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2997 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3000 hci_conn_drop(conn);
3004 cmd->cmd_complete = pairing_complete;
3006 /* For LE, just connecting isn't a proof that the pairing finished */
3007 if (cp->addr.type == BDADDR_BREDR) {
3008 conn->connect_cfm_cb = pairing_complete_cb;
3009 conn->security_cfm_cb = pairing_complete_cb;
3010 conn->disconn_cfm_cb = pairing_complete_cb;
3012 conn->connect_cfm_cb = le_pairing_complete_cb;
3013 conn->security_cfm_cb = le_pairing_complete_cb;
3014 conn->disconn_cfm_cb = le_pairing_complete_cb;
3017 conn->io_capability = cp->io_cap;
/* Hold a reference for the lifetime of the pending command. */
3018 cmd->user_data = hci_conn_get(conn);
/* If the link is already up and secure enough, finish immediately. */
3020 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3021 hci_conn_security(conn, sec_level, auth_type, true)) {
3022 cmd->cmd_complete(cmd, 0);
3023 mgmt_pending_remove(cmd);
3029 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort an in-progress Pair
 * Device request for the given address. The pending pairing command is
 * completed with CANCELLED, any stored link key / SMP state for the
 * address is removed, and the link is torn down if it was created
 * solely for pairing.
 */
3033 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3036 struct mgmt_addr_info *addr = data;
3037 struct mgmt_pending_cmd *cmd;
3038 struct hci_conn *conn;
3041 bt_dev_dbg(hdev, "sock %p", sk);
3045 if (!hdev_is_powered(hdev)) {
3046 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3047 MGMT_STATUS_NOT_POWERED);
/* There must be a pending Pair Device command to cancel. */
3051 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3053 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3054 MGMT_STATUS_INVALID_PARAMS);
3058 conn = cmd->user_data;
/* The cancel must target the same peer the pairing was started for. */
3060 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3061 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3062 MGMT_STATUS_INVALID_PARAMS);
3066 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3067 mgmt_pending_remove(cmd);
3069 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3070 addr, sizeof(*addr));
3072 /* Since user doesn't want to proceed with the connection, abort any
3073 * ongoing pairing and then terminate the link if it was created
3074 * because of the pair device action.
3076 if (addr->type == BDADDR_BREDR)
3077 hci_remove_link_key(hdev, &addr->bdaddr);
3079 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3080 le_addr_type(addr->type));
3082 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3083 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3086 hci_dev_unlock(hdev);
/* Common implementation for all user pairing responses (PIN negative
 * reply, user confirm/passkey replies and their negatives).
 *
 * @mgmt_op: the mgmt opcode being answered (used for replies).
 * @hci_op:  the HCI opcode to forward for BR/EDR links.
 * @passkey: only meaningful for HCI_OP_USER_PASSKEY_REPLY.
 *
 * LE responses are routed to the SMP layer and completed synchronously;
 * BR/EDR responses are forwarded to the controller with a pending
 * command tracking the round-trip.
 */
3090 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3091 struct mgmt_addr_info *addr, u16 mgmt_op,
3092 u16 hci_op, __le32 passkey)
3094 struct mgmt_pending_cmd *cmd;
3095 struct hci_conn *conn;
3100 if (!hdev_is_powered(hdev)) {
3101 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3102 MGMT_STATUS_NOT_POWERED, addr,
3107 if (addr->type == BDADDR_BREDR)
3108 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3110 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3111 le_addr_type(addr->type));
3114 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3115 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go through SMP, not HCI. */
3120 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3121 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3123 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3124 MGMT_STATUS_SUCCESS, addr,
3127 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3128 MGMT_STATUS_FAILED, addr,
3134 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3140 cmd->cmd_complete = addr_cmd_complete;
3142 /* Continue with pairing via HCI */
3143 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3144 struct hci_cp_user_passkey_reply cp;
3146 bacpy(&cp.bdaddr, &addr->bdaddr);
3147 cp.passkey = passkey;
3148 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
/* All other replies carry only the bdaddr as payload. */
3150 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3154 mgmt_pending_remove(cmd);
3157 hci_dev_unlock(hdev);
/* The following five handlers are thin wrappers that map each user
 * pairing-response mgmt opcode onto user_pairing_resp() with the
 * matching HCI opcode. Only user_confirm_reply() has a visible extra
 * length check (its payload carries more than an address).
 */
3161 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3162 void *data, u16 len)
3164 struct mgmt_cp_pin_code_neg_reply *cp = data;
3166 bt_dev_dbg(hdev, "sock %p", sk);
3168 return user_pairing_resp(sk, hdev, &cp->addr,
3169 MGMT_OP_PIN_CODE_NEG_REPLY,
3170 HCI_OP_PIN_CODE_NEG_REPLY, 0);

3173 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3176 struct mgmt_cp_user_confirm_reply *cp = data;
3178 bt_dev_dbg(hdev, "sock %p", sk);
/* Exact-size check: the confirm reply has no variable-length tail. */
3180 if (len != sizeof(*cp))
3181 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3182 MGMT_STATUS_INVALID_PARAMS);
3184 return user_pairing_resp(sk, hdev, &cp->addr,
3185 MGMT_OP_USER_CONFIRM_REPLY,
3186 HCI_OP_USER_CONFIRM_REPLY, 0);

3189 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3190 void *data, u16 len)
3192 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3194 bt_dev_dbg(hdev, "sock %p", sk);
3196 return user_pairing_resp(sk, hdev, &cp->addr,
3197 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3198 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);

3201 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3204 struct mgmt_cp_user_passkey_reply *cp = data;
3206 bt_dev_dbg(hdev, "sock %p", sk);
/* The only wrapper forwarding a passkey value. */
3208 return user_pairing_resp(sk, hdev, &cp->addr,
3209 MGMT_OP_USER_PASSKEY_REPLY,
3210 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);

3213 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3214 void *data, u16 len)
3216 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3218 bt_dev_dbg(hdev, "sock %p", sk);
3220 return user_pairing_resp(sk, hdev, &cp->addr,
3221 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3222 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Expire the current advertising instance if it carries any of @flags
 * (e.g. local name or appearance just changed and the advertised data
 * is now stale), then schedule the next instance via an HCI request.
 */
3225 static void adv_expire(struct hci_dev *hdev, u32 flags)
3227 struct adv_info *adv_instance;
3228 struct hci_request req;
3231 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3235 /* stop if current instance doesn't need to be changed */
3236 if (!(adv_instance->flags & flags))
3239 cancel_adv_timeout(hdev);
3241 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3245 hci_req_init(&req, hdev);
3246 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
/* Fire-and-forget: no completion callback needed here. */
3251 hci_req_run(&req, NULL);
/* HCI request completion for Set Local Name: report success or the
 * mapped HCI error to the pending command's socket, and expire any
 * advertising instance that embeds the local name so it gets refreshed.
 */
3254 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3256 struct mgmt_cp_set_local_name *cp;
3257 struct mgmt_pending_cmd *cmd;
3259 bt_dev_dbg(hdev, "status 0x%02x", status);
3263 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3270 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3271 mgmt_status(status));
3273 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Name is part of advertising data when this flag is set. */
3276 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3277 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3280 mgmt_pending_remove(cmd);
3283 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LOCAL_NAME: update the adapter's friendly and
 * short names.
 *
 * Short-circuits when nothing changes. When the adapter is powered off
 * only the cached names are updated and a name-changed event is
 * broadcast; when powered, an HCI request updates name/EIR (BR/EDR) and
 * scan response data (LE advertising), completing asynchronously via
 * set_name_complete().
 */
3286 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3289 struct mgmt_cp_set_local_name *cp = data;
3290 struct mgmt_pending_cmd *cmd;
3291 struct hci_request req;
3294 bt_dev_dbg(hdev, "sock %p", sk);
3298 /* If the old values are the same as the new ones just return a
3299 * direct command complete event.
3301 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3302 !memcmp(hdev->short_name, cp->short_name,
3303 sizeof(hdev->short_name))) {
3304 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* The short name never requires HCI traffic, so store it right away. */
3309 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3311 if (!hdev_is_powered(hdev)) {
3312 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3314 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3319 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3320 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3321 ext_info_changed(hdev, sk);
3326 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3332 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3334 hci_req_init(&req, hdev);
3336 if (lmp_bredr_capable(hdev)) {
3337 __hci_req_update_name(&req);
3338 __hci_req_update_eir(&req);
3341 /* The name is stored in the scan response data and so
3342 * no need to udpate the advertising data here.
3344 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3345 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3347 err = hci_req_run(&req, set_name_complete);
3349 mgmt_pending_remove(cmd);
3352 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_APPEARANCE: store the LE appearance value.
 * LE-only (rejected with NOT_SUPPORTED otherwise). If the value changed
 * while advertising, expire instances that advertise the appearance so
 * they are regenerated, and notify extended-info listeners.
 */
3356 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3359 struct mgmt_cp_set_appearance *cp = data;
3363 bt_dev_dbg(hdev, "sock %p", sk);
3365 if (!lmp_le_capable(hdev))
3366 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3367 MGMT_STATUS_NOT_SUPPORTED);
3369 appearance = le16_to_cpu(cp->appearance);
3373 if (hdev->appearance != appearance) {
3374 hdev->appearance = appearance;
3376 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3377 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3379 ext_info_changed(hdev, sk);
3382 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3385 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_PHY_CONFIGURATION: report the supported,
 * currently selected, and configurable PHYs. Read-only, answered
 * synchronously from cached state under the hdev lock.
 */
3390 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3391 void *data, u16 len)
3393 struct mgmt_rp_get_phy_configuration rp;
3395 bt_dev_dbg(hdev, "sock %p", sk);
3399 memset(&rp, 0, sizeof(rp));
3401 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3402 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3403 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3405 hci_dev_unlock(hdev);
3407 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event carrying the currently
 * selected PHYs to all mgmt listeners except @skip.
 */
3411 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3413 struct mgmt_ev_phy_configuration_changed ev;
3415 memset(&ev, 0, sizeof(ev));
3417 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3419 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* HCI request completion for LE Set Default PHY (issued from
 * set_phy_configuration()): complete the pending mgmt command with the
 * mapped status and, on success, broadcast the configuration change to
 * other listeners.
 */
3423 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3424 u16 opcode, struct sk_buff *skb)
3426 struct mgmt_pending_cmd *cmd;
3428 bt_dev_dbg(hdev, "status 0x%02x", status);
3432 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3437 mgmt_cmd_status(cmd->sk, hdev->id,
3438 MGMT_OP_SET_PHY_CONFIGURATION,
3439 mgmt_status(status));
3441 mgmt_cmd_complete(cmd->sk, hdev->id,
3442 MGMT_OP_SET_PHY_CONFIGURATION, 0,
/* The requesting socket is skipped; it already got the reply. */
3445 mgmt_phy_configuration_changed(hdev, cmd->sk);
3448 mgmt_pending_remove(cmd);
3451 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PHY_CONFIGURATION: apply a new PHY selection.
 *
 * Validation: the selection must be a subset of the supported PHYs and
 * must keep every non-configurable (always-on) PHY selected. BR/EDR
 * packet-type bits are translated locally into hdev->pkt_type; note the
 * inverted sense of the EDR bits (HCI_2DHx/HCI_3DHx are "shall not use"
 * bits, so selecting a PHY clears the bit). If the LE portion of the
 * selection changed, an HCI LE Set Default PHY request is issued and
 * completed asynchronously via set_default_phy_complete().
 */
3454 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3455 void *data, u16 len)
3457 struct mgmt_cp_set_phy_configuration *cp = data;
3458 struct hci_cp_le_set_default_phy cp_phy;
3459 struct mgmt_pending_cmd *cmd;
3460 struct hci_request req;
3461 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3462 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3463 bool changed = false;
3466 bt_dev_dbg(hdev, "sock %p", sk);
3468 configurable_phys = get_configurable_phys(hdev);
3469 supported_phys = get_supported_phys(hdev);
3470 selected_phys = __le32_to_cpu(cp->selected_phys);
3472 if (selected_phys & ~supported_phys)
3473 return mgmt_cmd_status(sk, hdev->id,
3474 MGMT_OP_SET_PHY_CONFIGURATION,
3475 MGMT_STATUS_INVALID_PARAMS);
3477 unconfigure_phys = supported_phys & ~configurable_phys;
/* Mandatory PHYs cannot be deselected. */
3479 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3480 return mgmt_cmd_status(sk, hdev->id,
3481 MGMT_OP_SET_PHY_CONFIGURATION,
3482 MGMT_STATUS_INVALID_PARAMS);
3484 if (selected_phys == get_selected_phys(hdev))
3485 return mgmt_cmd_complete(sk, hdev->id,
3486 MGMT_OP_SET_PHY_CONFIGURATION,
3491 if (!hdev_is_powered(hdev)) {
3492 err = mgmt_cmd_status(sk, hdev->id,
3493 MGMT_OP_SET_PHY_CONFIGURATION,
3494 MGMT_STATUS_REJECTED);
3498 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3499 err = mgmt_cmd_status(sk, hdev->id,
3500 MGMT_OP_SET_PHY_CONFIGURATION,
/* BR 1M multi-slot bits are positive "may use" packet types... */
3505 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3506 pkt_type |= (HCI_DH3 | HCI_DM3);
3508 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3510 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3511 pkt_type |= (HCI_DH5 | HCI_DM5);
3513 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* ...while EDR bits are "shall not use", hence the inverted logic. */
3515 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3516 pkt_type &= ~HCI_2DH1;
3518 pkt_type |= HCI_2DH1;
3520 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3521 pkt_type &= ~HCI_2DH3;
3523 pkt_type |= HCI_2DH3;
3525 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3526 pkt_type &= ~HCI_2DH5;
3528 pkt_type |= HCI_2DH5;
3530 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3531 pkt_type &= ~HCI_3DH1;
3533 pkt_type |= HCI_3DH1;
3535 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3536 pkt_type &= ~HCI_3DH3;
3538 pkt_type |= HCI_3DH3;
3540 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3541 pkt_type &= ~HCI_3DH5;
3543 pkt_type |= HCI_3DH5;
3545 if (pkt_type != hdev->pkt_type) {
3546 hdev->pkt_type = pkt_type;
/* If only the BR/EDR part changed, no HCI command is needed; reply
 * now and broadcast the change if anything was updated.
 */
3550 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3551 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3553 mgmt_phy_configuration_changed(hdev, sk);
3555 err = mgmt_cmd_complete(sk, hdev->id,
3556 MGMT_OP_SET_PHY_CONFIGURATION,
3562 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3569 hci_req_init(&req, hdev);
3571 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bit 0/1 = "no preference" for TX/RX respectively. */
3573 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3574 cp_phy.all_phys |= 0x01;
3576 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3577 cp_phy.all_phys |= 0x02;
3579 if (selected_phys & MGMT_PHY_LE_1M_TX)
3580 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3582 if (selected_phys & MGMT_PHY_LE_2M_TX)
3583 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3585 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3586 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3588 if (selected_phys & MGMT_PHY_LE_1M_RX)
3589 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3591 if (selected_phys & MGMT_PHY_LE_2M_RX)
3592 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3594 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3595 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3597 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3599 err = hci_req_run_skb(&req, set_default_phy_complete);
3601 mgmt_pending_remove(cmd);
3604 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BLOCKED_KEYS: replace the adapter's blocked
 * key list with the one supplied by userspace.
 *
 * The key_count is bounded so the struct_size() length computation
 * cannot overflow u16, and the payload length must match exactly.
 * The existing list is cleared before the new entries are added
 * (RCU list insertion under the hdev lock).
 */
3609 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3612 int err = MGMT_STATUS_SUCCESS;
3613 struct mgmt_cp_set_blocked_keys *keys = data;
/* Largest key_count whose total payload still fits in a u16 length. */
3614 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3615 sizeof(struct mgmt_blocked_key_info));
3616 u16 key_count, expected_len;
3619 bt_dev_dbg(hdev, "sock %p", sk);
3621 key_count = __le16_to_cpu(keys->key_count);
3622 if (key_count > max_key_count) {
3623 bt_dev_err(hdev, "too big key_count value %u", key_count);
3624 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3625 MGMT_STATUS_INVALID_PARAMS);
3628 expected_len = struct_size(keys, keys, key_count);
3629 if (expected_len != len) {
3630 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3633 MGMT_STATUS_INVALID_PARAMS);
/* Full replacement semantics: old entries are dropped first. */
3638 hci_blocked_keys_clear(hdev);
3640 for (i = 0; i < keys->key_count; ++i) {
3641 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3644 err = MGMT_STATUS_NO_RESOURCES;
3648 b->type = keys->keys[i].type;
3649 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3650 list_add_rcu(&b->list, &hdev->blocked_keys);
3652 hci_dev_unlock(hdev);
3654 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* Handler for MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband-speech
 * setting. Only valid on controllers declaring the
 * HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED quirk, only accepts 0x00/0x01,
 * and the flag may only be flipped while the adapter is powered off.
 * On an actual change, New Settings is broadcast.
 */
3658 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3659 void *data, u16 len)
3661 struct mgmt_mode *cp = data;
3663 bool changed = false;
3665 bt_dev_dbg(hdev, "sock %p", sk);
3667 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3668 return mgmt_cmd_status(sk, hdev->id,
3669 MGMT_OP_SET_WIDEBAND_SPEECH,
3670 MGMT_STATUS_NOT_SUPPORTED);
3672 if (cp->val != 0x00 && cp->val != 0x01)
3673 return mgmt_cmd_status(sk, hdev->id,
3674 MGMT_OP_SET_WIDEBAND_SPEECH,
3675 MGMT_STATUS_INVALID_PARAMS);
3679 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3680 err = mgmt_cmd_status(sk, hdev->id,
3681 MGMT_OP_SET_WIDEBAND_SPEECH,
/* Powered adapters cannot change the setting on the fly. */
3686 if (hdev_is_powered(hdev) &&
3687 !!cp->val != hci_dev_test_flag(hdev,
3688 HCI_WIDEBAND_SPEECH_ENABLED)) {
3689 err = mgmt_cmd_status(sk, hdev->id,
3690 MGMT_OP_SET_WIDEBAND_SPEECH,
3691 MGMT_STATUS_REJECTED);
/* test-and-set / test-and-clear report whether anything changed. */
3696 changed = !hci_dev_test_and_set_flag(hdev,
3697 HCI_WIDEBAND_SPEECH_ENABLED);
3699 changed = hci_dev_test_and_clear_flag(hdev,
3700 HCI_WIDEBAND_SPEECH_ENABLED);
3702 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3707 err = new_settings(hdev, sk);
3710 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_READ_CONTROLLER_CAP: build an EIR-style TLV list
 * of controller capabilities — security flags, max encryption key
 * sizes, and (if the controller reports it) the LE TX power range —
 * and return it to the caller.
 *
 * The hdev->commands[] bitmap indexes below correspond to supported
 * HCI commands per the Core spec's supported-commands layout.
 */
3714 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3715 void *data, u16 data_len)
3718 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3721 u8 tx_power_range[2];
3723 bt_dev_dbg(hdev, "sock %p", sk);
3725 memset(&buf, 0, sizeof(buf));
3729 /* When the Read Simple Pairing Options command is supported, then
3730 * the remote public key validation is supported.
3732 if (hdev->commands[41] & 0x08)
3733 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3735 flags |= 0x02; /* Remote public key validation (LE) */
3737 /* When the Read Encryption Key Size command is supported, then the
3738 * encryption key size is enforced.
3740 if (hdev->commands[20] & 0x10)
3741 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3743 flags |= 0x08; /* Encryption key size enforcement (LE) */
3745 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3748 /* When the Read Simple Pairing Options command is supported, then
3749 * also max encryption key size information is provided.
3751 if (hdev->commands[41] & 0x08)
3752 cap_len = eir_append_le16(rp->cap, cap_len,
3753 MGMT_CAP_MAX_ENC_KEY_SIZE,
3754 hdev->max_enc_key_size)
/* SMP max key size is a compile-time constant, always reported. */
3756 cap_len = eir_append_le16(rp->cap, cap_len,
3757 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3758 SMP_MAX_ENC_KEY_SIZE);
3760 /* Append the min/max LE tx power parameters if we were able to fetch
3761 * it from the controller
3763 if (hdev->commands[38] & 0x80) {
3764 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3765 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3766 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3770 rp->cap_len = cpu_to_le16(cap_len);
3772 hci_dev_unlock(hdev);
3774 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3775 rp, sizeof(*rp) + cap_len);
3778 #ifdef CONFIG_BT_FEATURE_DEBUG
3779 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
/* Experimental-feature UUIDs are stored in reversed (little-endian)
 * byte order relative to their textual form — note the first byte
 * below matches the last octet of the UUID string above.
 */
3780 static const u8 debug_uuid[16] = {
3781 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3782 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3786 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3787 static const u8 simult_central_periph_uuid[16] = {
3788 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3789 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3792 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3793 static const u8 rpa_resolution_uuid[16] = {
3794 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3795 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* Handler for MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental
 * features currently advertised — debug logging (build-dependent),
 * simultaneous central/peripheral (when the controller's LE state
 * combinations allow it), and LL privacy / RPA resolution. Each entry
 * is a 16-byte UUID plus a 32-bit flags word (bit 0 = enabled,
 * bit 1 = causes-settings-change where used). Also subscribes the
 * caller to future experimental-feature change events.
 */
3798 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3799 void *data, u16 data_len)
3801 char buf[62]; /* Enough space for 3 features */
3802 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3806 bt_dev_dbg(hdev, "sock %p", sk);
3808 memset(&buf, 0, sizeof(buf));
3810 #ifdef CONFIG_BT_FEATURE_DEBUG
3812 flags = bt_dbg_get() ? BIT(0) : 0;
3814 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3815 rp->features[idx].flags = cpu_to_le32(flags);
/* Requires the controller to declare valid LE states and support
 * central + peripheral simultaneously.
 */
3821 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3822 (hdev->le_states[4] & 0x08) && /* Central */
3823 (hdev->le_states[4] & 0x40) && /* Peripheral */
3824 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3829 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3830 rp->features[idx].flags = cpu_to_le32(flags);
3834 if (hdev && use_ll_privacy(hdev)) {
3835 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3836 flags = BIT(0) | BIT(1);
3840 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3841 rp->features[idx].flags = cpu_to_le32(flags);
3845 rp->feature_count = cpu_to_le16(idx);
3847 /* After reading the experimental features information, enable
3848 * the events to update client on any future change.
3850 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
/* hdev may be NULL: this command is also valid on the non-controller
 * index, hence MGMT_INDEX_NONE fallback. Each feature entry is 20
 * bytes (16-byte UUID + 4-byte flags).
 */
3852 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3853 MGMT_OP_READ_EXP_FEATURES_INFO,
3854 0, rp, sizeof(*rp) + (20 * idx));
/* Broadcast an Experimental Feature Changed event for the LL privacy
 * UUID. Bit 1 is always set, indicating the toggle affects the
 * supported settings.
 */
3857 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3860 struct mgmt_ev_exp_feature_changed ev;
3862 memset(&ev, 0, sizeof(ev));
3863 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3864 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3866 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3868 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3872 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Broadcast an Experimental Feature Changed event for the debug UUID.
 * Sent on the non-controller index (NULL hdev) since debug logging is
 * a global, not per-adapter, feature.
 */
3873 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3875 struct mgmt_ev_exp_feature_changed ev;
3877 memset(&ev, 0, sizeof(ev));
3878 memcpy(ev.uuid, debug_uuid, 16);
3879 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3881 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3883 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Handler for MGMT_OP_SET_EXP_FEATURE: enable/disable an experimental
 * feature identified by UUID.
 *
 * The all-zero UUID resets every experimental feature to off. The
 * debug UUID (CONFIG_BT_FEATURE_DEBUG) must be used on the
 * non-controller index; the RPA-resolution/LL-privacy UUID requires a
 * controller index and a powered-off adapter. Unknown UUIDs return
 * NOT_SUPPORTED. Each successful change is broadcast to other
 * subscribed sockets via the *_feature_changed helpers.
 */
3887 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3888 void *data, u16 data_len)
3890 struct mgmt_cp_set_exp_feature *cp = data;
3891 struct mgmt_rp_set_exp_feature rp;
3893 bt_dev_dbg(hdev, "sock %p", sk);
/* Zero UUID: blanket disable of all experimental features. */
3895 if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3896 memset(rp.uuid, 0, 16);
3897 rp.flags = cpu_to_le32(0);
3899 #ifdef CONFIG_BT_FEATURE_DEBUG
3901 bool changed = bt_dbg_get();
3906 exp_debug_feature_changed(false, sk);
3910 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3911 bool changed = hci_dev_test_flag(hdev,
3912 HCI_ENABLE_LL_PRIVACY);
3914 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3917 exp_ll_privacy_feature_changed(false, hdev, sk);
3920 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3922 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3923 MGMT_OP_SET_EXP_FEATURE, 0,
3927 #ifdef CONFIG_BT_FEATURE_DEBUG
3928 if (!memcmp(cp->uuid, debug_uuid, 16)) {
3932 /* Command requires to use the non-controller index */
3934 return mgmt_cmd_status(sk, hdev->id,
3935 MGMT_OP_SET_EXP_FEATURE,
3936 MGMT_STATUS_INVALID_INDEX);
3938 /* Parameters are limited to a single octet */
3939 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3940 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3941 MGMT_OP_SET_EXP_FEATURE,
3942 MGMT_STATUS_INVALID_PARAMS);
3944 /* Only boolean on/off is supported */
3945 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3946 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3947 MGMT_OP_SET_EXP_FEATURE,
3948 MGMT_STATUS_INVALID_PARAMS);
3950 val = !!cp->param[0];
3951 changed = val ? !bt_dbg_get() : bt_dbg_get();
3954 memcpy(rp.uuid, debug_uuid, 16);
3955 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3957 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3959 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3960 MGMT_OP_SET_EXP_FEATURE, 0,
/* Only notify others when the state actually changed. */
3964 exp_debug_feature_changed(val, sk);
3970 if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
3975 /* Command requires to use the controller index */
3977 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3978 MGMT_OP_SET_EXP_FEATURE,
3979 MGMT_STATUS_INVALID_INDEX);
3981 /* Changes can only be made when controller is powered down */
3982 if (hdev_is_powered(hdev))
3983 return mgmt_cmd_status(sk, hdev->id,
3984 MGMT_OP_SET_EXP_FEATURE,
3985 MGMT_STATUS_NOT_POWERED);
3987 /* Parameters are limited to a single octet */
3988 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3989 return mgmt_cmd_status(sk, hdev->id,
3990 MGMT_OP_SET_EXP_FEATURE,
3991 MGMT_STATUS_INVALID_PARAMS);
3993 /* Only boolean on/off is supported */
3994 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3995 return mgmt_cmd_status(sk, hdev->id,
3996 MGMT_OP_SET_EXP_FEATURE,
3997 MGMT_STATUS_INVALID_PARAMS);
3999 val = !!cp->param[0];
4002 changed = !hci_dev_test_flag(hdev,
4003 HCI_ENABLE_LL_PRIVACY);
4004 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
/* LL privacy is incompatible with legacy advertising state. */
4005 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4007 /* Enable LL privacy + supported settings changed */
4008 flags = BIT(0) | BIT(1);
4010 changed = hci_dev_test_flag(hdev,
4011 HCI_ENABLE_LL_PRIVACY);
4012 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
4014 /* Disable LL privacy + supported settings changed */
4018 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4019 rp.flags = cpu_to_le32(flags);
4021 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4023 err = mgmt_cmd_complete(sk, hdev->id,
4024 MGMT_OP_SET_EXP_FEATURE, 0,
4028 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Fallthrough: UUID didn't match any known experimental feature. */
4033 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4034 MGMT_OP_SET_EXP_FEATURE,
4035 MGMT_STATUS_NOT_SUPPORTED);
/* Bitmask with every per-device connection flag bit set. */
4038 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)

/* Handler for MGMT_OP_GET_DEVICE_FLAGS: report the supported and
 * current per-device flags for an entry in the BR/EDR whitelist or the
 * LE connection parameter list. Unknown devices yield INVALID_PARAMS.
 */
4040 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4043 struct mgmt_cp_get_device_flags *cp = data;
4044 struct mgmt_rp_get_device_flags rp;
4045 struct bdaddr_list_with_flags *br_params;
4046 struct hci_conn_params *params;
4047 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4048 u32 current_flags = 0;
/* Assume failure until a matching device entry is found. */
4049 u8 status = MGMT_STATUS_INVALID_PARAMS;
4051 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4052 &cp->addr.bdaddr, cp->addr.type);
4056 if (cp->addr.type == BDADDR_BREDR) {
4057 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4063 current_flags = br_params->current_flags;
4065 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4066 le_addr_type(cp->addr.type));
4071 current_flags = params->current_flags;
4074 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4075 rp.addr.type = cp->addr.type;
4076 rp.supported_flags = cpu_to_le32(supported_flags);
4077 rp.current_flags = cpu_to_le32(current_flags);
4079 status = MGMT_STATUS_SUCCESS;
4082 hci_dev_unlock(hdev);
4084 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Broadcast a Device Flags Changed event for @bdaddr to all mgmt
 * listeners except the socket that requested the change.
 */
4088 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4089 bdaddr_t *bdaddr, u8 bdaddr_type,
4090 u32 supported_flags, u32 current_flags)
4092 struct mgmt_ev_device_flags_changed ev;
4094 bacpy(&ev.addr.bdaddr, bdaddr);
4095 ev.addr.type = bdaddr_type;
4096 ev.supported_flags = cpu_to_le32(supported_flags);
4097 ev.current_flags = cpu_to_le32(current_flags);
4099 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_SET_DEVICE_FLAGS: update the per-device flags of
 * a known BR/EDR (whitelist) or LE (conn params) entry. Flags outside
 * the supported mask are rejected; on success the change is broadcast
 * via device_flags_changed().
 */
4102 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4105 struct mgmt_cp_set_device_flags *cp = data;
4106 struct bdaddr_list_with_flags *br_params;
4107 struct hci_conn_params *params;
4108 u8 status = MGMT_STATUS_INVALID_PARAMS;
4109 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4110 u32 current_flags = __le32_to_cpu(cp->current_flags);
4112 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4113 &cp->addr.bdaddr, cp->addr.type,
4114 __le32_to_cpu(current_flags));
/* Reject any bit not present in the supported mask. */
4116 if ((supported_flags | current_flags) != supported_flags) {
4117 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4118 current_flags, supported_flags);
4124 if (cp->addr.type == BDADDR_BREDR) {
4125 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4130 br_params->current_flags = current_flags;
4131 status = MGMT_STATUS_SUCCESS;
4133 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4134 &cp->addr.bdaddr, cp->addr.type);
4137 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4138 le_addr_type(cp->addr.type));
4140 params->current_flags = current_flags;
4141 status = MGMT_STATUS_SUCCESS;
4143 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4145 le_addr_type(cp->addr.type));
4150 hci_dev_unlock(hdev);
/* Notify other listeners only after the update actually succeeded. */
4152 if (status == MGMT_STATUS_SUCCESS)
4153 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4154 supported_flags, current_flags);
4156 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4157 &cp->addr, sizeof(cp->addr));
/* Broadcast MGMT_EV_ADV_MONITOR_ADDED for @handle to all mgmt sockets
 * except the originating @sk.
 */
4160 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4163 struct mgmt_ev_adv_monitor_added ev;
4165 ev.monitor_handle = cpu_to_le16(handle);
4167 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for @handle. If a
 * REMOVE_ADV_MONITOR command is pending, its socket is skipped
 * (that socket gets the command reply instead of the event).
 */
4170 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4172 struct mgmt_ev_adv_monitor_removed ev;
4173 struct mgmt_pending_cmd *cmd;
4174 struct sock *sk_skip = NULL;
4175 struct mgmt_cp_remove_adv_monitor *cp;
4177 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* NOTE(review): cp presumably points at cmd->param here (assignment
 * not visible) — a zero handle means "remove all", in which case the
 * requesting socket is not skipped. Confirm against upstream.
 */
4181 if (cp->monitor_handle)
4185 ev.monitor_handle = cpu_to_le16(handle);
4187 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* Handler for MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported and
 * enabled monitor features plus the handles of all registered
 * advertisement monitors.
 */
4190 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4191 void *data, u16 len)
4193 struct adv_monitor *monitor = NULL;
4194 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4197 __u32 supported = 0;
4199 __u16 num_handles = 0;
4200 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4202 BT_DBG("request for %s", hdev->name);
/* OR-pattern monitors are only available via the MSFT extension */
4206 if (msft_monitor_supported(hdev))
4207 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* Snapshot all registered monitor handles under the dev lock */
4209 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4210 handles[num_handles++] = monitor->handle;
4212 hci_dev_unlock(hdev);
/* Reply has a variable-length trailing handle list */
4214 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4215 rp = kmalloc(rp_size, GFP_KERNEL);
4219 /* All supported features are currently enabled */
4220 enabled = supported;
4222 rp->supported_features = cpu_to_le32(supported);
4223 rp->enabled_features = cpu_to_le32(enabled);
4224 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4225 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4226 rp->num_handles = cpu_to_le16(num_handles);
4228 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4230 err = mgmt_cmd_complete(sk, hdev->id,
4231 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4232 MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion callback for an Add Adv Patterns Monitor request
 * (with or without RSSI). On success the monitor is marked
 * registered, the ADDED event is emitted and background scanning
 * is re-evaluated; the pending mgmt command is always answered
 * and removed.
 */
4239 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4241 struct mgmt_rp_add_adv_patterns_monitor rp;
4242 struct mgmt_pending_cmd *cmd;
4243 struct adv_monitor *monitor;
/* The RSSI variant takes precedence; fall back to the plain opcode */
4248 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4250 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4255 monitor = cmd->user_data;
4256 rp.monitor_handle = cpu_to_le16(monitor->handle);
4259 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4260 hdev->adv_monitors_cnt++;
4261 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4262 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4263 hci_update_background_scan(hdev);
4266 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4267 mgmt_status(status), &rp, sizeof(rp));
4268 mgmt_pending_remove(cmd);
4271 hci_dev_unlock(hdev);
4272 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
4273 rp.monitor_handle, status);
/* Common implementation for Add Adv Patterns Monitor (plain and RSSI).
 * Rejects the request while a conflicting LE/monitor operation is
 * pending, then hands the monitor @m to hci_add_adv_monitor(). If the
 * addition must go through the controller (pending path), the reply is
 * deferred to mgmt_add_adv_patterns_monitor_complete(); otherwise the
 * monitor is registered immediately and answered here. On any failure
 * @m is freed before returning a command status.
 */
4278 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4279 struct adv_monitor *m, u8 status,
4280 void *data, u16 len, u16 op)
4282 struct mgmt_rp_add_adv_patterns_monitor rp;
4283 struct mgmt_pending_cmd *cmd;
/* Serialize against other LE / adv-monitor operations */
4292 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4293 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4294 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4295 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4296 status = MGMT_STATUS_BUSY;
4300 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4302 status = MGMT_STATUS_NO_RESOURCES;
4307 pending = hci_add_adv_monitor(hdev, m, &err);
/* Map the core-layer errno onto a mgmt status code */
4309 if (err == -ENOSPC || err == -ENOMEM)
4310 status = MGMT_STATUS_NO_RESOURCES;
4311 else if (err == -EINVAL)
4312 status = MGMT_STATUS_INVALID_PARAMS;
4314 status = MGMT_STATUS_FAILED;
4316 mgmt_pending_remove(cmd);
/* Synchronous success: no controller round-trip was needed */
4321 mgmt_pending_remove(cmd);
4322 rp.monitor_handle = cpu_to_le16(m->handle);
4323 mgmt_adv_monitor_added(sk, hdev, m->handle);
4324 m->state = ADV_MONITOR_STATE_REGISTERED;
4325 hdev->adv_monitors_cnt++;
4327 hci_dev_unlock(hdev);
4328 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4332 hci_dev_unlock(hdev);
/* Error path: the monitor is owned by us until handed off — free it */
4337 hci_free_adv_monitor(hdev, m);
4338 hci_dev_unlock(hdev);
4339 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copy RSSI threshold parameters from the mgmt request into monitor
 * @m; with a NULL @rssi, fill in permissive defaults instead
 * (the two paths below are presumably the if/else branches).
 */
4342 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4343 struct mgmt_adv_rssi_thresholds *rssi)
4346 m->rssi.low_threshold = rssi->low_threshold;
4347 m->rssi.low_threshold_timeout =
4348 __le16_to_cpu(rssi->low_threshold_timeout);
4349 m->rssi.high_threshold = rssi->high_threshold;
4350 m->rssi.high_threshold_timeout =
4351 __le16_to_cpu(rssi->high_threshold_timeout);
4352 m->rssi.sampling_period = rssi->sampling_period;
4354 /* Default values. These numbers are the least constricting
4355 * parameters for MSFT API to work, so it behaves as if there
4356 * are no rssi parameter to consider. May need to be changed
4357 * if other API are to be supported.
4359 m->rssi.low_threshold = -127;
4360 m->rssi.low_threshold_timeout = 60;
4361 m->rssi.high_threshold = -127;
4362 m->rssi.high_threshold_timeout = 0;
4363 m->rssi.sampling_period = 0;
/* Validate and copy @pattern_count patterns from the mgmt request into
 * monitor @m's pattern list. Returns a MGMT_STATUS_* code:
 * INVALID_PARAMS on a bad offset/length, NO_RESOURCES on allocation
 * failure, SUCCESS otherwise.
 */
4367 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4368 struct mgmt_adv_pattern *patterns)
4370 u8 offset = 0, length = 0;
4371 struct adv_pattern *p = NULL;
4374 for (i = 0; i < pattern_count; i++) {
4375 offset = patterns[i].offset;
4376 length = patterns[i].length;
/* Pattern must fit entirely within one AD structure */
4377 if (offset >= HCI_MAX_AD_LENGTH ||
4378 length > HCI_MAX_AD_LENGTH ||
4379 (offset + length) > HCI_MAX_AD_LENGTH)
4380 return MGMT_STATUS_INVALID_PARAMS;
4382 p = kmalloc(sizeof(*p), GFP_KERNEL);
4384 return MGMT_STATUS_NO_RESOURCES;
4386 p->ad_type = patterns[i].ad_type;
4387 p->offset = patterns[i].offset;
4388 p->length = patterns[i].length;
4389 memcpy(p->value, patterns[i].value, p->length);
4391 INIT_LIST_HEAD(&p->list);
4392 list_add(&p->list, &m->patterns);
4395 return MGMT_STATUS_SUCCESS;
/* Handler for MGMT_OP_ADD_ADV_PATTERNS_MONITOR: length-check the
 * variable-size request, allocate a monitor with default RSSI
 * parameters (rssi == NULL), parse the patterns and delegate to
 * __add_adv_patterns_monitor() which also handles the error statuses
 * accumulated here.
 */
4398 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4399 void *data, u16 len)
4401 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4402 struct adv_monitor *m = NULL;
4403 u8 status = MGMT_STATUS_SUCCESS;
4404 size_t expected_size = sizeof(*cp);
4406 BT_DBG("request for %s", hdev->name);
/* At least one pattern must follow the fixed header */
4408 if (len <= sizeof(*cp)) {
4409 status = MGMT_STATUS_INVALID_PARAMS;
4413 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4414 if (len != expected_size) {
4415 status = MGMT_STATUS_INVALID_PARAMS;
4419 m = kzalloc(sizeof(*m), GFP_KERNEL);
4421 status = MGMT_STATUS_NO_RESOURCES;
4425 INIT_LIST_HEAD(&m->patterns);
/* NULL rssi selects the permissive defaults */
4427 parse_adv_monitor_rssi(m, NULL);
4428 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4431 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4432 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* Handler for MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: identical to
 * add_adv_patterns_monitor() except the request carries explicit RSSI
 * thresholds which are copied into the monitor.
 */
4435 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4436 void *data, u16 len)
4438 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4439 struct adv_monitor *m = NULL;
4440 u8 status = MGMT_STATUS_SUCCESS;
4441 size_t expected_size = sizeof(*cp);
4443 BT_DBG("request for %s", hdev->name);
4445 if (len <= sizeof(*cp)) {
4446 status = MGMT_STATUS_INVALID_PARAMS;
4450 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4451 if (len != expected_size) {
4452 status = MGMT_STATUS_INVALID_PARAMS;
4456 m = kzalloc(sizeof(*m), GFP_KERNEL);
4458 status = MGMT_STATUS_NO_RESOURCES;
4462 INIT_LIST_HEAD(&m->patterns);
/* Caller-supplied RSSI thresholds, unlike the non-RSSI variant */
4464 parse_adv_monitor_rssi(m, &cp->rssi);
4465 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4468 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4469 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for a Remove Adv Monitor request: echo the
 * handle from the pending command back in the reply, re-evaluate
 * background scanning and clear the pending command.
 */
4472 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4474 struct mgmt_rp_remove_adv_monitor rp;
4475 struct mgmt_cp_remove_adv_monitor *cp;
4476 struct mgmt_pending_cmd *cmd;
4481 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* Handle is already little-endian in the stored request */
4486 rp.monitor_handle = cp->monitor_handle;
4489 hci_update_background_scan(hdev);
4491 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4492 mgmt_status(status), &rp, sizeof(rp));
4493 mgmt_pending_remove(cmd);
4496 hci_dev_unlock(hdev);
4497 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
4498 rp.monitor_handle, status);
/* Handler for MGMT_OP_REMOVE_ADV_MONITOR: remove the monitor with the
 * given handle, or all monitors when the handle is 0. If the removal
 * requires a controller round-trip the reply is deferred to
 * mgmt_remove_adv_monitor_complete(); otherwise it is sent here.
 */
4503 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4504 void *data, u16 len)
4506 struct mgmt_cp_remove_adv_monitor *cp = data;
4507 struct mgmt_rp_remove_adv_monitor rp;
4508 struct mgmt_pending_cmd *cmd;
4509 u16 handle = __le16_to_cpu(cp->monitor_handle);
4513 BT_DBG("request for %s", hdev->name);
4514 rp.monitor_handle = cp->monitor_handle;
/* Serialize against other LE / adv-monitor operations */
4518 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4519 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4520 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4521 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4522 status = MGMT_STATUS_BUSY;
4526 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4528 status = MGMT_STATUS_NO_RESOURCES;
/* Handle 0 means "remove all monitors" */
4533 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4535 pending = hci_remove_all_adv_monitor(hdev, &err);
4538 mgmt_pending_remove(cmd);
/* -ENOENT-style lookup failure vs. generic failure mapping */
4541 status = MGMT_STATUS_INVALID_INDEX;
4543 status = MGMT_STATUS_FAILED;
4548 /* monitor can be removed without forwarding request to controller */
4550 mgmt_pending_remove(cmd);
4551 hci_dev_unlock(hdev);
4553 return mgmt_cmd_complete(sk, hdev->id,
4554 MGMT_OP_REMOVE_ADV_MONITOR,
4555 MGMT_STATUS_SUCCESS,
4559 hci_dev_unlock(hdev);
4563 hci_dev_unlock(hdev);
4564 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* HCI completion callback for Read Local OOB Data. Translates the
 * controller reply (legacy or extended/secure-connections variant,
 * distinguished by @opcode) into a mgmt reply for the pending
 * READ_LOCAL_OOB_DATA command. For the legacy variant the reply is
 * shrunk so that the 256-bit fields are omitted.
 */
4568 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4569 u16 opcode, struct sk_buff *skb)
4571 struct mgmt_rp_read_local_oob_data mgmt_rp;
4572 size_t rp_size = sizeof(mgmt_rp);
4573 struct mgmt_pending_cmd *cmd;
4575 bt_dev_dbg(hdev, "status %u", status);
4577 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4581 if (status || !skb) {
4582 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4583 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4587 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4589 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4590 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a short controller reply before reading it */
4592 if (skb->len < sizeof(*rp)) {
4593 mgmt_cmd_status(cmd->sk, hdev->id,
4594 MGMT_OP_READ_LOCAL_OOB_DATA,
4595 MGMT_STATUS_FAILED);
4599 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4600 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy reply carries no P-256 data: truncate the mgmt reply */
4602 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4604 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4606 if (skb->len < sizeof(*rp)) {
4607 mgmt_cmd_status(cmd->sk, hdev->id,
4608 MGMT_OP_READ_LOCAL_OOB_DATA,
4609 MGMT_STATUS_FAILED);
4613 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4614 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4616 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4617 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4620 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4621 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size)
4624 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: verify the adapter is
 * powered, SSP-capable and has no request already pending, then issue
 * the appropriate HCI read (extended when BR/EDR Secure Connections is
 * enabled). The reply is sent from read_local_oob_data_complete().
 */
4627 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4628 void *data, u16 data_len)
4630 struct mgmt_pending_cmd *cmd;
4631 struct hci_request req;
4634 bt_dev_dbg(hdev, "sock %p", sk);
4638 if (!hdev_is_powered(hdev)) {
4639 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4640 MGMT_STATUS_NOT_POWERED);
4644 if (!lmp_ssp_capable(hdev)) {
4645 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4646 MGMT_STATUS_NOT_SUPPORTED);
/* Only one outstanding OOB read per adapter */
4650 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4651 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4656 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4662 hci_req_init(&req, hdev);
/* Prefer the extended variant when secure connections is available */
4664 if (bredr_sc_enabled(hdev))
4665 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4667 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4669 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4671 mgmt_pending_remove(cmd);
4674 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA. Two request sizes are
 * accepted: the legacy form with only P-192 hash/rand (BR/EDR only),
 * and the extended form with both P-192 and P-256 values. All-zero
 * hash/rand pairs are treated as "no data" for that curve.
 */
4678 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4679 void *data, u16 len)
4681 struct mgmt_addr_info *addr = data;
4684 bt_dev_dbg(hdev, "sock %p", sk);
4686 if (!bdaddr_type_is_valid(addr->type))
4687 return mgmt_cmd_complete(sk, hdev->id,
4688 MGMT_OP_ADD_REMOTE_OOB_DATA,
4689 MGMT_STATUS_INVALID_PARAMS,
4690 addr, sizeof(*addr));
/* Legacy request: P-192 data only */
4694 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4695 struct mgmt_cp_add_remote_oob_data *cp = data;
4698 if (cp->addr.type != BDADDR_BREDR) {
4699 err = mgmt_cmd_complete(sk, hdev->id,
4700 MGMT_OP_ADD_REMOTE_OOB_DATA,
4701 MGMT_STATUS_INVALID_PARAMS,
4702 &cp->addr, sizeof(cp->addr));
4706 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4707 cp->addr.type, cp->hash,
4708 cp->rand, NULL, NULL);
4710 status = MGMT_STATUS_FAILED;
4712 status = MGMT_STATUS_SUCCESS;
4714 err = mgmt_cmd_complete(sk, hdev->id,
4715 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4716 &cp->addr, sizeof(cp->addr));
4717 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4718 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4719 u8 *rand192, *hash192, *rand256, *hash256;
4722 if (bdaddr_type_is_le(cp->addr.type)) {
4723 /* Enforce zero-valued 192-bit parameters as
4724 * long as legacy SMP OOB isn't implemented.
4726 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4727 memcmp(cp->hash192, ZERO_KEY, 16)) {
4728 err = mgmt_cmd_complete(sk, hdev->id,
4729 MGMT_OP_ADD_REMOTE_OOB_DATA,
4730 MGMT_STATUS_INVALID_PARAMS,
4731 addr, sizeof(*addr));
4738 /* In case one of the P-192 values is set to zero,
4739 * then just disable OOB data for P-192.
4741 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4742 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4746 rand192 = cp->rand192;
4747 hash192 = cp->hash192;
4751 /* In case one of the P-256 values is set to zero, then just
4752 * disable OOB data for P-256.
4754 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4755 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4759 rand256 = cp->rand256;
4760 hash256 = cp->hash256;
4763 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4764 cp->addr.type, hash192, rand192,
4767 status = MGMT_STATUS_FAILED;
4769 status = MGMT_STATUS_SUCCESS;
4771 err = mgmt_cmd_complete(sk, hdev->id,
4772 MGMT_OP_ADD_REMOTE_OOB_DATA,
4773 status, &cp->addr, sizeof(cp->addr));
/* Anything else is a malformed request */
4775 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4777 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4778 MGMT_STATUS_INVALID_PARAMS);
4782 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete stored OOB data
 * for one BR/EDR address, or for all devices when the address is
 * BDADDR_ANY.
 */
4786 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4787 void *data, u16 len)
4789 struct mgmt_cp_remove_remote_oob_data *cp = data;
4793 bt_dev_dbg(hdev, "sock %p", sk);
4795 if (cp->addr.type != BDADDR_BREDR)
4796 return mgmt_cmd_complete(sk, hdev->id,
4797 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4798 MGMT_STATUS_INVALID_PARAMS,
4799 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY wipes the whole OOB data store */
4803 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4804 hci_remote_oob_data_clear(hdev);
4805 status = MGMT_STATUS_SUCCESS;
4809 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4811 status = MGMT_STATUS_INVALID_PARAMS;
4813 status = MGMT_STATUS_SUCCESS;
4816 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4817 status, &cp->addr, sizeof(cp->addr));
4819 hci_dev_unlock(hdev);
/* Completion callback for any of the three Start Discovery variants:
 * answer and remove whichever start-discovery command is pending, and
 * wake the suspend machinery if it was waiting for discovery to be
 * unpaused.
 */
4823 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4825 struct mgmt_pending_cmd *cmd;
4827 bt_dev_dbg(hdev, "status %d", status);
/* Check the three discovery opcodes in turn */
4831 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4833 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4836 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4839 cmd->cmd_complete(cmd, mgmt_status(status));
4840 mgmt_pending_remove(cmd);
4843 hci_dev_unlock(hdev);
4845 /* Handle suspend notifier */
4846 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4847 hdev->suspend_tasks)) {
4848 bt_dev_dbg(hdev, "Unpaused discovery");
4849 wake_up(&hdev->suspend_wait_q);
/* Check whether discovery @type is supported by @hdev. On failure
 * *mgmt_status holds the status code to return to userspace
 * (LE/BR/EDR support check result, or INVALID_PARAMS for an unknown
 * type).
 */
4853 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4854 uint8_t *mgmt_status)
4857 case DISCOV_TYPE_LE:
4858 *mgmt_status = mgmt_le_support(hdev);
4862 case DISCOV_TYPE_INTERLEAVED:
4863 *mgmt_status = mgmt_le_support(hdev);
4867 case DISCOV_TYPE_BREDR:
4868 *mgmt_status = mgmt_bredr_support(hdev);
4873 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation of Start Discovery and Start Limited
 * Discovery (@op selects the opcode). Validates power state,
 * discovery state and type, resets the discovery filter, queues a
 * pending command and kicks the discov_update work; the reply is sent
 * from mgmt_start_discovery_complete().
 */
4880 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4881 u16 op, void *data, u16 len)
4883 struct mgmt_cp_start_discovery *cp = data;
4884 struct mgmt_pending_cmd *cmd;
4888 bt_dev_dbg(hdev, "sock %p", sk);
4892 if (!hdev_is_powered(hdev)) {
4893 err = mgmt_cmd_complete(sk, hdev->id, op,
4894 MGMT_STATUS_NOT_POWERED,
4895 &cp->type, sizeof(cp->type));
/* Refuse while discovery or periodic inquiry is already running */
4899 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4900 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4901 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4902 &cp->type, sizeof(cp->type));
4906 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4907 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4908 &cp->type, sizeof(cp->type));
4912 /* Can't start discovery when it is paused */
4913 if (hdev->discovery_paused) {
4914 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4915 &cp->type, sizeof(cp->type));
4919 /* Clear the discovery filter first to free any previously
4920 * allocated memory for the UUID list.
4922 hci_discovery_filter_clear(hdev);
4924 hdev->discovery.type = cp->type;
4925 hdev->discovery.report_invalid_rssi = false;
4926 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4927 hdev->discovery.limited = true;
4929 hdev->discovery.limited = false;
4931 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4937 cmd->cmd_complete = generic_cmd_complete;
/* Actual scanning is started asynchronously by discov_update work */
4939 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4940 queue_work(hdev->req_workqueue, &hdev->discov_update);
4944 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_START_DISCOVERY: thin wrapper around the
 * shared start_discovery_internal() implementation.
 */
4948 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4949 void *data, u16 len)
4951 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY: same shared
 * implementation, but sets hdev->discovery.limited via the opcode.
 */
4955 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
4956 void *data, u16 len)
4958 return start_discovery_internal(sk, hdev,
4959 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete hook for Start Service Discovery: reply with the
 * stored command parameters (truncated — remainder not visible here).
 */
4963 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4966 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery
 * but with an RSSI threshold and a UUID filter list. Validates the
 * variable-length request, copies the UUID list and starts discovery
 * with result filtering enabled.
 */
4970 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4971 void *data, u16 len)
4973 struct mgmt_cp_start_service_discovery *cp = data;
4974 struct mgmt_pending_cmd *cmd;
/* Upper bound so that sizeof(*cp) + uuid_count * 16 cannot overflow u16 */
4975 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4976 u16 uuid_count, expected_len;
4980 bt_dev_dbg(hdev, "sock %p", sk);
4984 if (!hdev_is_powered(hdev)) {
4985 err = mgmt_cmd_complete(sk, hdev->id,
4986 MGMT_OP_START_SERVICE_DISCOVERY,
4987 MGMT_STATUS_NOT_POWERED,
4988 &cp->type, sizeof(cp->type));
4992 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4993 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4994 err = mgmt_cmd_complete(sk, hdev->id,
4995 MGMT_OP_START_SERVICE_DISCOVERY,
4996 MGMT_STATUS_BUSY, &cp->type,
5001 if (hdev->discovery_paused) {
5002 err = mgmt_cmd_complete(sk, hdev->id,
5003 MGMT_OP_START_SERVICE_DISCOVERY,
5004 MGMT_STATUS_BUSY, &cp->type,
5009 uuid_count = __le16_to_cpu(cp->uuid_count);
5010 if (uuid_count > max_uuid_count) {
5011 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5013 err = mgmt_cmd_complete(sk, hdev->id,
5014 MGMT_OP_START_SERVICE_DISCOVERY,
5015 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Request length must exactly match the declared UUID count */
5020 expected_len = sizeof(*cp) + uuid_count * 16;
5021 if (expected_len != len) {
5022 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5024 err = mgmt_cmd_complete(sk, hdev->id,
5025 MGMT_OP_START_SERVICE_DISCOVERY,
5026 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5031 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5032 err = mgmt_cmd_complete(sk, hdev->id,
5033 MGMT_OP_START_SERVICE_DISCOVERY,
5034 status, &cp->type, sizeof(cp->type));
5038 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5045 cmd->cmd_complete = service_discovery_cmd_complete;
5047 /* Clear the discovery filter first to free any previously
5048 * allocated memory for the UUID list.
5050 hci_discovery_filter_clear(hdev);
5052 hdev->discovery.result_filtering = true;
5053 hdev->discovery.type = cp->type;
5054 hdev->discovery.rssi = cp->rssi;
5055 hdev->discovery.uuid_count = uuid_count;
5057 if (uuid_count > 0) {
/* Take our own copy: cp points into the request buffer */
5058 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5060 if (!hdev->discovery.uuids) {
5061 err = mgmt_cmd_complete(sk, hdev->id,
5062 MGMT_OP_START_SERVICE_DISCOVERY,
5064 &cp->type, sizeof(cp->type));
5065 mgmt_pending_remove(cmd);
5070 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5071 queue_work(hdev->req_workqueue, &hdev->discov_update);
5075 hci_dev_unlock(hdev);
/* Completion callback for Stop Discovery: answer the pending command
 * and wake the suspend machinery if it was waiting for discovery to
 * be paused.
 */
5079 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5081 struct mgmt_pending_cmd *cmd;
5083 bt_dev_dbg(hdev, "status %d", status);
5087 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5089 cmd->cmd_complete(cmd, mgmt_status(status));
5090 mgmt_pending_remove(cmd);
5093 hci_dev_unlock(hdev);
5095 /* Handle suspend notifier */
5096 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5097 bt_dev_dbg(hdev, "Paused discovery");
5098 wake_up(&hdev->suspend_wait_q);
/* Handler for MGMT_OP_STOP_DISCOVERY: reject if discovery is not
 * active or the type doesn't match the running discovery, then queue
 * the stop via the discov_update work; the reply is sent from
 * mgmt_stop_discovery_complete().
 */
5102 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5105 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5106 struct mgmt_pending_cmd *cmd;
5109 bt_dev_dbg(hdev, "sock %p", sk);
5113 if (!hci_discovery_active(hdev)) {
5114 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5115 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5116 sizeof(mgmt_cp->type));
/* Type must match the discovery that was started */
5120 if (hdev->discovery.type != mgmt_cp->type) {
5121 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5122 MGMT_STATUS_INVALID_PARAMS,
5123 &mgmt_cp->type, sizeof(mgmt_cp->type));
5127 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5133 cmd->cmd_complete = generic_cmd_complete;
5135 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5136 queue_work(hdev->req_workqueue, &hdev->discov_update);
5140 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CONFIRM_NAME: mark an inquiry-cache entry's name
 * as known or needed during an active discovery, triggering name
 * resolution ordering updates when needed.
 */
5144 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5147 struct mgmt_cp_confirm_name *cp = data;
5148 struct inquiry_entry *e;
5151 bt_dev_dbg(hdev, "sock %p", sk);
5155 if (!hci_discovery_active(hdev)) {
5156 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5157 MGMT_STATUS_FAILED, &cp->addr,
5162 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5164 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5165 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5170 if (cp->name_known) {
5171 e->name_state = NAME_KNOWN;
/* Name still needed: reposition the entry in the resolve list */
5174 e->name_state = NAME_NEEDED;
5175 hci_inquiry_cache_update_resolve(hdev, e);
5178 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5179 &cp->addr, sizeof(cp->addr));
5182 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_BLOCK_DEVICE: add the address to the blacklist
 * and broadcast MGMT_EV_DEVICE_BLOCKED on success.
 */
5186 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5189 struct mgmt_cp_block_device *cp = data;
5193 bt_dev_dbg(hdev, "sock %p", sk);
5195 if (!bdaddr_type_is_valid(cp->addr.type))
5196 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5197 MGMT_STATUS_INVALID_PARAMS,
5198 &cp->addr, sizeof(cp->addr));
5202 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
5205 status = MGMT_STATUS_FAILED;
/* Notify other mgmt sockets (originator excluded via sk) */
5209 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5211 status = MGMT_STATUS_SUCCESS;
5214 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5215 &cp->addr, sizeof(cp->addr));
5217 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_UNBLOCK_DEVICE: remove the address from the
 * blacklist and broadcast MGMT_EV_DEVICE_UNBLOCKED on success;
 * a failed removal (not in the list) maps to INVALID_PARAMS.
 */
5222 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5225 struct mgmt_cp_unblock_device *cp = data;
5229 bt_dev_dbg(hdev, "sock %p", sk);
5231 if (!bdaddr_type_is_valid(cp->addr.type))
5232 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5233 MGMT_STATUS_INVALID_PARAMS,
5234 &cp->addr, sizeof(cp->addr));
5238 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
5241 status = MGMT_STATUS_INVALID_PARAMS;
5245 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5247 status = MGMT_STATUS_SUCCESS;
5250 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5251 &cp->addr, sizeof(cp->addr));
5253 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEVICE_ID: store the Device ID record
 * (source, vendor, product, version) and refresh the EIR data so the
 * new DID is advertised.
 */
5258 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5261 struct mgmt_cp_set_device_id *cp = data;
5262 struct hci_request req;
5266 bt_dev_dbg(hdev, "sock %p", sk);
5268 source = __le16_to_cpu(cp->source);
/* Valid sources: 0x0000 (disabled), 0x0001 (SIG), 0x0002 (USB IF) */
5270 if (source > 0x0002)
5271 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5272 MGMT_STATUS_INVALID_PARAMS);
5276 hdev->devid_source = source;
5277 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5278 hdev->devid_product = __le16_to_cpu(cp->product);
5279 hdev->devid_version = __le16_to_cpu(cp->version);
5281 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Push updated EIR to the controller; completion not tracked */
5284 hci_req_init(&req, hdev);
5285 __hci_req_update_eir(&req);
5286 hci_req_run(&req, NULL);
5288 hci_dev_unlock(hdev);
/* Debug-only completion callback used when re-enabling an advertising
 * instance from set_advertising_complete().
 */
5293 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5296 bt_dev_dbg(hdev, "status %d", status);
/* Completion callback for Set Advertising: propagate the result to all
 * pending SET_ADVERTISING commands, sync the HCI_ADVERTISING flag with
 * the controller state, wake the suspend machinery if waiting, and —
 * when Set Advertising was just disabled — re-schedule any configured
 * multi-instance advertising.
 */
5299 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5302 struct cmd_lookup match = { NULL, hdev };
5303 struct hci_request req;
5305 struct adv_info *adv_instance;
/* Failure: report the error to every pending command and bail out */
5311 u8 mgmt_err = mgmt_status(status);
5313 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5314 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's LE_ADV state into the mgmt setting flag */
5318 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5319 hci_dev_set_flag(hdev, HCI_ADVERTISING);
5321 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5323 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5326 new_settings(hdev, match.sk);
5331 /* Handle suspend notifier */
5332 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5333 hdev->suspend_tasks)) {
5334 bt_dev_dbg(hdev, "Paused advertising");
5335 wake_up(&hdev->suspend_wait_q);
5336 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5337 hdev->suspend_tasks)) {
5338 bt_dev_dbg(hdev, "Unpaused advertising");
5339 wake_up(&hdev->suspend_wait_q);
5342 /* If "Set Advertising" was just disabled and instance advertising was
5343 * set up earlier, then re-enable multi-instance advertising.
5345 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5346 list_empty(&hdev->adv_instances))
5349 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first configured one */
5351 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5352 struct adv_info, list);
5356 instance = adv_instance->instance;
5359 hci_req_init(&req, hdev);
5361 err = __hci_req_schedule_adv_instance(&req, instance, true);
5364 err = hci_req_run(&req, enable_advertising_instance);
5367 bt_dev_err(hdev, "failed to re-configure advertising");
5370 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_ADVERTISING (val: 0x00 off, 0x01 on,
 * 0x02 connectable). Either toggles the setting flags directly when no
 * HCI traffic is needed, or builds an HCI request to (re)configure
 * advertising, completed in set_advertising_complete().
 */
5373 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5376 struct mgmt_mode *cp = data;
5377 struct mgmt_pending_cmd *cmd;
5378 struct hci_request req;
5382 bt_dev_dbg(hdev, "sock %p", sk);
5384 status = mgmt_le_support(hdev);
5386 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5389 /* Enabling the experimental LL Privay support disables support for
5392 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5393 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5394 MGMT_STATUS_NOT_SUPPORTED);
5396 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5397 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5398 MGMT_STATUS_INVALID_PARAMS);
5400 if (hdev->advertising_paused)
5401 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5408 /* The following conditions are ones which mean that we should
5409 * not do any HCI communication but directly send a mgmt
5410 * response to user space (after toggling the flag if
5413 if (!hdev_is_powered(hdev) ||
5414 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5415 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5416 hci_conn_num(hdev, LE_LINK) > 0 ||
5417 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5418 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
/* Flag-only path: update settings without touching the controller */
5422 hdev->cur_adv_instance = 0x00;
5423 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5424 if (cp->val == 0x02)
5425 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5427 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5429 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5430 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5433 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5438 err = new_settings(hdev, sk);
/* Serialize against a concurrent SET_ADVERTISING / SET_LE */
5443 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5444 pending_find(MGMT_OP_SET_LE, hdev)) {
5445 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5450 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5456 hci_req_init(&req, hdev);
5458 if (cp->val == 0x02)
5459 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5461 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5463 cancel_adv_timeout(hdev);
5466 /* Switch to instance "0" for the Set Advertising setting.
5467 * We cannot use update_[adv|scan_rsp]_data() here as the
5468 * HCI_ADVERTISING flag is not yet set.
5470 hdev->cur_adv_instance = 0x00;
5472 if (ext_adv_capable(hdev)) {
5473 __hci_req_start_ext_adv(&req, 0x00);
5475 __hci_req_update_adv_data(&req, 0x00);
5476 __hci_req_update_scan_rsp_data(&req, 0x00);
5477 __hci_req_enable_advertising(&req);
5480 __hci_req_disable_advertising(&req);
5483 err = hci_req_run(&req, set_advertising_complete);
5485 mgmt_pending_remove(cmd);
5488 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: store a random static LE
 * address. Only allowed while powered off; a non-ANY address must be
 * a valid static address (two MSBs set, not BDADDR_NONE).
 */
5492 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5493 void *data, u16 len)
5495 struct mgmt_cp_set_static_address *cp = data;
5498 bt_dev_dbg(hdev, "sock %p", sk);
5500 if (!lmp_le_capable(hdev))
5501 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5502 MGMT_STATUS_NOT_SUPPORTED);
5504 if (hdev_is_powered(hdev))
5505 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5506 MGMT_STATUS_REJECTED);
/* BDADDR_ANY clears the static address; otherwise validate it */
5508 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5509 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5510 return mgmt_cmd_status(sk, hdev->id,
5511 MGMT_OP_SET_STATIC_ADDRESS,
5512 MGMT_STATUS_INVALID_PARAMS);
5514 /* Two most significant bits shall be set */
5515 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5516 return mgmt_cmd_status(sk, hdev->id,
5517 MGMT_OP_SET_STATIC_ADDRESS,
5518 MGMT_STATUS_INVALID_PARAMS);
5523 bacpy(&hdev->static_addr, &cp->bdaddr);
5525 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5529 err = new_settings(hdev, sk);
5532 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS: validate and store the LE scan
 * interval and window (each 0x0004-0x4000, window <= interval) and
 * restart passive background scanning so the new values take effect.
 */
5536 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5537 void *data, u16 len)
5539 struct mgmt_cp_set_scan_params *cp = data;
5540 __u16 interval, window;
5543 bt_dev_dbg(hdev, "sock %p", sk);
5545 if (!lmp_le_capable(hdev))
5546 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5547 MGMT_STATUS_NOT_SUPPORTED);
5549 interval = __le16_to_cpu(cp->interval);
/* Core-spec range for LE scan interval/window */
5551 if (interval < 0x0004 || interval > 0x4000)
5552 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5553 MGMT_STATUS_INVALID_PARAMS);
5555 window = __le16_to_cpu(cp->window);
5557 if (window < 0x0004 || window > 0x4000)
5558 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5559 MGMT_STATUS_INVALID_PARAMS);
5561 if (window > interval)
5562 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5563 MGMT_STATUS_INVALID_PARAMS);
5567 hdev->le_scan_interval = interval;
5568 hdev->le_scan_window = window;
5570 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5573 /* If background scan is running, restart it so new parameters are
5576 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5577 hdev->discovery.state == DISCOVERY_STOPPED) {
5578 struct hci_request req;
5580 hci_req_init(&req, hdev);
5582 hci_req_add_le_scan_disable(&req, false);
5583 hci_req_add_le_passive_scan(&req);
5585 hci_req_run(&req, NULL);
5588 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable: on HCI
 * failure report the translated status to the pending command's socket;
 * on success update the HCI_FAST_CONNECTABLE flag to match the
 * requested mode and send the settings response plus New Settings.
 */
5593 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5596 struct mgmt_pending_cmd *cmd;
5598 bt_dev_dbg(hdev, "status 0x%02x", status);
5602 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5607 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5608 mgmt_status(status));
5610 struct mgmt_mode *cp = cmd->param;
5613 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5615 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5617 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5618 new_settings(hdev, cmd->sk);
5621 mgmt_pending_remove(cmd);
5624 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast connectable page
 * scan parameters. Requires BR/EDR enabled and controller version
 * >= 1.2. If the device is powered the change is driven through an HCI
 * request completed by fast_connectable_complete(); otherwise only the
 * flag is flipped.
 */
5627 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5628 void *data, u16 len)
5630 struct mgmt_mode *cp = data;
5631 struct mgmt_pending_cmd *cmd;
5632 struct hci_request req;
5635 bt_dev_dbg(hdev, "sock %p", sk);
5637 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5638 hdev->hci_ver < BLUETOOTH_VER_1_2)
5639 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5640 MGMT_STATUS_NOT_SUPPORTED);
5642 if (cp->val != 0x00 && cp->val != 0x01)
5643 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5644 MGMT_STATUS_INVALID_PARAMS);
/* Only one Set Fast Connectable may be in flight at a time. */
5648 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5649 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op if the requested mode already matches the current flag. */
5654 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5655 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5660 if (!hdev_is_powered(hdev)) {
5661 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5662 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5664 new_settings(hdev, sk);
5668 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5675 hci_req_init(&req, hdev);
5677 __hci_req_write_fast_connectable(&req, cp->val);
5679 err = hci_req_run(&req, fast_connectable_complete);
5681 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5682 MGMT_STATUS_FAILED);
5683 mgmt_pending_remove(cmd);
5687 hci_dev_unlock(hdev);
/* HCI request completion callback for Set BR/EDR: on failure restore
 * (clear) the HCI_BREDR_ENABLED flag that was optimistically set before
 * the request ran and report the error; on success send the settings
 * response and broadcast New Settings.
 */
5692 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5694 struct mgmt_pending_cmd *cmd;
5696 bt_dev_dbg(hdev, "status 0x%02x", status);
5700 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5705 u8 mgmt_err = mgmt_status(status);
5707 /* We need to restore the flag if related HCI commands
5710 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5712 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5714 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5715 new_settings(hdev, cmd->sk);
5718 mgmt_pending_remove(cmd);
5721 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * controller. Requires both BR/EDR and LE support and LE currently
 * enabled. Disabling while powered is rejected, as is re-enabling when
 * a static address or Secure Connections is in use (see comment below).
 */
5724 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5726 struct mgmt_mode *cp = data;
5727 struct mgmt_pending_cmd *cmd;
5728 struct hci_request req;
5731 bt_dev_dbg(hdev, "sock %p", sk);
5733 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5734 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5735 MGMT_STATUS_NOT_SUPPORTED);
5737 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5738 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5739 MGMT_STATUS_REJECTED);
5741 if (cp->val != 0x00 && cp->val != 0x01)
5742 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5743 MGMT_STATUS_INVALID_PARAMS);
/* Requested value already matches current state: just respond. */
5747 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5748 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: update the flags directly, clearing all BR/EDR-only
 * settings when BR/EDR is being disabled.
 */
5752 if (!hdev_is_powered(hdev)) {
5754 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5755 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5756 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5757 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5758 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5761 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5763 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5767 err = new_settings(hdev, sk);
5771 /* Reject disabling when powered on */
5773 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5774 MGMT_STATUS_REJECTED);
5777 /* When configuring a dual-mode controller to operate
5778 * with LE only and using a static address, then switching
5779 * BR/EDR back on is not allowed.
5781 * Dual-mode controllers shall operate with the public
5782 * address as its identity address for BR/EDR and LE. So
5783 * reject the attempt to create an invalid configuration.
5785 * The same restrictions applies when secure connections
5786 * has been enabled. For BR/EDR this is a controller feature
5787 * while for LE it is a host stack feature. This means that
5788 * switching BR/EDR back on when secure connections has been
5789 * enabled is not a supported transaction.
5791 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5792 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5793 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5794 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5795 MGMT_STATUS_REJECTED);
5800 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5801 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5806 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5812 /* We need to flip the bit already here so that
5813 * hci_req_update_adv_data generates the correct flags.
5815 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5817 hci_req_init(&req, hdev);
5819 __hci_req_write_fast_connectable(&req, false);
5820 __hci_req_update_scan(&req);
5822 /* Since only the advertising data flags will change, there
5823 * is no need to update the scan response data.
5825 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
/* set_bredr_complete() restores the flag if the request fails. */
5827 err = hci_req_run(&req, set_bredr_complete);
5829 mgmt_pending_remove(cmd);
5832 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Secure Connections: on
 * failure report the error to the pending command's socket; on success
 * set HCI_SC_ENABLED/HCI_SC_ONLY according to the requested mode
 * (0x00 = off, 0x01 = SC enabled, 0x02 = SC-only) and send the settings
 * response plus New Settings.
 */
5836 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5838 struct mgmt_pending_cmd *cmd;
5839 struct mgmt_mode *cp;
5841 bt_dev_dbg(hdev, "status %u", status);
5845 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5850 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5851 mgmt_status(status));
5859 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5860 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5863 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5864 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5867 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5868 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5872 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5873 new_settings(hdev, cmd->sk);
5876 mgmt_pending_remove(cmd);
5878 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections mode
 * (0x00 off, 0x01 on, 0x02 SC-only). When the controller is powered
 * and BR/EDR SC capable, the change is applied via
 * HCI_OP_WRITE_SC_SUPPORT and finished in sc_enable_complete();
 * otherwise only the host flags are updated.
 */
5881 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5882 void *data, u16 len)
5884 struct mgmt_mode *cp = data;
5885 struct mgmt_pending_cmd *cmd;
5886 struct hci_request req;
5890 bt_dev_dbg(hdev, "sock %p", sk);
5892 if (!lmp_sc_capable(hdev) &&
5893 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5894 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5895 MGMT_STATUS_NOT_SUPPORTED);
/* For BR/EDR, SC builds on SSP; reject if SSP is disabled. */
5897 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5898 lmp_sc_capable(hdev) &&
5899 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5900 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5901 MGMT_STATUS_REJECTED);
5903 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5904 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5905 MGMT_STATUS_INVALID_PARAMS);
/* LE-only / powered-off path: adjust host flags without HCI traffic. */
5909 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5910 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5914 changed = !hci_dev_test_and_set_flag(hdev,
5916 if (cp->val == 0x02)
5917 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5919 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5921 changed = hci_dev_test_and_clear_flag(hdev,
5923 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5926 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5931 err = new_settings(hdev, sk);
5936 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5937 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Requested state already active (including the SC-only bit): done. */
5944 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5945 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5946 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5950 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5956 hci_req_init(&req, hdev);
5957 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5958 err = hci_req_run(&req, sc_enable_complete);
5960 mgmt_pending_remove(cmd);
5965 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: control debug key handling
 * (0x00 discard, 0x01 keep, 0x02 keep and use for pairing). Updates
 * HCI_KEEP_DEBUG_KEYS / HCI_USE_DEBUG_KEYS and, when powered with SSP
 * enabled and the "use" state changed, pushes the new debug mode to the
 * controller via HCI_OP_WRITE_SSP_DEBUG_MODE.
 */
5969 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5970 void *data, u16 len)
5972 struct mgmt_mode *cp = data;
5973 bool changed, use_changed;
5976 bt_dev_dbg(hdev, "sock %p", sk);
5978 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5979 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5980 MGMT_STATUS_INVALID_PARAMS);
5985 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5987 changed = hci_dev_test_and_clear_flag(hdev,
5988 HCI_KEEP_DEBUG_KEYS);
5990 if (cp->val == 0x02)
5991 use_changed = !hci_dev_test_and_set_flag(hdev,
5992 HCI_USE_DEBUG_KEYS);
5994 use_changed = hci_dev_test_and_clear_flag(hdev,
5995 HCI_USE_DEBUG_KEYS);
5997 if (hdev_is_powered(hdev) && use_changed &&
5998 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5999 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6000 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6001 sizeof(mode), &mode);
6004 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6009 err = new_settings(hdev, sk);
6012 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy (0x00 off,
 * 0x01 on, 0x02 limited privacy) and store the provided IRK. Only
 * allowed while powered off on an LE capable controller.
 */
6016 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6019 struct mgmt_cp_set_privacy *cp = cp_data;
6023 bt_dev_dbg(hdev, "sock %p", sk);
6025 if (!lmp_le_capable(hdev))
6026 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6027 MGMT_STATUS_NOT_SUPPORTED);
6029 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6030 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6031 MGMT_STATUS_INVALID_PARAMS);
6033 if (hdev_is_powered(hdev))
6034 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6035 MGMT_STATUS_REJECTED);
6039 /* If user space supports this command it is also expected to
6040 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6042 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enabling: store the IRK, force RPA regeneration, and mark limited
 * privacy if requested. Disabling: wipe the IRK and related flags.
 */
6045 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6046 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
6047 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6048 hci_adv_instances_set_rpa_expired(hdev, true);
6049 if (cp->privacy == 0x02)
6050 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6052 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6054 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6055 memset(hdev->irk, 0, sizeof(hdev->irk));
6056 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6057 hci_adv_instances_set_rpa_expired(hdev, false);
6058 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6061 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6066 err = new_settings(hdev, sk);
6069 hci_dev_unlock(hdev);
/* Validate one IRK entry from Load IRKs: the address must be an LE
 * public address or an LE static random address (two most significant
 * bits of the top byte set).
 */
6073 static bool irk_is_valid(struct mgmt_irk_info *irk)
6075 switch (irk->addr.type) {
6076 case BDADDR_LE_PUBLIC:
6079 case BDADDR_LE_RANDOM:
6080 /* Two most significant bits shall be set */
6081 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the stored set of Identity
 * Resolving Keys with the list supplied by user space. Validates the
 * count against the payload length and each entry's address type,
 * skips keys on the blocked-key list, and finally enables RPA
 * resolving.
 */
6089 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6092 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound keeps irk_count * entry size within a u16 total length. */
6093 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6094 sizeof(struct mgmt_irk_info));
6095 u16 irk_count, expected_len;
6098 bt_dev_dbg(hdev, "sock %p", sk);
6100 if (!lmp_le_capable(hdev))
6101 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6102 MGMT_STATUS_NOT_SUPPORTED);
6104 irk_count = __le16_to_cpu(cp->irk_count);
6105 if (irk_count > max_irk_count) {
6106 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6108 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6109 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must exactly match the received payload size. */
6112 expected_len = struct_size(cp, irks, irk_count);
6113 if (expected_len != len) {
6114 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6116 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6117 MGMT_STATUS_INVALID_PARAMS);
6120 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate every entry up front so the load is all-or-nothing. */
6122 for (i = 0; i < irk_count; i++) {
6123 struct mgmt_irk_info *key = &cp->irks[i];
6125 if (!irk_is_valid(key))
6126 return mgmt_cmd_status(sk, hdev->id,
6128 MGMT_STATUS_INVALID_PARAMS);
6133 hci_smp_irks_clear(hdev);
6135 for (i = 0; i < irk_count; i++) {
6136 struct mgmt_irk_info *irk = &cp->irks[i];
6138 if (hci_is_blocked_key(hdev,
6139 HCI_BLOCKED_KEY_TYPE_IRK,
6141 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6146 hci_add_irk(hdev, &irk->addr.bdaddr,
6147 le_addr_type(irk->addr.type), irk->val,
6151 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6153 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6155 hci_dev_unlock(hdev);
/* Validate one LTK entry from Load Long Term Keys: the master flag must
 * be 0x00 or 0x01 and the address must be LE public or LE static random
 * (two most significant bits of the top byte set).
 */
6160 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6162 if (key->master != 0x00 && key->master != 0x01)
6165 switch (key->addr.type) {
6166 case BDADDR_LE_PUBLIC:
6169 case BDADDR_LE_RANDOM:
6170 /* Two most significant bits shall be set */
6171 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace all stored SMP Long Term
 * Keys with the list supplied by user space. Validates count vs payload
 * length and each entry, skips blocked keys, and maps the mgmt key type
 * onto the SMP key type / authenticated flag before storing.
 */
6179 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6180 void *cp_data, u16 len)
6182 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound keeps key_count * entry size within a u16 total length. */
6183 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6184 sizeof(struct mgmt_ltk_info));
6185 u16 key_count, expected_len;
6188 bt_dev_dbg(hdev, "sock %p", sk);
6190 if (!lmp_le_capable(hdev))
6191 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6192 MGMT_STATUS_NOT_SUPPORTED);
6194 key_count = __le16_to_cpu(cp->key_count);
6195 if (key_count > max_key_count) {
6196 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6198 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6199 MGMT_STATUS_INVALID_PARAMS);
6202 expected_len = struct_size(cp, keys, key_count);
6203 if (expected_len != len) {
6204 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6206 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6207 MGMT_STATUS_INVALID_PARAMS);
6210 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Validate every entry up front so the load is all-or-nothing. */
6212 for (i = 0; i < key_count; i++) {
6213 struct mgmt_ltk_info *key = &cp->keys[i];
6215 if (!ltk_is_valid(key))
6216 return mgmt_cmd_status(sk, hdev->id,
6217 MGMT_OP_LOAD_LONG_TERM_KEYS,
6218 MGMT_STATUS_INVALID_PARAMS);
6223 hci_smp_ltks_clear(hdev);
6225 for (i = 0; i < key_count; i++) {
6226 struct mgmt_ltk_info *key = &cp->keys[i];
6227 u8 type, authenticated;
6229 if (hci_is_blocked_key(hdev,
6230 HCI_BLOCKED_KEY_TYPE_LTK,
6232 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Legacy (non-P256) keys pick master vs slave role based on the
 * key->master flag; P256 keys are role-agnostic.
 */
6237 switch (key->type) {
6238 case MGMT_LTK_UNAUTHENTICATED:
6239 authenticated = 0x00;
6240 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6242 case MGMT_LTK_AUTHENTICATED:
6243 authenticated = 0x01;
6244 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6246 case MGMT_LTK_P256_UNAUTH:
6247 authenticated = 0x00;
6248 type = SMP_LTK_P256;
6250 case MGMT_LTK_P256_AUTH:
6251 authenticated = 0x01;
6252 type = SMP_LTK_P256;
6254 case MGMT_LTK_P256_DEBUG:
6255 authenticated = 0x00;
6256 type = SMP_LTK_P256_DEBUG;
6262 hci_add_ltk(hdev, &key->addr.bdaddr,
6263 le_addr_type(key->addr.type), type, authenticated,
6264 key->val, key->enc_size, key->ediv, key->rand);
6267 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6270 hci_dev_unlock(hdev);
/* Complete a pending Get Conn Info command: reply with the cached
 * RSSI / TX power / max TX power on success, or with the "invalid"
 * marker values on failure, then release the connection reference taken
 * when the command was queued.
 */
6275 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6277 struct hci_conn *conn = cmd->user_data;
6278 struct mgmt_rp_get_conn_info rp;
/* The address the client asked about was saved in cmd->param. */
6281 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6283 if (status == MGMT_STATUS_SUCCESS) {
6284 rp.rssi = conn->rssi;
6285 rp.tx_power = conn->tx_power;
6286 rp.max_tx_power = conn->max_tx_power;
6288 rp.rssi = HCI_RSSI_INVALID;
6289 rp.tx_power = HCI_TX_POWER_INVALID;
6290 rp.max_tx_power = HCI_TX_POWER_INVALID;
6293 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6294 status, &rp, sizeof(rp));
6296 hci_conn_drop(conn);
/* HCI request completion callback for the Get Conn Info refresh
 * request: recover the connection handle from the last-sent command
 * (Read RSSI or Read TX Power), look up the connection and the matching
 * pending mgmt command, and finish it via conn_info_cmd_complete().
 */
6302 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
6305 struct hci_cp_read_rssi *cp;
6306 struct mgmt_pending_cmd *cmd;
6307 struct hci_conn *conn;
6311 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
6315 /* Commands sent in request are either Read RSSI or Read Transmit Power
6316 * Level so we check which one was last sent to retrieve connection
6317 * handle. Both commands have handle as first parameter so it's safe to
6318 * cast data on the same command struct.
6320 * First command sent is always Read RSSI and we fail only if it fails.
6321 * In other case we simply override error to indicate success as we
6322 * already remembered if TX power value is actually valid.
6324 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
6326 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
6327 status = MGMT_STATUS_SUCCESS;
6329 status = mgmt_status(hci_status);
6333 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
6337 handle = __le16_to_cpu(cp->handle);
6338 conn = hci_conn_hash_lookup_handle(hdev, handle);
6340 bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
/* Match the pending command by the connection it was queued for. */
6345 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
6349 cmd->cmd_complete(cmd, status);
6350 mgmt_pending_remove(cmd);
6353 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CONN_INFO handler: return RSSI and TX power information
 * for an active connection. Serves cached values while they are fresh;
 * otherwise issues Read RSSI (and Read TX Power as needed) and defers
 * the reply to conn_info_refresh_complete().
 */
6356 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6359 struct mgmt_cp_get_conn_info *cp = data;
6360 struct mgmt_rp_get_conn_info rp;
6361 struct hci_conn *conn;
6362 unsigned long conn_info_age;
6365 bt_dev_dbg(hdev, "sock %p", sk);
6367 memset(&rp, 0, sizeof(rp));
6368 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6369 rp.addr.type = cp->addr.type;
6371 if (!bdaddr_type_is_valid(cp->addr.type))
6372 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6373 MGMT_STATUS_INVALID_PARAMS,
6378 if (!hdev_is_powered(hdev)) {
6379 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6380 MGMT_STATUS_NOT_POWERED, &rp,
/* Look up the connection on the right transport for the address type. */
6385 if (cp->addr.type == BDADDR_BREDR)
6386 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6389 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6391 if (!conn || conn->state != BT_CONNECTED) {
6392 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6393 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one Get Conn Info per connection may be pending at a time. */
6398 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
6399 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6400 MGMT_STATUS_BUSY, &rp, sizeof(rp));
6404 /* To avoid client trying to guess when to poll again for information we
6405 * calculate conn info age as random value between min/max set in hdev.
6407 conn_info_age = hdev->conn_info_min_age +
6408 prandom_u32_max(hdev->conn_info_max_age -
6409 hdev->conn_info_min_age);
6411 /* Query controller to refresh cached values if they are too old or were
6414 if (time_after(jiffies, conn->conn_info_timestamp +
6415 msecs_to_jiffies(conn_info_age)) ||
6416 !conn->conn_info_timestamp) {
6417 struct hci_request req;
6418 struct hci_cp_read_tx_power req_txp_cp;
6419 struct hci_cp_read_rssi req_rssi_cp;
6420 struct mgmt_pending_cmd *cmd;
6422 hci_req_init(&req, hdev);
6423 req_rssi_cp.handle = cpu_to_le16(conn->handle);
6424 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
6427 /* For LE links TX power does not change thus we don't need to
6428 * query for it once value is known.
6430 if (!bdaddr_type_is_le(cp->addr.type) ||
6431 conn->tx_power == HCI_TX_POWER_INVALID) {
6432 req_txp_cp.handle = cpu_to_le16(conn->handle);
6433 req_txp_cp.type = 0x00;
6434 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6435 sizeof(req_txp_cp), &req_txp_cp);
6438 /* Max TX power needs to be read only once per connection */
6439 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
6440 req_txp_cp.handle = cpu_to_le16(conn->handle);
6441 req_txp_cp.type = 0x01;
6442 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6443 sizeof(req_txp_cp), &req_txp_cp);
6446 err = hci_req_run(&req, conn_info_refresh_complete);
6450 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold a reference on the connection for the lifetime of the pending
 * command; dropped in conn_info_cmd_complete().
 */
6457 hci_conn_hold(conn);
6458 cmd->user_data = hci_conn_get(conn);
6459 cmd->cmd_complete = conn_info_cmd_complete;
6461 conn->conn_info_timestamp = jiffies;
6463 /* Cache is valid, just reply with values cached in hci_conn */
6464 rp.rssi = conn->rssi;
6465 rp.tx_power = conn->tx_power;
6466 rp.max_tx_power = conn->max_tx_power;
6468 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6469 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6473 hci_dev_unlock(hdev);
/* Complete a pending Get Clock Info command: fill in the local clock
 * and, when a connection was involved, the piconet clock and accuracy,
 * then release the references (hci_dev and, if set, the connection)
 * taken when the command was queued.
 */
6477 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6479 struct hci_conn *conn = cmd->user_data;
6480 struct mgmt_rp_get_clock_info rp;
6481 struct hci_dev *hdev;
6484 memset(&rp, 0, sizeof(rp));
6485 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6490 hdev = hci_dev_get(cmd->index);
6492 rp.local_clock = cpu_to_le32(hdev->clock);
6497 rp.piconet_clock = cpu_to_le32(conn->clock);
6498 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6502 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6506 hci_conn_drop(conn);
/* HCI request completion callback for Get Clock Info: recover the
 * connection (if the last Read Clock targeted a piconet clock rather
 * than the local one), find the matching pending mgmt command and
 * finish it via clock_info_cmd_complete().
 */
6513 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6515 struct hci_cp_read_clock *hci_cp;
6516 struct mgmt_pending_cmd *cmd;
6517 struct hci_conn *conn;
6519 bt_dev_dbg(hdev, "status %u", status);
6523 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means the piconet clock of a specific connection. */
6527 if (hci_cp->which) {
6528 u16 handle = __le16_to_cpu(hci_cp->handle);
6529 conn = hci_conn_hash_lookup_handle(hdev, handle);
6534 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
6538 cmd->cmd_complete(cmd, mgmt_status(status));
6539 mgmt_pending_remove(cmd);
6542 hci_dev_unlock(hdev);
/* MGMT_OP_GET_CLOCK_INFO handler: read the local clock and, for a
 * connected BR/EDR peer, its piconet clock, via HCI Read Clock. The
 * reply is deferred to get_clock_info_complete(). Only BR/EDR addresses
 * are accepted.
 */
6545 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
6548 struct mgmt_cp_get_clock_info *cp = data;
6549 struct mgmt_rp_get_clock_info rp;
6550 struct hci_cp_read_clock hci_cp;
6551 struct mgmt_pending_cmd *cmd;
6552 struct hci_request req;
6553 struct hci_conn *conn;
6556 bt_dev_dbg(hdev, "sock %p", sk);
6558 memset(&rp, 0, sizeof(rp));
6559 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6560 rp.addr.type = cp->addr.type;
6562 if (cp->addr.type != BDADDR_BREDR)
6563 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6564 MGMT_STATUS_INVALID_PARAMS,
6569 if (!hdev_is_powered(hdev)) {
6570 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6571 MGMT_STATUS_NOT_POWERED, &rp,
/* A non-zero address selects a specific peer; it must be connected. */
6576 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6577 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6579 if (!conn || conn->state != BT_CONNECTED) {
6580 err = mgmt_cmd_complete(sk, hdev->id,
6581 MGMT_OP_GET_CLOCK_INFO,
6582 MGMT_STATUS_NOT_CONNECTED,
6590 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
6596 cmd->cmd_complete = clock_info_cmd_complete;
6598 hci_req_init(&req, hdev);
/* First Read Clock: the local clock (zeroed cp => which == 0). */
6600 memset(&hci_cp, 0, sizeof(hci_cp));
6601 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold a connection reference for the pending command; dropped in
 * clock_info_cmd_complete().
 */
6604 hci_conn_hold(conn);
6605 cmd->user_data = hci_conn_get(conn);
6607 hci_cp.handle = cpu_to_le16(conn->handle);
6608 hci_cp.which = 0x01; /* Piconet clock */
6609 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
6612 err = hci_req_run(&req, get_clock_info_complete);
6614 mgmt_pending_remove(cmd);
6617 hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address with the given
 * destination address type is currently in BT_CONNECTED state.
 */
6621 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6623 struct hci_conn *conn;
6625 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6629 if (conn->dst_type != type)
6632 if (conn->state != BT_CONNECTED)
6638 /* This function requires the caller holds hdev->lock */
/* Create (or reuse) the connection parameters entry for the given
 * identity address and set its auto-connect policy, moving the entry
 * onto the matching pend_le_conns / pend_le_reports action list.
 */
6639 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
6640 u8 addr_type, u8 auto_connect)
6642 struct hci_conn_params *params;
6644 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do when the policy is unchanged. */
6648 if (params->auto_connect == auto_connect)
6651 list_del_init(&params->action);
6653 switch (auto_connect) {
6654 case HCI_AUTO_CONN_DISABLED:
6655 case HCI_AUTO_CONN_LINK_LOSS:
6656 /* If auto connect is being disabled when we're trying to
6657 * connect to device, keep connecting.
6659 if (params->explicit_connect)
6660 list_add(&params->action, &hdev->pend_le_conns);
6662 case HCI_AUTO_CONN_REPORT:
6663 if (params->explicit_connect)
6664 list_add(&params->action, &hdev->pend_le_conns);
6666 list_add(&params->action, &hdev->pend_le_reports);
6668 case HCI_AUTO_CONN_DIRECT:
6669 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if one is not already up. */
6670 if (!is_connected(hdev, addr, addr_type))
6671 list_add(&params->action, &hdev->pend_le_conns);
6675 params->auto_connect = auto_connect;
6677 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
6678 addr, addr_type, auto_connect);
/* Emit the MGMT Device Added event for the given address/type/action,
 * skipping the socket that issued the originating command.
 */
6683 static void device_added(struct sock *sk, struct hci_dev *hdev,
6684 bdaddr_t *bdaddr, u8 type, u8 action)
6686 struct mgmt_ev_device_added ev;
6688 bacpy(&ev.addr.bdaddr, bdaddr);
6689 ev.addr.type = type;
6692 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_ADD_DEVICE handler: whitelist a device for incoming
 * connections (BR/EDR, action 0x01 only) or configure LE auto-connect
 * policy (0x00 report, 0x01 direct connect, 0x02 always connect).
 * Emits Device Added and Device Flags Changed on success.
 */
6695 static int add_device(struct sock *sk, struct hci_dev *hdev,
6696 void *data, u16 len)
6698 struct mgmt_cp_add_device *cp = data;
6699 u8 auto_conn, addr_type;
6700 struct hci_conn_params *params;
6702 u32 current_flags = 0;
6704 bt_dev_dbg(hdev, "sock %p", sk);
6706 if (!bdaddr_type_is_valid(cp->addr.type) ||
6707 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6708 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6709 MGMT_STATUS_INVALID_PARAMS,
6710 &cp->addr, sizeof(cp->addr));
6712 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6713 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6714 MGMT_STATUS_INVALID_PARAMS,
6715 &cp->addr, sizeof(cp->addr));
6719 if (cp->addr.type == BDADDR_BREDR) {
6720 /* Only incoming connections action is supported for now */
6721 if (cp->action != 0x01) {
6722 err = mgmt_cmd_complete(sk, hdev->id,
6724 MGMT_STATUS_INVALID_PARAMS,
6725 &cp->addr, sizeof(cp->addr));
6729 err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
/* Refresh page scan so the whitelisted peer can connect. */
6735 hci_req_update_scan(hdev);
6740 addr_type = le_addr_type(cp->addr.type);
/* Map the mgmt action onto the internal auto-connect policy. */
6742 if (cp->action == 0x02)
6743 auto_conn = HCI_AUTO_CONN_ALWAYS;
6744 else if (cp->action == 0x01)
6745 auto_conn = HCI_AUTO_CONN_DIRECT;
6747 auto_conn = HCI_AUTO_CONN_REPORT;
6749 /* Kernel internally uses conn_params with resolvable private
6750 * address, but Add Device allows only identity addresses.
6751 * Make sure it is enforced before calling
6752 * hci_conn_params_lookup.
6754 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6755 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6756 MGMT_STATUS_INVALID_PARAMS,
6757 &cp->addr, sizeof(cp->addr));
6761 /* If the connection parameters don't exist for this device,
6762 * they will be created and configured with defaults.
6764 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
6766 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6767 MGMT_STATUS_FAILED, &cp->addr,
6771 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6774 current_flags = params->current_flags;
6777 hci_update_background_scan(hdev);
6780 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6781 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
6782 SUPPORTED_DEVICE_FLAGS(), current_flags);
6784 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6785 MGMT_STATUS_SUCCESS, &cp->addr,
6789 hci_dev_unlock(hdev);
/* Emit the MGMT Device Removed event for the given address/type,
 * skipping the socket that issued the originating command.
 */
6793 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6794 bdaddr_t *bdaddr, u8 type)
6796 struct mgmt_ev_device_removed ev;
6798 bacpy(&ev.addr.bdaddr, bdaddr);
6799 ev.addr.type = type;
6801 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_REMOVE_DEVICE handler: remove one device (non-zero address)
 * from the whitelist / LE connection parameters, or — with BDADDR_ANY —
 * remove all whitelist entries and all non-disabled LE conn params.
 * Emits Device Removed for each removal and updates scanning state.
 */
6804 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6805 void *data, u16 len)
6807 struct mgmt_cp_remove_device *cp = data;
6810 bt_dev_dbg(hdev, "sock %p", sk);
/* Non-zero address: remove a single device. */
6814 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6815 struct hci_conn_params *params;
6818 if (!bdaddr_type_is_valid(cp->addr.type)) {
6819 err = mgmt_cmd_complete(sk, hdev->id,
6820 MGMT_OP_REMOVE_DEVICE,
6821 MGMT_STATUS_INVALID_PARAMS,
6822 &cp->addr, sizeof(cp->addr));
6826 if (cp->addr.type == BDADDR_BREDR) {
6827 err = hci_bdaddr_list_del(&hdev->whitelist,
6831 err = mgmt_cmd_complete(sk, hdev->id,
6832 MGMT_OP_REMOVE_DEVICE,
6833 MGMT_STATUS_INVALID_PARAMS,
6839 hci_req_update_scan(hdev);
6841 device_removed(sk, hdev, &cp->addr.bdaddr,
6846 addr_type = le_addr_type(cp->addr.type);
6848 /* Kernel internally uses conn_params with resolvable private
6849 * address, but Remove Device allows only identity addresses.
6850 * Make sure it is enforced before calling
6851 * hci_conn_params_lookup.
6853 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6854 err = mgmt_cmd_complete(sk, hdev->id,
6855 MGMT_OP_REMOVE_DEVICE,
6856 MGMT_STATUS_INVALID_PARAMS,
6857 &cp->addr, sizeof(cp->addr));
6861 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6864 err = mgmt_cmd_complete(sk, hdev->id,
6865 MGMT_OP_REMOVE_DEVICE,
6866 MGMT_STATUS_INVALID_PARAMS,
6867 &cp->addr, sizeof(cp->addr));
/* Entries not added via Add Device (disabled / explicit-connect)
 * cannot be removed through this command.
 */
6871 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
6872 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
6873 err = mgmt_cmd_complete(sk, hdev->id,
6874 MGMT_OP_REMOVE_DEVICE,
6875 MGMT_STATUS_INVALID_PARAMS,
6876 &cp->addr, sizeof(cp->addr));
6880 list_del(&params->action);
6881 list_del(&params->list);
6883 hci_update_background_scan(hdev);
6885 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wholesale removal. Only address type 0 is valid here. */
6887 struct hci_conn_params *p, *tmp;
6888 struct bdaddr_list *b, *btmp;
6890 if (cp->addr.type) {
6891 err = mgmt_cmd_complete(sk, hdev->id,
6892 MGMT_OP_REMOVE_DEVICE,
6893 MGMT_STATUS_INVALID_PARAMS,
6894 &cp->addr, sizeof(cp->addr));
6898 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6899 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6904 hci_req_update_scan(hdev);
6906 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6907 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6909 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep explicit-connect entries alive; a connect attempt may be in
 * progress for them.
 */
6910 if (p->explicit_connect) {
6911 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
6914 list_del(&p->action);
6919 bt_dev_dbg(hdev, "All LE connection parameters were removed");
6921 hci_update_background_scan(hdev);
6925 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
6926 MGMT_STATUS_SUCCESS, &cp->addr,
6929 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: replace stored LE connection
 * parameters with the supplied list. Validates count vs payload length;
 * individual entries with a bad address type or out-of-range values are
 * logged and skipped rather than failing the whole command.
 */
6933 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6936 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound keeps param_count * entry size within a u16 length. */
6937 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6938 sizeof(struct mgmt_conn_param));
6939 u16 param_count, expected_len;
6942 if (!lmp_le_capable(hdev))
6943 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6944 MGMT_STATUS_NOT_SUPPORTED);
6946 param_count = __le16_to_cpu(cp->param_count);
6947 if (param_count > max_param_count) {
6948 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
6950 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6951 MGMT_STATUS_INVALID_PARAMS);
6954 expected_len = struct_size(cp, params, param_count);
6955 if (expected_len != len) {
6956 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
6958 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6959 MGMT_STATUS_INVALID_PARAMS);
6962 bt_dev_dbg(hdev, "param_count %u", param_count);
/* Drop previously-loaded (disabled) parameters before reloading. */
6966 hci_conn_params_clear_disabled(hdev);
6968 for (i = 0; i < param_count; i++) {
6969 struct mgmt_conn_param *param = &cp->params[i];
6970 struct hci_conn_params *hci_param;
6971 u16 min, max, latency, timeout;
6974 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
6977 if (param->addr.type == BDADDR_LE_PUBLIC) {
6978 addr_type = ADDR_LE_DEV_PUBLIC;
6979 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6980 addr_type = ADDR_LE_DEV_RANDOM;
6982 bt_dev_err(hdev, "ignoring invalid connection parameters");
6986 min = le16_to_cpu(param->min_interval);
6987 max = le16_to_cpu(param->max_interval);
6988 latency = le16_to_cpu(param->latency);
6989 timeout = le16_to_cpu(param->timeout);
6991 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6992 min, max, latency, timeout);
/* Range-check against the Core Specification limits. */
6994 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6995 bt_dev_err(hdev, "ignoring invalid connection parameters");
6999 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7002 bt_dev_err(hdev, "failed to add connection parameters");
7006 hci_param->conn_min_interval = min;
7007 hci_param->conn_max_interval = max;
7008 hci_param->conn_latency = latency;
7009 hci_param->supervision_timeout = timeout;
7012 hci_dev_unlock(hdev);
7014 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: toggle whether device
 * configuration is performed by an external mechanism. Rejected while
 * powered, requires the HCI_QUIRK_EXTERNAL_CONFIG quirk, and only
 * accepts 0x00/0x01 for cp->config. On a configuration-state change
 * the controller index is re-announced (removed/added) accordingly.
 * NOTE(review): chunk is elided; some lines are missing from view.
 */
7018 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7019 void *data, u16 len)
7021 struct mgmt_cp_set_external_config *cp = data;
7025 bt_dev_dbg(hdev, "sock %p", sk);
7027 if (hdev_is_powered(hdev))
7028 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7029 MGMT_STATUS_REJECTED);
7031 if (cp->config != 0x00 && cp->config != 0x01)
7032 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7033 MGMT_STATUS_INVALID_PARAMS);
7035 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7036 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7037 MGMT_STATUS_NOT_SUPPORTED);
/* 'changed' is true only if the flag value actually flipped. */
7042 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7044 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7046 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7053 err = new_options(hdev, sk);
7055 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7056 mgmt_index_removed(hdev);
7058 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7059 hci_dev_set_flag(hdev, HCI_CONFIG);
7060 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7062 queue_work(hdev->req_workqueue, &hdev->power_on);
7064 set_bit(HCI_RAW, &hdev->flags);
7065 mgmt_index_added(hdev);
7070 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: program a public BD_ADDR on
 * controllers that provide a set_bdaddr driver hook. Only allowed
 * while powered off and with a non-zero address. If the address
 * change completes configuration, the index transitions from
 * unconfigured to configured (removed then powered on under
 * HCI_CONFIG/HCI_AUTO_OFF).
 * NOTE(review): chunk is elided; some lines are missing from view.
 */
7074 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7075 void *data, u16 len)
7077 struct mgmt_cp_set_public_address *cp = data;
7081 bt_dev_dbg(hdev, "sock %p", sk);
7083 if (hdev_is_powered(hdev))
7084 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7085 MGMT_STATUS_REJECTED);
7087 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7088 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7089 MGMT_STATUS_INVALID_PARAMS);
7091 if (!hdev->set_bdaddr)
7092 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7093 MGMT_STATUS_NOT_SUPPORTED);
7097 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7098 bacpy(&hdev->public_addr, &cp->bdaddr);
7100 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7107 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7108 err = new_options(hdev, sk);
7110 if (is_configured(hdev)) {
7111 mgmt_index_removed(hdev);
7113 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7115 hci_dev_set_flag(hdev, HCI_CONFIG);
7116 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7118 queue_work(hdev->req_workqueue, &hdev->power_on);
7122 hci_dev_unlock(hdev);
/* HCI request completion callback for Read Local OOB (Extended) Data.
 * Builds the MGMT_OP_READ_LOCAL_OOB_EXT_DATA response from the HCI
 * reply: class-of-device plus C192/R192 (and C256/R256 when secure
 * connections data is available), then emits a limited
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED event to other interested sockets.
 * The eir_len constants are 5 (class-of-device TLV) + 18 per
 * 16-byte hash/randomizer TLV.
 * NOTE(review): chunk is elided; error/cleanup paths are missing
 * from view.
 */
7126 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
7127 u16 opcode, struct sk_buff *skb)
7129 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7130 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7131 u8 *h192, *r192, *h256, *r256;
7132 struct mgmt_pending_cmd *cmd;
7136 bt_dev_dbg(hdev, "status %u", status);
7138 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
7142 mgmt_cp = cmd->param;
7145 status = mgmt_status(status);
7152 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
7153 struct hci_rp_read_local_oob_data *rp;
/* Guard against a truncated/oversized HCI reply. */
7155 if (skb->len != sizeof(*rp)) {
7156 status = MGMT_STATUS_FAILED;
7159 status = MGMT_STATUS_SUCCESS;
7160 rp = (void *)skb->data;
7162 eir_len = 5 + 18 + 18;
7169 struct hci_rp_read_local_oob_ext_data *rp;
7171 if (skb->len != sizeof(*rp)) {
7172 status = MGMT_STATUS_FAILED;
7175 status = MGMT_STATUS_SUCCESS;
7176 rp = (void *)skb->data;
7178 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7179 eir_len = 5 + 18 + 18;
7183 eir_len = 5 + 18 + 18 + 18 + 18;
7193 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7200 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7201 hdev->dev_class, 3);
7204 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7205 EIR_SSP_HASH_C192, h192, 16);
7206 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7207 EIR_SSP_RAND_R192, r192, 16);
7211 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7212 EIR_SSP_HASH_C256, h256, 16);
7213 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7214 EIR_SSP_RAND_R256, r256, 16);
7218 mgmt_rp->type = mgmt_cp->type;
7219 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7221 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7222 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7223 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7224 if (err < 0 || status)
/* Broadcast update to all sockets except the requester. */
7227 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7229 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7230 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7231 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7234 mgmt_pending_remove(cmd);
/* Queue an HCI Read Local OOB (Extended) Data request on behalf of a
 * pending MGMT_OP_READ_LOCAL_OOB_EXT_DATA command. Uses the extended
 * variant when BR/EDR secure connections are enabled; the pending
 * command is removed again if the request fails to run.
 */
7237 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7238 struct mgmt_cp_read_local_oob_ext_data *cp)
7240 struct mgmt_pending_cmd *cmd;
7241 struct hci_request req;
7244 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7249 hci_req_init(&req, hdev);
7251 if (bredr_sc_enabled(hdev))
7252 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7254 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7256 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7258 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA: return local
 * out-of-band pairing data for the requested transport type.
 * BR/EDR requests are deferred to read_local_ssp_oob_req() when SSP
 * is enabled; LE requests synthesize the EIR inline (address, role,
 * SC confirm/random values, flags). Privacy mode is rejected because
 * the active RPA cannot be reported (see inline comment).
 * NOTE(review): chunk is elided; switch/label scaffolding is missing
 * from view.
 */
7265 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7266 void *data, u16 data_len)
7268 struct mgmt_cp_read_local_oob_ext_data *cp = data;
7269 struct mgmt_rp_read_local_oob_ext_data *rp;
7272 u8 status, flags, role, addr[7], hash[16], rand[16];
7275 bt_dev_dbg(hdev, "sock %p", sk);
7277 if (hdev_is_powered(hdev)) {
7279 case BIT(BDADDR_BREDR):
7280 status = mgmt_bredr_support(hdev);
7286 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7287 status = mgmt_le_support(hdev);
/* LE EIR budget: address + role + SC hash + SC rand + flags TLVs. */
7291 eir_len = 9 + 3 + 18 + 18 + 3;
7294 status = MGMT_STATUS_INVALID_PARAMS;
7299 status = MGMT_STATUS_NOT_POWERED;
7303 rp_len = sizeof(*rp) + eir_len;
7304 rp = kmalloc(rp_len, GFP_ATOMIC);
7315 case BIT(BDADDR_BREDR):
7316 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7317 err = read_local_ssp_oob_req(hdev, sk, cp);
7318 hci_dev_unlock(hdev);
7322 status = MGMT_STATUS_FAILED;
7325 eir_len = eir_append_data(rp->eir, eir_len,
7327 hdev->dev_class, 3);
7330 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7331 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7332 smp_generate_oob(hdev, hash, rand) < 0) {
7333 hci_dev_unlock(hdev);
7334 status = MGMT_STATUS_FAILED;
7338 /* This should return the active RPA, but since the RPA
7339 * is only programmed on demand, it is really hard to fill
7340 * this in at the moment. For now disallow retrieving
7341 * local out-of-band data when privacy is in use.
7343 * Returning the identity address will not help here since
7344 * pairing happens before the identity resolving key is
7345 * known and thus the connection establishment happens
7346 * based on the RPA and not the identity address.
7348 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7349 hci_dev_unlock(hdev);
7350 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address per addr-resolution rules. */
7354 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7355 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7356 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7357 bacmp(&hdev->static_addr, BDADDR_ANY))) {
7358 memcpy(addr, &hdev->static_addr, 6);
7361 memcpy(addr, &hdev->bdaddr, 6);
7365 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7366 addr, sizeof(addr));
7368 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7373 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
7374 &role, sizeof(role));
7376 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
7377 eir_len = eir_append_data(rp->eir, eir_len,
7379 hash, sizeof(hash));
7381 eir_len = eir_append_data(rp->eir, eir_len,
7383 rand, sizeof(rand));
7386 flags = mgmt_get_adv_discov_flags(hdev);
7388 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
7389 flags |= LE_AD_NO_BREDR;
7391 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
7392 &flags, sizeof(flags));
7396 hci_dev_unlock(hdev);
7398 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
7400 status = MGMT_STATUS_SUCCESS;
7403 rp->type = cp->type;
7404 rp->eir_len = cpu_to_le16(eir_len);
7406 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
7407 status, rp, sizeof(*rp) + eir_len);
7408 if (err < 0 || status)
7411 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7412 rp, sizeof(*rp) + eir_len,
7413 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Compute the bitmask of advertising flags this controller supports.
 * Base flags are always advertised; MGMT_ADV_FLAG_TX_POWER requires
 * either a valid adv_tx_power reading or extended advertising (where
 * Set Adv Param always reports a valid TX power). Extended-advertising
 * controllers additionally gain 1M/HW-offload/settable-TX-power, and
 * 2M/Coded PHY flags per le_features.
 */
7421 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7425 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7426 flags |= MGMT_ADV_FLAG_DISCOV;
7427 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7428 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7429 flags |= MGMT_ADV_FLAG_APPEARANCE;
7430 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7431 flags |= MGMT_ADV_PARAM_DURATION;
7432 flags |= MGMT_ADV_PARAM_TIMEOUT;
7433 flags |= MGMT_ADV_PARAM_INTERVALS;
7434 flags |= MGMT_ADV_PARAM_TX_POWER;
7436 /* In extended adv TX_POWER returned from Set Adv Param
7437 * will be always valid.
7439 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7440 ext_adv_capable(hdev))
7441 flags |= MGMT_ADV_FLAG_TX_POWER;
7443 if (ext_adv_capable(hdev)) {
7444 flags |= MGMT_ADV_FLAG_SEC_1M;
7445 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7446 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7448 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7449 flags |= MGMT_ADV_FLAG_SEC_2M;
7451 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7452 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* Handler for MGMT_OP_READ_ADV_FEATURES: report supported advertising
 * flags, data-length limits, and the list of registered advertising
 * instance IDs. Requires LE support and is unavailable while the
 * experimental LL privacy mode is enabled.
 *
 * Fix: the LL-privacy rejection replied with the wrong command opcode
 * (MGMT_OP_SET_ADVERTISING, a copy-paste from set_advertising); the
 * status must echo MGMT_OP_READ_ADV_FEATURES so userspace can match
 * the response to its request. Also fixed "Privay" comment typo.
 */
7458 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7459 void *data, u16 data_len)
7461 struct mgmt_rp_read_adv_features *rp;
7464 struct adv_info *adv_instance;
7465 u32 supported_flags;
7468 bt_dev_dbg(hdev, "sock %p", sk);
7470 if (!lmp_le_capable(hdev))
7471 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7472 MGMT_STATUS_REJECTED);
7474 /* Enabling the experimental LL Privacy support disables support for
7477 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7478 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7479 MGMT_STATUS_NOT_SUPPORTED);
/* One trailing byte per registered instance for the ID list. */
7483 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7484 rp = kmalloc(rp_len, GFP_ATOMIC);
7486 hci_dev_unlock(hdev);
7490 supported_flags = get_supported_adv_flags(hdev);
7492 rp->supported_flags = cpu_to_le32(supported_flags);
7493 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7494 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7495 rp->max_instances = hdev->le_num_of_adv_sets;
7496 rp->num_instances = hdev->adv_instance_cnt;
7498 instance = rp->instance;
7499 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7500 *instance = adv_instance->instance;
7504 hci_dev_unlock(hdev);
7506 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7507 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Return the EIR length the local name TLV would occupy, by writing it
 * into a scratch buffer sized for a shortened name plus TLV header.
 */
7514 static u8 calculate_name_len(struct hci_dev *hdev)
7516 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7518 return append_local_name(hdev, buf, 0);
/* Return how many bytes of user-supplied TLV data fit in an
 * advertisement or scan response, after subtracting space reserved for
 * kernel-managed fields (flags, TX power, local name, appearance)
 * selected by adv_flags.
 * NOTE(review): chunk is elided; the per-field byte deductions are
 * missing from view.
 */
7521 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7524 u8 max_len = HCI_MAX_AD_LENGTH;
7527 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7528 MGMT_ADV_FLAG_LIMITED_DISCOV |
7529 MGMT_ADV_FLAG_MANAGED_FLAGS))
7532 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7535 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7536 max_len -= calculate_name_len(hdev);
7538 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True if the kernel manages the Flags AD field for these adv flags. */
7545 static bool flags_managed(u32 adv_flags)
7547 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7548 MGMT_ADV_FLAG_LIMITED_DISCOV |
7549 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True if the kernel manages the TX Power AD field. */
7552 static bool tx_power_managed(u32 adv_flags)
7554 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True if the kernel manages the Local Name AD field. */
7557 static bool name_managed(u32 adv_flags)
7559 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True if the kernel manages the Appearance AD field. */
7562 static bool appearance_managed(u32 adv_flags)
7564 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising/scan-response TLV data: it must
 * fit within the computed budget, be well-formed length/type/value
 * triples, and must not include AD types the kernel itself manages
 * (flags, TX power, names, appearance) for the given adv_flags.
 * NOTE(review): chunk is elided; the length check and return
 * statements are missing from view.
 */
7567 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7568 u8 len, bool is_adv_data)
7573 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7578 /* Make sure that the data is correctly formatted. */
7579 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* data[i] is the field length, data[i + 1] the AD type. */
7582 if (data[i + 1] == EIR_FLAGS &&
7583 (!is_adv_data || flags_managed(adv_flags)))
7586 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7589 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7592 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7595 if (data[i + 1] == EIR_APPEARANCE &&
7596 appearance_managed(adv_flags))
7599 /* If the current field length would exceed the total data
7600 * length, then it's invalid.
7602 if (i + cur_len >= len)
/* Check that the requested advertising flags are all supported and
 * that at most one secondary-PHY (SEC_*) flag is set; the
 * phy_flags ^ (phy_flags & -phy_flags) expression is non-zero exactly
 * when more than one bit of MGMT_ADV_FLAG_SEC_MASK is set.
 */
7609 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7611 u32 supported_flags, phy_flags;
7613 /* The current implementation only supports a subset of the specified
7614 * flags. Also need to check mutual exclusiveness of sec flags.
7616 supported_flags = get_supported_adv_flags(hdev);
7617 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7618 if (adv_flags & ~supported_flags ||
7619 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
/* True while any advertising-related mgmt command is still pending,
 * meaning new advertising changes must be rejected as BUSY.
 */
7625 static bool adv_busy(struct hci_dev *hdev)
7627 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7628 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7629 pending_find(MGMT_OP_SET_LE, hdev) ||
7630 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7631 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
/* HCI request completion callback for Add Advertising / Add Extended
 * Advertising Data. On failure, tears down any instances still marked
 * pending (cancelling the adv timeout if the current instance is
 * affected) and notifies userspace; then completes or fails the
 * originating mgmt command.
 * NOTE(review): chunk is elided; some branch/label lines are missing
 * from view.
 */
7634 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
7637 struct mgmt_pending_cmd *cmd;
7638 struct mgmt_cp_add_advertising *cp;
7639 struct mgmt_rp_add_advertising rp;
7640 struct adv_info *adv_instance, *n;
7643 bt_dev_dbg(hdev, "status %d", status);
7647 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
7649 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
7651 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
7652 if (!adv_instance->pending)
7656 adv_instance->pending = false;
7660 instance = adv_instance->instance;
7662 if (hdev->cur_adv_instance == instance)
7663 cancel_adv_timeout(hdev);
7665 hci_remove_adv_instance(hdev, instance);
7666 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
7673 rp.instance = cp->instance;
7676 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7677 mgmt_status(status));
7679 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7680 mgmt_status(status), &rp, sizeof(rp));
7682 mgmt_pending_remove(cmd);
7685 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_ADVERTISING: register (or replace) an
 * advertising instance with combined adv + scan-response TLV data.
 * Validates instance number, payload length, flags, and TLV contents;
 * registers the instance and, when powered and not overridden by
 * HCI_ADVERTISING, schedules it via an HCI request completed by
 * add_advertising_complete().
 *
 * Fix: the LL-privacy rejection replied with the wrong command opcode
 * (MGMT_OP_SET_ADVERTISING); the status must echo
 * MGMT_OP_ADD_ADVERTISING so userspace can match the response to its
 * request. Also fixed "Privay" comment typo.
 */
7688 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7689 void *data, u16 data_len)
7691 struct mgmt_cp_add_advertising *cp = data;
7692 struct mgmt_rp_add_advertising rp;
7695 u16 timeout, duration;
7696 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7697 u8 schedule_instance = 0;
7698 struct adv_info *next_instance;
7700 struct mgmt_pending_cmd *cmd;
7701 struct hci_request req;
7703 bt_dev_dbg(hdev, "sock %p", sk);
7705 status = mgmt_le_support(hdev);
7707 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7710 /* Enabling the experimental LL Privacy support disables support for
7713 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7714 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7715 MGMT_STATUS_NOT_SUPPORTED);
7717 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7718 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7719 MGMT_STATUS_INVALID_PARAMS);
/* Payload must carry exactly adv_data_len + scan_rsp_len bytes. */
7721 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7722 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7723 MGMT_STATUS_INVALID_PARAMS);
7725 flags = __le32_to_cpu(cp->flags);
7726 timeout = __le16_to_cpu(cp->timeout);
7727 duration = __le16_to_cpu(cp->duration);
7729 if (!requested_adv_flags_are_valid(hdev, flags))
7730 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7731 MGMT_STATUS_INVALID_PARAMS);
7735 if (timeout && !hdev_is_powered(hdev)) {
7736 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7737 MGMT_STATUS_REJECTED);
7741 if (adv_busy(hdev)) {
7742 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7747 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7748 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7749 cp->scan_rsp_len, false)) {
7750 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7751 MGMT_STATUS_INVALID_PARAMS);
7755 err = hci_add_adv_instance(hdev, cp->instance, flags,
7756 cp->adv_data_len, cp->data,
7758 cp->data + cp->adv_data_len,
7760 HCI_ADV_TX_POWER_NO_PREFERENCE,
7761 hdev->le_adv_min_interval,
7762 hdev->le_adv_max_interval);
7764 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7765 MGMT_STATUS_FAILED);
7769 /* Only trigger an advertising added event if a new instance was
7772 if (hdev->adv_instance_cnt > prev_instance_cnt)
7773 mgmt_advertising_added(sk, hdev, cp->instance);
7775 if (hdev->cur_adv_instance == cp->instance) {
7776 /* If the currently advertised instance is being changed then
7777 * cancel the current advertising and schedule the next
7778 * instance. If there is only one instance then the overridden
7779 * advertising data will be visible right away.
7781 cancel_adv_timeout(hdev);
7783 next_instance = hci_get_next_instance(hdev, cp->instance);
7785 schedule_instance = next_instance->instance;
7786 } else if (!hdev->adv_instance_timeout) {
7787 /* Immediately advertise the new instance if no other
7788 * instance is currently being advertised.
7790 schedule_instance = cp->instance;
7793 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
7794 * there is no instance to be advertised then we have no HCI
7795 * communication to make. Simply return.
7797 if (!hdev_is_powered(hdev) ||
7798 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7799 !schedule_instance) {
7800 rp.instance = cp->instance;
7801 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7802 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7806 /* We're good to go, update advertising data, parameters, and start
7809 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7816 hci_req_init(&req, hdev);
7818 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
7821 err = hci_req_run(&req, add_advertising_complete);
7824 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7825 MGMT_STATUS_FAILED);
7826 mgmt_pending_remove(cmd);
7830 hci_dev_unlock(hdev);
/* HCI request completion callback for Add Extended Advertising
 * Parameters. On success, replies with the instance's actual TX power
 * and the remaining adv/scan-rsp data budget; on failure, removes the
 * instance (signalling removal to userspace if it had already been
 * advertising) and returns an error status.
 * NOTE(review): chunk is elided; some branch/label lines are missing
 * from view.
 */
7835 static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
7838 struct mgmt_pending_cmd *cmd;
7839 struct mgmt_cp_add_ext_adv_params *cp;
7840 struct mgmt_rp_add_ext_adv_params rp;
7841 struct adv_info *adv_instance;
7844 BT_DBG("%s", hdev->name);
7848 cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
7853 adv_instance = hci_find_adv_instance(hdev, cp->instance);
7857 rp.instance = cp->instance;
7858 rp.tx_power = adv_instance->tx_power;
7860 /* While we're at it, inform userspace of the available space for this
7861 * advertisement, given the flags that will be used.
7863 flags = __le32_to_cpu(cp->flags);
7864 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7865 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7868 /* If this advertisement was previously advertising and we
7869 * failed to update it, we signal that it has been removed and
7870 * delete its structure
7872 if (!adv_instance->pending)
7873 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
7875 hci_remove_adv_instance(hdev, cp->instance);
7877 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7878 mgmt_status(status));
7881 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7882 mgmt_status(status), &rp, sizeof(rp));
7887 mgmt_pending_remove(cmd);
7889 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS: first half of the split
 * add-advertising flow — registers an instance with parameters only
 * (no data yet). Parameters absent from the request (per flags) fall
 * back to controller defaults. On ext-adv hardware the parameters are
 * programmed via an HCI request completed by
 * add_ext_adv_params_complete(); otherwise the reply is synthesized
 * immediately.
 *
 * Fix: the minimum-size rejection replied with the wrong command
 * opcode (MGMT_OP_ADD_ADVERTISING, copy-pasted from add_advertising);
 * the status must echo MGMT_OP_ADD_EXT_ADV_PARAMS so userspace can
 * match the response to its request.
 */
7892 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
7893 void *data, u16 data_len)
7895 struct mgmt_cp_add_ext_adv_params *cp = data;
7896 struct mgmt_rp_add_ext_adv_params rp;
7897 struct mgmt_pending_cmd *cmd = NULL;
7898 struct adv_info *adv_instance;
7899 struct hci_request req;
7900 u32 flags, min_interval, max_interval;
7901 u16 timeout, duration;
7906 BT_DBG("%s", hdev->name);
7908 status = mgmt_le_support(hdev);
7910 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7913 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7914 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7915 MGMT_STATUS_INVALID_PARAMS);
7917 /* The purpose of breaking add_advertising into two separate MGMT calls
7918 * for params and data is to allow more parameters to be added to this
7919 * structure in the future. For this reason, we verify that we have the
7920 * bare minimum structure we know of when the interface was defined. Any
7921 * extra parameters we don't know about will be ignored in this request.
7923 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
7924 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7925 MGMT_STATUS_INVALID_PARAMS);
7927 flags = __le32_to_cpu(cp->flags);
7929 if (!requested_adv_flags_are_valid(hdev, flags))
7930 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7931 MGMT_STATUS_INVALID_PARAMS);
7935 /* In new interface, we require that we are powered to register */
7936 if (!hdev_is_powered(hdev)) {
7937 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7938 MGMT_STATUS_REJECTED);
7942 if (adv_busy(hdev)) {
7943 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7948 /* Parse defined parameters from request, use defaults otherwise */
7949 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
7950 __le16_to_cpu(cp->timeout) : 0;
7952 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
7953 __le16_to_cpu(cp->duration) :
7954 hdev->def_multi_adv_rotation_duration;
7956 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7957 __le32_to_cpu(cp->min_interval) :
7958 hdev->le_adv_min_interval;
7960 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7961 __le32_to_cpu(cp->max_interval) :
7962 hdev->le_adv_max_interval;
7964 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
7966 HCI_ADV_TX_POWER_NO_PREFERENCE;
7968 /* Create advertising instance with no advertising or response data */
7969 err = hci_add_adv_instance(hdev, cp->instance, flags,
7970 0, NULL, 0, NULL, timeout, duration,
7971 tx_power, min_interval, max_interval);
7974 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7975 MGMT_STATUS_FAILED);
7979 hdev->cur_adv_instance = cp->instance;
7980 /* Submit request for advertising params if ext adv available */
7981 if (ext_adv_capable(hdev)) {
7982 hci_req_init(&req, hdev);
7983 adv_instance = hci_find_adv_instance(hdev, cp->instance);
7985 /* Updating parameters of an active instance will return a
7986 * Command Disallowed error, so we must first disable the
7987 * instance if it is active.
7989 if (!adv_instance->pending)
7990 __hci_req_disable_ext_adv_instance(&req, cp->instance);
7992 __hci_req_setup_ext_adv_instance(&req, cp->instance);
7994 err = hci_req_run(&req, add_ext_adv_params_complete);
7997 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
7998 hdev, data, data_len);
8001 hci_remove_adv_instance(hdev, cp->instance);
8006 rp.instance = cp->instance;
8007 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8008 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8009 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8010 err = mgmt_cmd_complete(sk, hdev->id,
8011 MGMT_OP_ADD_EXT_ADV_PARAMS,
8012 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8016 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA: second half of the split
 * add-advertising flow — attaches adv and scan-response TLV data to an
 * instance previously registered via add_ext_adv_params. Validates the
 * data against the instance's flags, stores it, then either enables it
 * via extended advertising HCI commands or falls back to software
 * rotation scheduling. Cleanup paths remove a still-pending (new)
 * instance on failure.
 * NOTE(review): chunk is elided; some branch/label lines are missing
 * from view.
 */
8021 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8024 struct mgmt_cp_add_ext_adv_data *cp = data;
8025 struct mgmt_rp_add_ext_adv_data rp;
8026 u8 schedule_instance = 0;
8027 struct adv_info *next_instance;
8028 struct adv_info *adv_instance;
8030 struct mgmt_pending_cmd *cmd;
8031 struct hci_request req;
8033 BT_DBG("%s", hdev->name);
8037 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8039 if (!adv_instance) {
8040 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8041 MGMT_STATUS_INVALID_PARAMS);
8045 /* In new interface, we require that we are powered to register */
8046 if (!hdev_is_powered(hdev)) {
8047 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8048 MGMT_STATUS_REJECTED);
8049 goto clear_new_instance;
8052 if (adv_busy(hdev)) {
8053 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8055 goto clear_new_instance;
8058 /* Validate new data */
8059 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8060 cp->adv_data_len, true) ||
8061 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8062 cp->adv_data_len, cp->scan_rsp_len, false)) {
8063 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8064 MGMT_STATUS_INVALID_PARAMS);
8065 goto clear_new_instance;
8068 /* Set the data in the advertising instance */
8069 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8070 cp->data, cp->scan_rsp_len,
8071 cp->data + cp->adv_data_len);
8073 /* We're good to go, update advertising data, parameters, and start
8077 hci_req_init(&req, hdev);
8079 hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
8081 if (ext_adv_capable(hdev)) {
8082 __hci_req_update_adv_data(&req, cp->instance);
8083 __hci_req_update_scan_rsp_data(&req, cp->instance);
8084 __hci_req_enable_ext_advertising(&req, cp->instance);
8087 /* If using software rotation, determine next instance to use */
8089 if (hdev->cur_adv_instance == cp->instance) {
8090 /* If the currently advertised instance is being changed
8091 * then cancel the current advertising and schedule the
8092 * next instance. If there is only one instance then the
8093 * overridden advertising data will be visible right
8096 cancel_adv_timeout(hdev);
8098 next_instance = hci_get_next_instance(hdev,
8101 schedule_instance = next_instance->instance;
8102 } else if (!hdev->adv_instance_timeout) {
8103 /* Immediately advertise the new instance if no other
8104 * instance is currently being advertised.
8106 schedule_instance = cp->instance;
8109 /* If the HCI_ADVERTISING flag is set or there is no instance to
8110 * be advertised then we have no HCI communication to make.
8113 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8114 !schedule_instance) {
8115 if (adv_instance->pending) {
8116 mgmt_advertising_added(sk, hdev, cp->instance);
8117 adv_instance->pending = false;
8119 rp.instance = cp->instance;
8120 err = mgmt_cmd_complete(sk, hdev->id,
8121 MGMT_OP_ADD_EXT_ADV_DATA,
8122 MGMT_STATUS_SUCCESS, &rp,
8127 err = __hci_req_schedule_adv_instance(&req, schedule_instance,
8131 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
8135 goto clear_new_instance;
8139 err = hci_req_run(&req, add_advertising_complete);
8142 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8143 MGMT_STATUS_FAILED);
8144 mgmt_pending_remove(cmd);
8145 goto clear_new_instance;
8148 /* We were successful in updating data, so trigger advertising_added
8149 * event if this is an instance that wasn't previously advertising. If
8150 * a failure occurs in the requests we initiated, we will remove the
8151 * instance again in add_advertising_complete
8153 if (adv_instance->pending)
8154 mgmt_advertising_added(sk, hdev, cp->instance);
8159 hci_remove_adv_instance(hdev, cp->instance);
8162 hci_dev_unlock(hdev);
/* HCI request completion callback for Remove Advertising. Always
 * completes the mgmt command with success — a failure status here only
 * means disabling advertising failed; the instance itself has already
 * been removed.
 */
8167 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
8170 struct mgmt_pending_cmd *cmd;
8171 struct mgmt_cp_remove_advertising *cp;
8172 struct mgmt_rp_remove_advertising rp;
8174 bt_dev_dbg(hdev, "status %d", status);
8178 /* A failure status here only means that we failed to disable
8179 * advertising. Otherwise, the advertising instance has been removed,
8180 * so report success.
8182 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
8187 rp.instance = cp->instance;
8189 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
8191 mgmt_pending_remove(cmd);
8194 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_ADVERTISING: remove one advertising
 * instance (or all, when cp->instance is 0). Rejected while another
 * advertising change is pending. Uses extended-advertising disable and
 * remove commands where available; if the resulting HCI request is
 * empty, or the device is unpowered, or HCI_ADVERTISING overrides
 * instance advertising, the command completes immediately.
 *
 * Fix: the LL-privacy rejection replied with the wrong command opcode
 * (MGMT_OP_SET_ADVERTISING); the status must echo
 * MGMT_OP_REMOVE_ADVERTISING so userspace can match the response to
 * its request. Also fixed "Privay" comment typo.
 */
8197 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8198 void *data, u16 data_len)
8200 struct mgmt_cp_remove_advertising *cp = data;
8201 struct mgmt_rp_remove_advertising rp;
8202 struct mgmt_pending_cmd *cmd;
8203 struct hci_request req;
8206 bt_dev_dbg(hdev, "sock %p", sk);
8208 /* Enabling the experimental LL Privacy support disables support for
8211 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
8212 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8213 MGMT_STATUS_NOT_SUPPORTED);
8217 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8218 err = mgmt_cmd_status(sk, hdev->id,
8219 MGMT_OP_REMOVE_ADVERTISING,
8220 MGMT_STATUS_INVALID_PARAMS);
8224 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8225 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8226 pending_find(MGMT_OP_SET_LE, hdev)) {
8227 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8232 if (list_empty(&hdev->adv_instances)) {
8233 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8234 MGMT_STATUS_INVALID_PARAMS);
8238 hci_req_init(&req, hdev);
8240 /* If we use extended advertising, instance is disabled and removed */
8241 if (ext_adv_capable(hdev)) {
8242 __hci_req_disable_ext_adv_instance(&req, cp->instance);
8243 __hci_req_remove_ext_adv_instance(&req, cp->instance);
8246 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
8248 if (list_empty(&hdev->adv_instances))
8249 __hci_req_disable_advertising(&req);
8251 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
8252 * flag is set or the device isn't powered then we have no HCI
8253 * communication to make. Simply return.
8255 if (skb_queue_empty(&req.cmd_q) ||
8256 !hdev_is_powered(hdev) ||
8257 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
8258 hci_req_purge(&req);
8259 rp.instance = cp->instance;
8260 err = mgmt_cmd_complete(sk, hdev->id,
8261 MGMT_OP_REMOVE_ADVERTISING,
8262 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8266 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8273 err = hci_req_run(&req, remove_advertising_complete);
8275 mgmt_pending_remove(cmd);
8278 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO: report, for a given instance
 * number and flag set, how many bytes of advertising and scan-response
 * data userspace may supply once kernel-managed fields are accounted
 * for. Pure query — no state is changed.
 */
8283 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8284 void *data, u16 data_len)
8286 struct mgmt_cp_get_adv_size_info *cp = data;
8287 struct mgmt_rp_get_adv_size_info rp;
8288 u32 flags, supported_flags;
8291 bt_dev_dbg(hdev, "sock %p", sk);
8293 if (!lmp_le_capable(hdev))
8294 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8295 MGMT_STATUS_REJECTED);
8297 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8298 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8299 MGMT_STATUS_INVALID_PARAMS);
8301 flags = __le32_to_cpu(cp->flags);
8303 /* The current implementation only supports a subset of the specified
8306 supported_flags = get_supported_adv_flags(hdev);
8307 if (flags & ~supported_flags)
8308 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8309 MGMT_STATUS_INVALID_PARAMS);
8311 rp.instance = cp->instance;
8312 rp.flags = cp->flags;
8313 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8314 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8316 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8317 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for MGMT commands, indexed by opcode. Entry order must
 * match the MGMT_OP_* numbering; entry 0 is reserved (no command).
 * Each entry: handler, fixed/minimum parameter size, and optional
 * HCI_MGMT_* flags (UNTRUSTED = allowed without admin privileges,
 * UNCONFIGURED = usable on unconfigured controllers, HDEV_OPTIONAL =
 * command may target no specific controller, VAR_LEN where present in
 * the original — several flag lines appear elided in this extraction.
 */
8322 static const struct hci_mgmt_handler mgmt_handlers[] = {
8323 { NULL }, /* 0x0000 (no command) */
8324 { read_version, MGMT_READ_VERSION_SIZE,
8326 HCI_MGMT_UNTRUSTED },
8327 { read_commands, MGMT_READ_COMMANDS_SIZE,
8329 HCI_MGMT_UNTRUSTED },
8330 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8332 HCI_MGMT_UNTRUSTED },
8333 { read_controller_info, MGMT_READ_INFO_SIZE,
8334 HCI_MGMT_UNTRUSTED },
8335 { set_powered, MGMT_SETTING_SIZE },
8336 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8337 { set_connectable, MGMT_SETTING_SIZE },
8338 { set_fast_connectable, MGMT_SETTING_SIZE },
8339 { set_bondable, MGMT_SETTING_SIZE },
8340 { set_link_security, MGMT_SETTING_SIZE },
8341 { set_ssp, MGMT_SETTING_SIZE },
8342 { set_hs, MGMT_SETTING_SIZE },
8343 { set_le, MGMT_SETTING_SIZE },
8344 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8345 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8346 { add_uuid, MGMT_ADD_UUID_SIZE },
8347 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8348 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8350 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8352 { disconnect, MGMT_DISCONNECT_SIZE },
8353 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8354 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8355 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8356 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8357 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8358 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8359 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8360 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8361 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8362 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8363 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8364 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8365 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8367 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8368 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8369 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8370 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8371 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8372 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8373 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8374 { set_advertising, MGMT_SETTING_SIZE },
8375 { set_bredr, MGMT_SETTING_SIZE },
8376 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8377 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8378 { set_secure_conn, MGMT_SETTING_SIZE },
8379 { set_debug_keys, MGMT_SETTING_SIZE },
8380 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8381 { load_irks, MGMT_LOAD_IRKS_SIZE,
8383 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8384 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8385 { add_device, MGMT_ADD_DEVICE_SIZE },
8386 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8387 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8389 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8391 HCI_MGMT_UNTRUSTED },
8392 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8393 HCI_MGMT_UNCONFIGURED |
8394 HCI_MGMT_UNTRUSTED },
8395 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8396 HCI_MGMT_UNCONFIGURED },
8397 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8398 HCI_MGMT_UNCONFIGURED },
8399 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8401 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8402 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8404 HCI_MGMT_UNTRUSTED },
8405 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8406 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8408 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8409 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8410 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8411 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8412 HCI_MGMT_UNTRUSTED },
8413 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8414 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8415 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
8416 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
8418 { set_wideband_speech, MGMT_SETTING_SIZE },
8419 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
8420 HCI_MGMT_UNTRUSTED },
8421 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
8422 HCI_MGMT_UNTRUSTED |
8423 HCI_MGMT_HDEV_OPTIONAL },
8424 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
8426 HCI_MGMT_HDEV_OPTIONAL },
8427 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
8428 HCI_MGMT_UNTRUSTED },
8429 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
8431 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
8432 HCI_MGMT_UNTRUSTED },
8433 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
8435 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
8436 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
8437 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
8438 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
8440 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
8441 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
8443 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
8445 { add_adv_patterns_monitor_rssi,
8446 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
/* Announce a newly registered controller to mgmt listeners. Raw (quirk)
 * devices are never exposed. For primary devices, unconfigured
 * controllers get UNCONF_INDEX_ADDED, configured ones INDEX_ADDED; an
 * EXT_INDEX_ADDED event is always sent for the extended-events channel.
 * NOTE(review): switch cases / ev field setup appear elided here.
 */
8450 void mgmt_index_added(struct hci_dev *hdev)
8452 struct mgmt_ev_ext_index ev;
8454 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8457 switch (hdev->dev_type) {
8459 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8460 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8461 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8464 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8465 HCI_MGMT_INDEX_EVENTS);
8478 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8479 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller removal. Pending commands are completed with
 * INVALID_INDEX first so no socket is left waiting on a dead controller,
 * then the appropriate (UNCONF_)INDEX_REMOVED and EXT_INDEX_REMOVED
 * events are emitted. NOTE(review): switch cases appear elided here.
 */
8482 void mgmt_index_removed(struct hci_dev *hdev)
8484 struct mgmt_ev_ext_index ev;
8485 u8 status = MGMT_STATUS_INVALID_INDEX;
8487 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8490 switch (hdev->dev_type) {
/* Opcode 0 == wildcard: flush every pending command for this hdev. */
8492 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8494 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8495 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8496 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8499 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8500 HCI_MGMT_INDEX_EVENTS);
8513 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8514 HCI_MGMT_EXT_INDEX_EVENTS);
8517 /* This function requires the caller holds hdev->lock */
/* Re-queue every LE connection parameter entry onto the appropriate
 * pending list after power-on: DIRECT/ALWAYS -> pend_le_conns,
 * REPORT -> pend_le_reports. Entries are first unlinked so AUTO_OFF
 * (where the device never really powered off) does not double-add.
 */
8518 static void restart_le_actions(struct hci_dev *hdev)
8520 struct hci_conn_params *p;
8522 list_for_each_entry(p, &hdev->le_conn_params, list) {
8523 /* Needed for AUTO_OFF case where might not "really"
8524 * have been powered off.
8526 list_del_init(&p->action);
8528 switch (p->auto_connect) {
8529 case HCI_AUTO_CONN_DIRECT:
8530 case HCI_AUTO_CONN_ALWAYS:
8531 list_add(&p->action, &hdev->pend_le_conns);
8533 case HCI_AUTO_CONN_REPORT:
8534 list_add(&p->action, &hdev->pend_le_reports);
/* Power-on completion: restore LE auto-connect actions, restart
 * background scanning, answer pending SET_POWERED commands and emit a
 * New Settings event. NOTE(review): the hci_dev_lock() and error-path
 * lines appear elided in this extraction.
 */
8542 void mgmt_power_on(struct hci_dev *hdev, int err)
8544 struct cmd_lookup match = { NULL, hdev };
8546 bt_dev_dbg(hdev, "err %d", err);
8551 restart_le_actions(hdev);
8552 hci_update_background_scan(hdev);
8555 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8557 new_settings(hdev, match.sk);
8562 hci_dev_unlock(hdev);
/* Power-off teardown: answer pending SET_POWERED commands, fail all
 * other pending commands with a status that matches the cause, clear a
 * non-zero Class of Device, and emit New Settings.
 */
8565 void __mgmt_power_off(struct hci_dev *hdev)
8567 struct cmd_lookup match = { NULL, hdev };
8568 u8 status, zero_cod[] = { 0, 0, 0 };
8570 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8572 /* If the power off is because of hdev unregistration let
8573 * use the appropriate INVALID_INDEX status. Otherwise use
8574 * NOT_POWERED. We cover both scenarios here since later in
8575 * mgmt_index_removed() any hci_conn callbacks will have already
8576 * been triggered, potentially causing misleading DISCONNECTED
8579 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8580 status = MGMT_STATUS_INVALID_INDEX;
8582 status = MGMT_STATUS_NOT_POWERED;
/* Opcode 0 == wildcard: complete every remaining pending command. */
8584 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8586 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8587 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8588 zero_cod, sizeof(zero_cod),
8589 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8590 ext_info_changed(hdev, NULL);
8593 new_settings(hdev, match.sk);
/* Report failure of a pending SET_POWERED command. -ERFKILL maps to
 * MGMT_STATUS_RFKILLED, everything else to MGMT_STATUS_FAILED.
 */
8599 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8601 struct mgmt_pending_cmd *cmd;
8604 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8608 if (err == -ERFKILL)
8609 status = MGMT_STATUS_RFKILLED;
8611 status = MGMT_STATUS_FAILED;
8613 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8615 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a BR/EDR link key so userspace can
 * decide whether to persist it (store_hint = persistent).
 */
8618 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8621 struct mgmt_ev_new_link_key ev;
8623 memset(&ev, 0, sizeof(ev));
8625 ev.store_hint = persistent;
8626 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8627 ev.key.addr.type = BDADDR_BREDR;
8628 ev.key.type = key->type;
8629 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8630 ev.key.pin_len = key->pin_len;
8632 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an smp_ltk's SMP type + authentication to the MGMT_LTK_* wire
 * value. NOTE(review): the case labels for the legacy and P-256 key
 * types appear elided in this extraction; only the P256_DEBUG label
 * and the per-branch bodies are visible.
 */
8635 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8637 switch (ltk->type) {
8640 if (ltk->authenticated)
8641 return MGMT_LTK_AUTHENTICATED;
8642 return MGMT_LTK_UNAUTHENTICATED;
8644 if (ltk->authenticated)
8645 return MGMT_LTK_P256_AUTH;
8646 return MGMT_LTK_P256_UNAUTH;
8647 case SMP_LTK_P256_DEBUG:
8648 return MGMT_LTK_P256_DEBUG;
/* Fallback for unknown types. */
8651 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY. The store hint is forced to 0 for
 * non-identity random addresses (RPA/NRPA: top two bits != 11b) since
 * such keys cannot be meaningfully persisted.
 */
8654 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8656 struct mgmt_ev_new_long_term_key ev;
8658 memset(&ev, 0, sizeof(ev));
8660 /* Devices using resolvable or non-resolvable random addresses
8661 * without providing an identity resolving key don't require
8662 * to store long term keys. Their addresses will change the
8665 * Only when a remote device provides an identity address
8666 * make sure the long term key is stored. If the remote
8667 * identity is known, the long term keys are internally
8668 * mapped to the identity address. So allow static random
8669 * and public addresses here.
8671 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8672 (key->bdaddr.b[5] & 0xc0) != 0xc0
8673 ev.store_hint = 0x00;
8675 ev.store_hint = persistent;
8677 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8678 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8679 ev.key.type = mgmt_ltk_type(key);
8680 ev.key.enc_size = key->enc_size;
8681 ev.key.ediv = key->ediv;
8682 ev.key.rand = key->rand;
/* NOTE(review): the central-role flag assignment appears elided. */
8684 if (key->type == SMP_LTK)
8687 /* Make sure we copy only the significant bytes based on the
8688 * encryption key size, and set the rest of the value to zeroes.
8690 memcpy(ev.key.val, key->val, key->enc_size);
8691 memset(ev.key.val + key->enc_size, 0,
8692 sizeof(ev.key.val) - key->enc_size);
8694 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK with the identity address, its current RPA and
 * the identity resolving key material.
 */
8697 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8699 struct mgmt_ev_new_irk ev;
8701 memset(&ev, 0, sizeof(ev));
8703 ev.store_hint = persistent;
8705 bacpy(&ev.rpa, &irk->rpa);
8706 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8707 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8708 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8710 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK. As with LTKs, the store hint is suppressed
 * for non-identity (resolvable/non-resolvable) random addresses.
 */
8713 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
8716 struct mgmt_ev_new_csrk ev;
8718 memset(&ev, 0, sizeof(ev));
8720 /* Devices using resolvable or non-resolvable random addresses
8721 * without providing an identity resolving key don't require
8722 * to store signature resolving keys. Their addresses will change
8723 * the next time around.
8725 * Only when a remote device provides an identity address
8726 * make sure the signature resolving key is stored. So allow
8727 * static random and public addresses here.
8729 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8730 (csrk->bdaddr.b[5] & 0xc0) != 0xc0
8731 ev.store_hint = 0x00;
8733 ev.store_hint = persistent;
8735 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
8736 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
8737 ev.key.type = csrk->type;
8738 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
8740 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM so userspace can persist connection
 * parameters suggested by the remote. Only identity addresses (public
 * or static random) are reported; all u16 fields go out little-endian.
 */
8743 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8744 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8745 u16 max_interval, u16 latency, u16 timeout)
8747 struct mgmt_ev_new_conn_param ev;
8749 if (!hci_is_identity_address(bdaddr, bdaddr_type))
8752 memset(&ev, 0, sizeof(ev));
8753 bacpy(&ev.addr.bdaddr, bdaddr);
8754 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8755 ev.store_hint = store_hint;
8756 ev.min_interval = cpu_to_le16(min_interval);
8757 ev.max_interval = cpu_to_le16(max_interval);
8758 ev.latency = cpu_to_le16(latency);
8759 ev.timeout = cpu_to_le16(timeout);
8761 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED with trailing EIR data. For LE links
 * that carried advertising data, that data is forwarded verbatim;
 * otherwise the name (and a non-zero Class of Device) are appended as
 * EIR fields. NOTE(review): the `buf`/`eir_len` declarations and the
 * name-presence check appear elided in this extraction.
 */
8764 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
8765 u32 flags, u8 *name, u8 name_len)
8768 struct mgmt_ev_device_connected *ev = (void *) buf;
8771 bacpy(&ev->addr.bdaddr, &conn->dst);
8772 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
8774 ev->flags = __cpu_to_le32(flags);
8776 /* We must ensure that the EIR Data fields are ordered and
8777 * unique. Keep it simple for now and avoid the problem by not
8778 * adding any BR/EDR data to the LE adv.
8780 if (conn->le_adv_data_len > 0) {
8781 memcpy(&ev->eir[eir_len],
8782 conn->le_adv_data, conn->le_adv_data_len);
8783 eir_len = conn->le_adv_data_len;
8786 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* A zero CoD is "unknown" and is not advertised. */
8789 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
8790 eir_len = eir_append_data(ev->eir, eir_len,
8792 conn->dev_class, 3);
8795 ev->eir_len = cpu_to_le16(eir_len);
8797 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
8798 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending DISCONNECT command
 * and hand its socket back to the caller via *sk (presumably with a
 * reference taken in the elided lines — verify against upstream).
 */
8801 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8803 struct sock **sk = data;
8805 cmd->cmd_complete(cmd, 0);
8810 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: emit Device Unpaired for the address
 * in the pending UNPAIR_DEVICE command, then complete and drop it.
 */
8813 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
8815 struct hci_dev *hdev = data;
8816 struct mgmt_cp_unpair_device *cp = cmd->param;
8818 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
8820 cmd->cmd_complete(cmd, 0);
8821 mgmt_pending_remove(cmd);
/* Return true when a pending SET_POWERED command is switching the
 * controller off. NOTE(review): the NULL check and the cp->val test /
 * return statements appear elided in this extraction.
 */
8824 bool mgmt_powering_down(struct hci_dev *hdev)
8826 struct mgmt_pending_cmd *cmd;
8827 struct mgmt_mode *cp;
8829 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED and flush related pending commands.
 * If this is the last connection while powering down, the deferred
 * power-off work is flushed immediately.
 */
8840 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
8841 u8 link_type, u8 addr_type, u8 reason,
8842 bool mgmt_connected)
8844 struct mgmt_ev_device_disconnected ev;
8845 struct sock *sk = NULL;
8847 /* The connection is still in hci_conn_hash so test for 1
8848 * instead of 0 to know if this is the last one.
8850 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8851 cancel_delayed_work(&hdev->power_off);
8852 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Only connections that were announced to mgmt get a disconnect event. */
8855 if (!mgmt_connected)
8858 if (link_type != ACL_LINK && link_type != LE_LINK)
8861 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
8863 bacpy(&ev.addr.bdaddr, bdaddr);
8864 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8867 /* Report disconnects due to suspend */
8868 if (hdev->suspended)
8869 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
8871 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
8876 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnect failed: flush UNPAIR_DEVICE responders, then
 * complete the matching pending DISCONNECT command (same address and
 * address type) with the translated HCI status.
 */
8880 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8881 u8 link_type, u8 addr_type, u8 status)
8883 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
8884 struct mgmt_cp_disconnect *cp;
8885 struct mgmt_pending_cmd *cmd;
8887 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8890 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
8896 if (bacmp(bdaddr, &cp->addr.bdaddr))
8899 if (cp->addr.type != bdaddr_type)
8902 cmd->cmd_complete(cmd, mgmt_status(status));
8903 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED; mirrors mgmt_device_disconnected's
 * fast-path power-off flush when this was the last connection during
 * a power-down.
 */
8906 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8907 u8 addr_type, u8 status)
8909 struct mgmt_ev_connect_failed ev;
8911 /* The connection is still in hci_conn_hash so test for 1
8912 * instead of 0 to know if this is the last one.
8914 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8915 cancel_delayed_work(&hdev->power_off);
8916 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8919 bacpy(&ev.addr.bdaddr, bdaddr);
8920 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8921 ev.status = mgmt_status(status);
8923 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a PIN code request to userspace (BR/EDR only). The `secure`
 * flag assignment appears elided in this extraction.
 */
8926 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8928 struct mgmt_ev_pin_code_request ev;
8930 bacpy(&ev.addr.bdaddr, bdaddr);
8931 ev.addr.type = BDADDR_BREDR;
8934 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete the pending PIN_CODE_REPLY command with the HCI status. */
8937 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8940 struct mgmt_pending_cmd *cmd;
8942 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8946 cmd->cmd_complete(cmd, mgmt_status(status));
8947 mgmt_pending_remove(cmd);
/* Complete the pending PIN_CODE_NEG_REPLY command with the HCI status. */
8950 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8953 struct mgmt_pending_cmd *cmd;
8955 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
8959 cmd->cmd_complete(cmd, mgmt_status(status));
8960 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a pairing numeric value (SSP numeric
 * comparison / just-works). `confirm_hint` distinguishes the two.
 */
8963 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8964 u8 link_type, u8 addr_type, u32 value,
8967 struct mgmt_ev_user_confirm_request ev;
8969 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8971 bacpy(&ev.addr.bdaddr, bdaddr);
8972 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8973 ev.confirm_hint = confirm_hint;
8974 ev.value = cpu_to_le32(value);
8976 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey for the given remote address. */
8980 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8981 u8 link_type, u8 addr_type)
8983 struct mgmt_ev_user_passkey_request ev;
8985 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8987 bacpy(&ev.addr.bdaddr, bdaddr);
8988 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8990 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common tail for the four user confirm/passkey reply completions:
 * find the pending command for `opcode` and complete it with the
 * translated HCI status.
 */
8994 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8995 u8 link_type, u8 addr_type, u8 status,
8998 struct mgmt_pending_cmd *cmd;
9000 cmd = pending_find(opcode, hdev);
9004 cmd->cmd_complete(cmd, mgmt_status(status));
9005 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
9010 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9011 u8 link_type, u8 addr_type, u8 status)
9013 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9014 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
9017 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9018 u8 link_type, u8 addr_type, u8 status)
9020 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9022 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
9025 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9026 u8 link_type, u8 addr_type, u8 status)
9028 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9029 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
9032 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9033 u8 link_type, u8 addr_type, u8 status)
9035 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9037 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey;
 * `entered` counts digits the remote has typed so far.
 */
9040 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9041 u8 link_type, u8 addr_type, u32 passkey,
9044 struct mgmt_ev_passkey_notify ev;
9046 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9048 bacpy(&ev.addr.bdaddr, bdaddr);
9049 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9050 ev.passkey = __cpu_to_le32(passkey);
9051 ev.entered = entered;
9053 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Authentication failed on a connection: emit MGMT_EV_AUTH_FAILED
 * (skipping the socket of the pairing initiator, if any) and complete
 * the pending pairing command with the translated status.
 */
9056 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9058 struct mgmt_ev_auth_failed ev;
9059 struct mgmt_pending_cmd *cmd;
9060 u8 status = mgmt_status(hci_status);
9062 bacpy(&ev.addr.bdaddr, &conn->dst);
9063 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9066 cmd = find_pairing(conn);
9068 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9069 cmd ? cmd->sk : NULL);
9072 cmd->cmd_complete(cmd, status);
9073 mgmt_pending_remove(cmd);
/* Completion of HCI authentication-enable: on error, fail the pending
 * SET_LINK_SECURITY commands; on success, sync HCI_LINK_SECURITY with
 * the HCI_AUTH flag, answer the pending commands and, if the setting
 * actually changed, emit New Settings.
 */
9077 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9079 struct cmd_lookup match = { NULL, hdev };
9083 u8 mgmt_err = mgmt_status(status);
9084 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9085 cmd_status_rsp, &mgmt_err);
9089 if (test_bit(HCI_AUTH, &hdev->flags))
9090 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9092 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9094 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9098 new_settings(hdev, match.sk);
/* Queue a Write Extended Inquiry Response clearing both the cached and
 * the controller-side EIR. No-op if the controller lacks EIR support.
 */
9104 static void clear_eir(struct hci_request *req)
9106 struct hci_dev *hdev = req->hdev;
9107 struct hci_cp_write_eir cp;
9109 if (!lmp_ext_inq_capable(hdev))
9112 memset(hdev->eir, 0, sizeof(hdev->eir));
9114 memset(&cp, 0, sizeof(cp));
9116 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion of HCI SSP-mode write: on error, roll back a tentative
 * SSP enable (and the dependent HS flag) and fail the pending SET_SSP
 * commands; on success, update HCI_SSP_ENABLED, answer the pending
 * commands, emit New Settings if changed, and queue follow-up HCI
 * commands (debug-key mode, EIR update/clear) as needed.
 * NOTE(review): several branch/brace lines appear elided here.
 */
9119 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
9121 struct cmd_lookup match = { NULL, hdev };
9122 struct hci_request req;
9123 bool changed = false;
9126 u8 mgmt_err = mgmt_status(status);
9128 if (enable && hci_dev_test_and_clear_flag(hdev,
9130 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9131 new_settings(hdev, NULL);
9134 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
9140 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
9142 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
9144 changed = hci_dev_test_and_clear_flag(hdev,
9147 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9150 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
9153 new_settings(hdev, match.sk);
9158 hci_req_init(&req, hdev);
9160 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9161 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
9162 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
9163 sizeof(enable), &enable);
9164 __hci_req_update_eir(&req);
9169 hci_req_run(&req, NULL);
/* mgmt_pending_foreach callback: capture the first pending command's
 * socket (with a reference) into the cmd_lookup match.
 */
9172 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9174 struct cmd_lookup *match = data;
9176 if (match->sk == NULL) {
9177 match->sk = cmd->sk;
9178 sock_hold(match->sk);
/* Completion of a Class of Device write: find the socket that caused
 * the change (SET_DEV_CLASS / ADD_UUID / REMOVE_UUID) so it is skipped,
 * then broadcast the new class and extended controller info.
 */
9182 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9185 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9187 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9188 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9189 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9192 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9193 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9194 ext_info_changed(hdev, NULL);
/* Completion of a local-name write: cache the name, then broadcast
 * Local Name Changed and extended info — except while powering on,
 * where the name write is part of init and must stay silent.
 */
9201 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9203 struct mgmt_cp_set_local_name ev;
9204 struct mgmt_pending_cmd *cmd;
9209 memset(&ev, 0, sizeof(ev));
9210 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9211 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9213 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9215 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9217 /* If this is a HCI command related to powering on the
9218 * HCI dev don't send any mgmt signals.
9220 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9224 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9225 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9226 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* Linear search: does the 128-bit `uuid` appear in the `uuids` array? */
9229 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9233 for (i = 0; i < uuid_count; i++) {
9234 if (!memcmp(uuid, uuids[i], 16))
/* Walk EIR/AD structures and report whether any contained 16/32/128-bit
 * UUID matches the filter list. 16- and 32-bit UUIDs are expanded to
 * 128 bits by patching the Bluetooth base UUID (stored little-endian,
 * so the short UUID bytes land at offsets 12..15).
 */
9241 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9245 while (parsed < eir_len) {
9246 u8 field_len = eir[0];
/* Malformed field running past the buffer terminates the scan. */
9253 if (eir_len - parsed < field_len + 1)
9257 case EIR_UUID16_ALL:
9258 case EIR_UUID16_SOME:
9259 for (i = 0; i + 3 <= field_len; i += 2) {
9260 memcpy(uuid, bluetooth_base_uuid, 16);
9261 uuid[13] = eir[i + 3];
9262 uuid[12] = eir[i + 2];
9263 if (has_uuid(uuid, uuid_count, uuids))
9267 case EIR_UUID32_ALL:
9268 case EIR_UUID32_SOME:
9269 for (i = 0; i + 5 <= field_len; i += 4) {
9270 memcpy(uuid, bluetooth_base_uuid, 16);
9271 uuid[15] = eir[i + 5];
9272 uuid[14] = eir[i + 4];
9273 uuid[13] = eir[i + 3];
9274 uuid[12] = eir[i + 2];
9275 if (has_uuid(uuid, uuid_count, uuids))
9279 case EIR_UUID128_ALL:
9280 case EIR_UUID128_SOME:
9281 for (i = 0; i + 17 <= field_len; i += 16) {
9282 memcpy(uuid, eir + i + 2, 16);
9283 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field (length byte + field_len payload). */
9289 parsed += field_len + 1;
9290 eir += field_len + 1;
/* Schedule an LE scan restart so duplicate filtering re-reports RSSI,
 * unless scanning is off or the scan window is about to expire anyway.
 */
9296 static void restart_le_scan(struct hci_dev *hdev)
9298 /* If controller is not scanning we are done. */
9299 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
/* Skip the restart when the remaining scan time is shorter than the
 * restart delay itself.
 */
9302 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9303 hdev->discovery.scan_start +
9304 hdev->discovery.scan_duration))
9307 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9308 DISCOV_LE_RESTART_DELAY);
/* Service-discovery filter: decide whether a found device passes the
 * configured RSSI threshold and UUID list. Returns presumably
 * true=keep / false=drop; the actual return statements are elided in
 * this extraction.
 */
9311 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9312 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9314 /* If a RSSI threshold has been specified, and
9315 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9316 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9317 * is set, let it through for further processing, as we might need to
9320 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9321 * the results are also dropped.
9323 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9324 (rssi == HCI_RSSI_INVALID ||
9325 (rssi < hdev->discovery.rssi &&
9326 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9329 if (hdev->discovery.uuid_count != 0) {
9330 /* If a list of UUIDs is provided in filter, results with no
9331 * matching UUID should be dropped.
9333 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9334 hdev->discovery.uuids) &&
9335 !eir_has_uuids(scan_rsp, scan_rsp_len,
9336 hdev->discovery.uuid_count,
9337 hdev->discovery.uuids))
9341 /* If duplicate filtering does not report RSSI changes, then restart
9342 * scanning to ensure updated result with updated RSSI values.
9344 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9345 restart_le_scan(hdev);
9347 /* Validate RSSI value against the RSSI threshold once more. */
9348 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9349 rssi < hdev->discovery.rssi)
/* Build and emit MGMT_EV_DEVICE_FOUND for an inquiry/scan result.
 * Results are suppressed when no kernel-initiated discovery is active
 * (except LE passive scanning with pending reports or advertisement
 * monitors), then run through the service-discovery filter and the
 * limited-discoverable check before being forwarded with EIR + scan
 * response data concatenated. NOTE(review): the `buf` declaration and
 * several brace/assignment lines appear elided in this extraction.
 */
9356 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9357 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9358 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9361 struct mgmt_ev_device_found *ev = (void *)buf;
9364 /* Don't send events for a non-kernel initiated discovery. With
9365 * LE one exception is if we have pend_le_reports > 0 in which
9366 * case we're doing passive scanning and want these events.
9368 if (!hci_discovery_active(hdev)) {
9369 if (link_type == ACL_LINK)
9371 if (link_type == LE_LINK &&
9372 list_empty(&hdev->pend_le_reports) &&
9373 !hci_is_adv_monitoring(hdev)) {
9378 if (hdev->discovery.result_filtering) {
9379 /* We are using service discovery */
9380 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
9385 if (hdev->discovery.limited) {
9386 /* Check for limited discoverable bit */
9388 if (!(dev_class[1] & 0x20))
9391 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
9392 if (!flags || !(flags[0] & LE_AD_LIMITED))
9397 /* Make sure that the buffer is big enough. The 5 extra bytes
9398 * are for the potential CoD field.
9400 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9403 memset(buf, 0, sizeof(buf));
9405 /* In case of device discovery with BR/EDR devices (pre 1.2), the
9406 * RSSI value was reported as 0 when not available. This behavior
9407 * is kept when using device discovery. This is required for full
9408 * backwards compatibility with the API.
9410 * However when using service discovery, the value 127 will be
9411 * returned when the RSSI is not available.
9413 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9414 link_type == ACL_LINK)
9417 bacpy(&ev->addr.bdaddr, bdaddr);
9418 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9420 ev->flags = cpu_to_le32(flags);
9423 /* Copy EIR or advertising data into event */
9424 memcpy(ev->eir, eir, eir_len);
/* Append CoD as an EIR field only if the data doesn't already have one. */
9426 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9428 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9431 if (scan_rsp_len > 0)
9432 /* Append scan response data to event */
9433 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9435 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9436 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9438 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a MGMT_EV_DEVICE_FOUND carrying only the resolved remote name
 * as an EIR Complete Name field (used after a remote name request).
 */
9441 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9442 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9444 struct mgmt_ev_device_found *ev;
/* +2 covers the EIR field header (length byte + type byte). */
9445 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9448 ev = (struct mgmt_ev_device_found *) buf;
9450 memset(buf, 0, sizeof(buf));
9452 bacpy(&ev->addr.bdaddr, bdaddr);
9453 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9456 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9459 ev->eir_len = cpu_to_le16(eir_len);
9461 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Emit MGMT_EV_DISCOVERING with the current discovery type and the new
 * discovering state (0 = stopped, 1 = active).
 */
9464 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9466 struct mgmt_ev_discovering ev;
9468 bt_dev_dbg(hdev, "discovering %u", discovering);
9470 memset(&ev, 0, sizeof(ev));
9471 ev.type = hdev->discovery.type;
9472 ev.discovering = discovering;
9474 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Notify userspace that the controller is entering the given suspend
 * state (MGMT_EV_CONTROLLER_SUSPEND).
 */
9477 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9479 struct mgmt_ev_controller_suspend ev;
9481 ev.suspend_state = state;
9482 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
/* Notify userspace the controller resumed, including the wake reason
 * and (for remote wake) the waking device address. NOTE(review): the
 * branch selecting between the address copy and the memset appears
 * elided in this extraction.
 */
9485 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9488 struct mgmt_ev_controller_resume ev;
9490 ev.wake_reason = reason;
9492 bacpy(&ev.addr.bdaddr, bdaddr);
9493 ev.addr.type = addr_type;
9495 memset(&ev.addr, 0, sizeof(ev.addr));
9498 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
/* Registration descriptor binding the handler table above to the
 * HCI control channel; hdev_init runs per-controller setup.
 */
9501 static struct hci_mgmt_chan chan = {
9502 .channel = HCI_CHANNEL_CONTROL,
9503 .handler_count = ARRAY_SIZE(mgmt_handlers),
9504 .handlers = mgmt_handlers,
9505 .hdev_init = mgmt_init_hdev,
/* Body of mgmt_init(): register the control channel (the function
 * signature line is missing from this extraction).
 */
9510 return hci_mgmt_chan_register(&chan);
/* Module teardown: unregister the mgmt control channel. */
9513 void mgmt_exit(void)
9515 hci_mgmt_chan_unregister(&chan);