2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
/* Protocol version/revision reported to userspace by
 * mgmt_fill_version_info() (MGMT_OP_READ_VERSION).
 */
44 #define MGMT_VERSION 1
45 #define MGMT_REVISION 21
/* Opcodes a trusted (privileged) management socket may invoke; reported to
 * userspace by read_commands().
 * NOTE(review): gaps in the embedded line numbering indicate some entries
 * and the closing "};" were lost from this view -- restore from the
 * upstream file before building.
 */
47 static const u16 mgmt_commands[] = {
48 MGMT_OP_READ_INDEX_LIST,
51 MGMT_OP_SET_DISCOVERABLE,
52 MGMT_OP_SET_CONNECTABLE,
53 MGMT_OP_SET_FAST_CONNECTABLE,
55 MGMT_OP_SET_LINK_SECURITY,
59 MGMT_OP_SET_DEV_CLASS,
60 MGMT_OP_SET_LOCAL_NAME,
63 MGMT_OP_LOAD_LINK_KEYS,
64 MGMT_OP_LOAD_LONG_TERM_KEYS,
66 MGMT_OP_GET_CONNECTIONS,
67 MGMT_OP_PIN_CODE_REPLY,
68 MGMT_OP_PIN_CODE_NEG_REPLY,
69 MGMT_OP_SET_IO_CAPABILITY,
71 MGMT_OP_CANCEL_PAIR_DEVICE,
72 MGMT_OP_UNPAIR_DEVICE,
73 MGMT_OP_USER_CONFIRM_REPLY,
74 MGMT_OP_USER_CONFIRM_NEG_REPLY,
75 MGMT_OP_USER_PASSKEY_REPLY,
76 MGMT_OP_USER_PASSKEY_NEG_REPLY,
77 MGMT_OP_READ_LOCAL_OOB_DATA,
78 MGMT_OP_ADD_REMOTE_OOB_DATA,
79 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80 MGMT_OP_START_DISCOVERY,
81 MGMT_OP_STOP_DISCOVERY,
84 MGMT_OP_UNBLOCK_DEVICE,
85 MGMT_OP_SET_DEVICE_ID,
86 MGMT_OP_SET_ADVERTISING,
88 MGMT_OP_SET_STATIC_ADDRESS,
89 MGMT_OP_SET_SCAN_PARAMS,
90 MGMT_OP_SET_SECURE_CONN,
91 MGMT_OP_SET_DEBUG_KEYS,
94 MGMT_OP_GET_CONN_INFO,
95 MGMT_OP_GET_CLOCK_INFO,
97 MGMT_OP_REMOVE_DEVICE,
98 MGMT_OP_LOAD_CONN_PARAM,
99 MGMT_OP_READ_UNCONF_INDEX_LIST,
100 MGMT_OP_READ_CONFIG_INFO,
101 MGMT_OP_SET_EXTERNAL_CONFIG,
102 MGMT_OP_SET_PUBLIC_ADDRESS,
103 MGMT_OP_START_SERVICE_DISCOVERY,
104 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105 MGMT_OP_READ_EXT_INDEX_LIST,
106 MGMT_OP_READ_ADV_FEATURES,
107 MGMT_OP_ADD_ADVERTISING,
108 MGMT_OP_REMOVE_ADVERTISING,
109 MGMT_OP_GET_ADV_SIZE_INFO,
110 MGMT_OP_START_LIMITED_DISCOVERY,
111 MGMT_OP_READ_EXT_INFO,
112 MGMT_OP_SET_APPEARANCE,
113 MGMT_OP_GET_PHY_CONFIGURATION,
114 MGMT_OP_SET_PHY_CONFIGURATION,
115 MGMT_OP_SET_BLOCKED_KEYS,
116 MGMT_OP_SET_WIDEBAND_SPEECH,
117 MGMT_OP_READ_CONTROLLER_CAP,
118 MGMT_OP_READ_EXP_FEATURES_INFO,
119 MGMT_OP_SET_EXP_FEATURE,
120 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124 MGMT_OP_GET_DEVICE_FLAGS,
125 MGMT_OP_SET_DEVICE_FLAGS,
126 MGMT_OP_READ_ADV_MONITOR_FEATURES,
127 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128 MGMT_OP_REMOVE_ADV_MONITOR,
129 MGMT_OP_ADD_EXT_ADV_PARAMS,
130 MGMT_OP_ADD_EXT_ADV_DATA,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
/* Events a trusted management socket may receive; reported to userspace by
 * read_commands().
 * NOTE(review): line-numbering gaps indicate some entries and the closing
 * "};" were elided from this view.
 */
134 static const u16 mgmt_events[] = {
135 MGMT_EV_CONTROLLER_ERROR,
137 MGMT_EV_INDEX_REMOVED,
138 MGMT_EV_NEW_SETTINGS,
139 MGMT_EV_CLASS_OF_DEV_CHANGED,
140 MGMT_EV_LOCAL_NAME_CHANGED,
141 MGMT_EV_NEW_LINK_KEY,
142 MGMT_EV_NEW_LONG_TERM_KEY,
143 MGMT_EV_DEVICE_CONNECTED,
144 MGMT_EV_DEVICE_DISCONNECTED,
145 MGMT_EV_CONNECT_FAILED,
146 MGMT_EV_PIN_CODE_REQUEST,
147 MGMT_EV_USER_CONFIRM_REQUEST,
148 MGMT_EV_USER_PASSKEY_REQUEST,
150 MGMT_EV_DEVICE_FOUND,
152 MGMT_EV_DEVICE_BLOCKED,
153 MGMT_EV_DEVICE_UNBLOCKED,
154 MGMT_EV_DEVICE_UNPAIRED,
155 MGMT_EV_PASSKEY_NOTIFY,
158 MGMT_EV_DEVICE_ADDED,
159 MGMT_EV_DEVICE_REMOVED,
160 MGMT_EV_NEW_CONN_PARAM,
161 MGMT_EV_UNCONF_INDEX_ADDED,
162 MGMT_EV_UNCONF_INDEX_REMOVED,
163 MGMT_EV_NEW_CONFIG_OPTIONS,
164 MGMT_EV_EXT_INDEX_ADDED,
165 MGMT_EV_EXT_INDEX_REMOVED,
166 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
167 MGMT_EV_ADVERTISING_ADDED,
168 MGMT_EV_ADVERTISING_REMOVED,
169 MGMT_EV_EXT_INFO_CHANGED,
170 MGMT_EV_PHY_CONFIGURATION_CHANGED,
171 MGMT_EV_EXP_FEATURE_CHANGED,
172 MGMT_EV_DEVICE_FLAGS_CHANGED,
173 MGMT_EV_ADV_MONITOR_ADDED,
174 MGMT_EV_ADV_MONITOR_REMOVED,
175 MGMT_EV_CONTROLLER_SUSPEND,
176 MGMT_EV_CONTROLLER_RESUME,
/* Read-only subset of opcodes permitted on untrusted (unprivileged)
 * management sockets -- see the HCI_SOCK_TRUSTED checks in read_commands().
 * NOTE(review): numbering gap after the first entry and the missing "};"
 * indicate elided lines in this view.
 */
179 static const u16 mgmt_untrusted_commands[] = {
180 MGMT_OP_READ_INDEX_LIST,
182 MGMT_OP_READ_UNCONF_INDEX_LIST,
183 MGMT_OP_READ_CONFIG_INFO,
184 MGMT_OP_READ_EXT_INDEX_LIST,
185 MGMT_OP_READ_EXT_INFO,
186 MGMT_OP_READ_CONTROLLER_CAP,
187 MGMT_OP_READ_EXP_FEATURES_INFO,
188 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
189 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Events delivered to untrusted management sockets.
 * NOTE(review): numbering gap after the opening line and the missing "};"
 * indicate elided lines in this view.
 */
192 static const u16 mgmt_untrusted_events[] = {
194 MGMT_EV_INDEX_REMOVED,
195 MGMT_EV_NEW_SETTINGS,
196 MGMT_EV_CLASS_OF_DEV_CHANGED,
197 MGMT_EV_LOCAL_NAME_CHANGED,
198 MGMT_EV_UNCONF_INDEX_ADDED,
199 MGMT_EV_UNCONF_INDEX_REMOVED,
200 MGMT_EV_NEW_CONFIG_OPTIONS,
201 MGMT_EV_EXT_INDEX_ADDED,
202 MGMT_EV_EXT_INDEX_REMOVED,
203 MGMT_EV_EXT_INFO_CHANGED,
204 MGMT_EV_EXP_FEATURE_CHANGED,
/* 2-second expiry, presumably for the hdev->service_cache delayed work --
 * usage site not visible in this chunk, TODO confirm.
 */
207 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* 16 zero bytes; NOTE(review): looks like a sentinel compared against
 * link/long-term key values -- verify at the usage sites.
 */
209 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
210 "\x00\x00\x00\x00\x00\x00\x00\x00"
212 /* HCI to MGMT error code conversion table */
/* Indexed directly by HCI status code (see mgmt_status()); each entry's
 * trailing comment names the HCI error it translates.
 * NOTE(review): the opening line after the "[] = {" (HCI status 0x00,
 * MGMT_STATUS_SUCCESS) and the closing "};" appear elided from this view.
 */
213 static const u8 mgmt_status_table[] = {
215 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
216 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
217 MGMT_STATUS_FAILED, /* Hardware Failure */
218 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
219 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
220 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
221 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
222 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
223 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
224 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
225 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
226 MGMT_STATUS_BUSY, /* Command Disallowed */
227 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
228 MGMT_STATUS_REJECTED, /* Rejected Security */
229 MGMT_STATUS_REJECTED, /* Rejected Personal */
230 MGMT_STATUS_TIMEOUT, /* Host Timeout */
231 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
232 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
233 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
234 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
235 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
236 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
237 MGMT_STATUS_BUSY, /* Repeated Attempts */
238 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
239 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
240 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
241 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
242 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
243 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
244 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
245 MGMT_STATUS_FAILED, /* Unspecified Error */
246 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
247 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
248 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
249 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
250 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
251 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
252 MGMT_STATUS_FAILED, /* Unit Link Key Used */
253 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
254 MGMT_STATUS_TIMEOUT, /* Instant Passed */
255 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
256 MGMT_STATUS_FAILED, /* Transaction Collision */
257 MGMT_STATUS_FAILED, /* Reserved for future use */
258 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
259 MGMT_STATUS_REJECTED, /* QoS Rejected */
260 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
261 MGMT_STATUS_REJECTED, /* Insufficient Security */
262 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
263 MGMT_STATUS_FAILED, /* Reserved for future use */
264 MGMT_STATUS_BUSY, /* Role Switch Pending */
265 MGMT_STATUS_FAILED, /* Reserved for future use */
266 MGMT_STATUS_FAILED, /* Slot Violation */
267 MGMT_STATUS_FAILED, /* Role Switch Failed */
268 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
269 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
270 MGMT_STATUS_BUSY, /* Host Busy Pairing */
271 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
272 MGMT_STATUS_BUSY, /* Controller Busy */
273 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
274 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
275 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
276 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
277 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
280 static u8 mgmt_errno_status(int err)
284 return MGMT_STATUS_SUCCESS;
286 return MGMT_STATUS_REJECTED;
288 return MGMT_STATUS_INVALID_PARAMS;
290 return MGMT_STATUS_NOT_SUPPORTED;
292 return MGMT_STATUS_BUSY;
294 return MGMT_STATUS_AUTH_FAILED;
296 return MGMT_STATUS_NO_RESOURCES;
298 return MGMT_STATUS_ALREADY_CONNECTED;
300 return MGMT_STATUS_DISCONNECTED;
303 return MGMT_STATUS_FAILED;
306 static u8 mgmt_status(int err)
309 return mgmt_errno_status(err);
311 if (err < ARRAY_SIZE(mgmt_status_table))
312 return mgmt_status_table[err];
314 return MGMT_STATUS_FAILED;
317 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
320 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
324 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
325 u16 len, int flag, struct sock *skip_sk)
327 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
331 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
332 struct sock *skip_sk)
334 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
335 HCI_SOCK_TRUSTED, skip_sk);
338 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
340 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
344 static u8 le_addr_type(u8 mgmt_addr_type)
346 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
347 return ADDR_LE_DEV_PUBLIC;
349 return ADDR_LE_DEV_RANDOM;
352 void mgmt_fill_version_info(void *ver)
354 struct mgmt_rp_read_version *rp = ver;
356 rp->version = MGMT_VERSION;
357 rp->revision = cpu_to_le16(MGMT_REVISION);
360 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
363 struct mgmt_rp_read_version rp;
365 bt_dev_dbg(hdev, "sock %p", sk);
367 mgmt_fill_version_info(&rp);
369 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
373 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
376 struct mgmt_rp_read_commands *rp;
377 u16 num_commands, num_events;
381 bt_dev_dbg(hdev, "sock %p", sk);
383 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
384 num_commands = ARRAY_SIZE(mgmt_commands);
385 num_events = ARRAY_SIZE(mgmt_events);
387 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
388 num_events = ARRAY_SIZE(mgmt_untrusted_events);
391 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
393 rp = kmalloc(rp_size, GFP_KERNEL);
397 rp->num_commands = cpu_to_le16(num_commands);
398 rp->num_events = cpu_to_le16(num_events);
400 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
401 __le16 *opcode = rp->opcodes;
403 for (i = 0; i < num_commands; i++, opcode++)
404 put_unaligned_le16(mgmt_commands[i], opcode);
406 for (i = 0; i < num_events; i++, opcode++)
407 put_unaligned_le16(mgmt_events[i], opcode);
409 __le16 *opcode = rp->opcodes;
411 for (i = 0; i < num_commands; i++, opcode++)
412 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
414 for (i = 0; i < num_events; i++, opcode++)
415 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
418 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
428 struct mgmt_rp_read_index_list *rp;
434 bt_dev_dbg(hdev, "sock %p", sk);
436 read_lock(&hci_dev_list_lock);
439 list_for_each_entry(d, &hci_dev_list, list) {
440 if (d->dev_type == HCI_PRIMARY &&
441 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
445 rp_len = sizeof(*rp) + (2 * count);
446 rp = kmalloc(rp_len, GFP_ATOMIC);
448 read_unlock(&hci_dev_list_lock);
453 list_for_each_entry(d, &hci_dev_list, list) {
454 if (hci_dev_test_flag(d, HCI_SETUP) ||
455 hci_dev_test_flag(d, HCI_CONFIG) ||
456 hci_dev_test_flag(d, HCI_USER_CHANNEL))
459 /* Devices marked as raw-only are neither configured
460 * nor unconfigured controllers.
462 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
465 if (d->dev_type == HCI_PRIMARY &&
466 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
467 rp->index[count++] = cpu_to_le16(d->id);
468 bt_dev_dbg(hdev, "Added hci%u", d->id);
472 rp->num_controllers = cpu_to_le16(count);
473 rp_len = sizeof(*rp) + (2 * count);
475 read_unlock(&hci_dev_list_lock);
477 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
485 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
486 void *data, u16 data_len)
488 struct mgmt_rp_read_unconf_index_list *rp;
494 bt_dev_dbg(hdev, "sock %p", sk);
496 read_lock(&hci_dev_list_lock);
499 list_for_each_entry(d, &hci_dev_list, list) {
500 if (d->dev_type == HCI_PRIMARY &&
501 hci_dev_test_flag(d, HCI_UNCONFIGURED))
505 rp_len = sizeof(*rp) + (2 * count);
506 rp = kmalloc(rp_len, GFP_ATOMIC);
508 read_unlock(&hci_dev_list_lock);
513 list_for_each_entry(d, &hci_dev_list, list) {
514 if (hci_dev_test_flag(d, HCI_SETUP) ||
515 hci_dev_test_flag(d, HCI_CONFIG) ||
516 hci_dev_test_flag(d, HCI_USER_CHANNEL))
519 /* Devices marked as raw-only are neither configured
520 * nor unconfigured controllers.
522 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
525 if (d->dev_type == HCI_PRIMARY &&
526 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
527 rp->index[count++] = cpu_to_le16(d->id);
528 bt_dev_dbg(hdev, "Added hci%u", d->id);
532 rp->num_controllers = cpu_to_le16(count);
533 rp_len = sizeof(*rp) + (2 * count);
535 read_unlock(&hci_dev_list_lock);
537 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
538 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
545 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
546 void *data, u16 data_len)
548 struct mgmt_rp_read_ext_index_list *rp;
553 bt_dev_dbg(hdev, "sock %p", sk);
555 read_lock(&hci_dev_list_lock);
558 list_for_each_entry(d, &hci_dev_list, list) {
559 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
563 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
565 read_unlock(&hci_dev_list_lock);
570 list_for_each_entry(d, &hci_dev_list, list) {
571 if (hci_dev_test_flag(d, HCI_SETUP) ||
572 hci_dev_test_flag(d, HCI_CONFIG) ||
573 hci_dev_test_flag(d, HCI_USER_CHANNEL))
576 /* Devices marked as raw-only are neither configured
577 * nor unconfigured controllers.
579 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
582 if (d->dev_type == HCI_PRIMARY) {
583 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
584 rp->entry[count].type = 0x01;
586 rp->entry[count].type = 0x00;
587 } else if (d->dev_type == HCI_AMP) {
588 rp->entry[count].type = 0x02;
593 rp->entry[count].bus = d->bus;
594 rp->entry[count++].index = cpu_to_le16(d->id);
595 bt_dev_dbg(hdev, "Added hci%u", d->id);
598 rp->num_controllers = cpu_to_le16(count);
600 read_unlock(&hci_dev_list_lock);
602 /* If this command is called at least once, then all the
603 * default index and unconfigured index events are disabled
604 * and from now on only extended index events are used.
606 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
607 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
608 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
610 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
611 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
612 struct_size(rp, entry, count));
619 static bool is_configured(struct hci_dev *hdev)
621 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
622 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
625 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
626 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
627 !bacmp(&hdev->public_addr, BDADDR_ANY))
633 static __le32 get_missing_options(struct hci_dev *hdev)
637 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
638 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
639 options |= MGMT_OPTION_EXTERNAL_CONFIG;
641 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
642 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
643 !bacmp(&hdev->public_addr, BDADDR_ANY))
644 options |= MGMT_OPTION_PUBLIC_ADDRESS;
646 return cpu_to_le32(options);
649 static int new_options(struct hci_dev *hdev, struct sock *skip)
651 __le32 options = get_missing_options(hdev);
653 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
654 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
657 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
659 __le32 options = get_missing_options(hdev);
661 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
665 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
666 void *data, u16 data_len)
668 struct mgmt_rp_read_config_info rp;
671 bt_dev_dbg(hdev, "sock %p", sk);
675 memset(&rp, 0, sizeof(rp));
676 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
678 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
679 options |= MGMT_OPTION_EXTERNAL_CONFIG;
681 if (hdev->set_bdaddr)
682 options |= MGMT_OPTION_PUBLIC_ADDRESS;
684 rp.supported_options = cpu_to_le32(options);
685 rp.missing_options = get_missing_options(hdev);
687 hci_dev_unlock(hdev);
689 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
693 static u32 get_supported_phys(struct hci_dev *hdev)
695 u32 supported_phys = 0;
697 if (lmp_bredr_capable(hdev)) {
698 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
700 if (hdev->features[0][0] & LMP_3SLOT)
701 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
703 if (hdev->features[0][0] & LMP_5SLOT)
704 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
706 if (lmp_edr_2m_capable(hdev)) {
707 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
709 if (lmp_edr_3slot_capable(hdev))
710 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
712 if (lmp_edr_5slot_capable(hdev))
713 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
715 if (lmp_edr_3m_capable(hdev)) {
716 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
718 if (lmp_edr_3slot_capable(hdev))
719 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
721 if (lmp_edr_5slot_capable(hdev))
722 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
727 if (lmp_le_capable(hdev)) {
728 supported_phys |= MGMT_PHY_LE_1M_TX;
729 supported_phys |= MGMT_PHY_LE_1M_RX;
731 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
732 supported_phys |= MGMT_PHY_LE_2M_TX;
733 supported_phys |= MGMT_PHY_LE_2M_RX;
736 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
737 supported_phys |= MGMT_PHY_LE_CODED_TX;
738 supported_phys |= MGMT_PHY_LE_CODED_RX;
742 return supported_phys;
745 static u32 get_selected_phys(struct hci_dev *hdev)
747 u32 selected_phys = 0;
749 if (lmp_bredr_capable(hdev)) {
750 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
752 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
753 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
755 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
756 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
758 if (lmp_edr_2m_capable(hdev)) {
759 if (!(hdev->pkt_type & HCI_2DH1))
760 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
762 if (lmp_edr_3slot_capable(hdev) &&
763 !(hdev->pkt_type & HCI_2DH3))
764 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
766 if (lmp_edr_5slot_capable(hdev) &&
767 !(hdev->pkt_type & HCI_2DH5))
768 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
770 if (lmp_edr_3m_capable(hdev)) {
771 if (!(hdev->pkt_type & HCI_3DH1))
772 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
774 if (lmp_edr_3slot_capable(hdev) &&
775 !(hdev->pkt_type & HCI_3DH3))
776 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
778 if (lmp_edr_5slot_capable(hdev) &&
779 !(hdev->pkt_type & HCI_3DH5))
780 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
785 if (lmp_le_capable(hdev)) {
786 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
787 selected_phys |= MGMT_PHY_LE_1M_TX;
789 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
790 selected_phys |= MGMT_PHY_LE_1M_RX;
792 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
793 selected_phys |= MGMT_PHY_LE_2M_TX;
795 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
796 selected_phys |= MGMT_PHY_LE_2M_RX;
798 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
799 selected_phys |= MGMT_PHY_LE_CODED_TX;
801 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
802 selected_phys |= MGMT_PHY_LE_CODED_RX;
805 return selected_phys;
808 static u32 get_configurable_phys(struct hci_dev *hdev)
810 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
811 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
814 static u32 get_supported_settings(struct hci_dev *hdev)
818 settings |= MGMT_SETTING_POWERED;
819 settings |= MGMT_SETTING_BONDABLE;
820 settings |= MGMT_SETTING_DEBUG_KEYS;
821 settings |= MGMT_SETTING_CONNECTABLE;
822 settings |= MGMT_SETTING_DISCOVERABLE;
824 if (lmp_bredr_capable(hdev)) {
825 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
826 settings |= MGMT_SETTING_FAST_CONNECTABLE;
827 settings |= MGMT_SETTING_BREDR;
828 settings |= MGMT_SETTING_LINK_SECURITY;
830 if (lmp_ssp_capable(hdev)) {
831 settings |= MGMT_SETTING_SSP;
832 if (IS_ENABLED(CONFIG_BT_HS))
833 settings |= MGMT_SETTING_HS;
836 if (lmp_sc_capable(hdev))
837 settings |= MGMT_SETTING_SECURE_CONN;
839 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
841 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
844 if (lmp_le_capable(hdev)) {
845 settings |= MGMT_SETTING_LE;
846 settings |= MGMT_SETTING_SECURE_CONN;
847 settings |= MGMT_SETTING_PRIVACY;
848 settings |= MGMT_SETTING_STATIC_ADDRESS;
849 settings |= MGMT_SETTING_ADVERTISING;
852 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
854 settings |= MGMT_SETTING_CONFIGURATION;
856 settings |= MGMT_SETTING_PHY_CONFIGURATION;
861 static u32 get_current_settings(struct hci_dev *hdev)
865 if (hdev_is_powered(hdev))
866 settings |= MGMT_SETTING_POWERED;
868 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
869 settings |= MGMT_SETTING_CONNECTABLE;
871 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
872 settings |= MGMT_SETTING_FAST_CONNECTABLE;
874 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
875 settings |= MGMT_SETTING_DISCOVERABLE;
877 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
878 settings |= MGMT_SETTING_BONDABLE;
880 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
881 settings |= MGMT_SETTING_BREDR;
883 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
884 settings |= MGMT_SETTING_LE;
886 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
887 settings |= MGMT_SETTING_LINK_SECURITY;
889 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
890 settings |= MGMT_SETTING_SSP;
892 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
893 settings |= MGMT_SETTING_HS;
895 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
896 settings |= MGMT_SETTING_ADVERTISING;
898 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
899 settings |= MGMT_SETTING_SECURE_CONN;
901 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
902 settings |= MGMT_SETTING_DEBUG_KEYS;
904 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
905 settings |= MGMT_SETTING_PRIVACY;
907 /* The current setting for static address has two purposes. The
908 * first is to indicate if the static address will be used and
909 * the second is to indicate if it is actually set.
911 * This means if the static address is not configured, this flag
912 * will never be set. If the address is configured, then if the
913 * address is actually used decides if the flag is set or not.
915 * For single mode LE only controllers and dual-mode controllers
916 * with BR/EDR disabled, the existence of the static address will
919 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
920 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
921 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
922 if (bacmp(&hdev->static_addr, BDADDR_ANY))
923 settings |= MGMT_SETTING_STATIC_ADDRESS;
926 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
927 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
932 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
934 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
937 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
939 struct mgmt_pending_cmd *cmd;
941 /* If there's a pending mgmt command the flags will not yet have
942 * their final values, so check for this first.
944 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
946 struct mgmt_mode *cp = cmd->param;
948 return LE_AD_GENERAL;
949 else if (cp->val == 0x02)
950 return LE_AD_LIMITED;
952 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
953 return LE_AD_LIMITED;
954 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
955 return LE_AD_GENERAL;
961 bool mgmt_get_connectable(struct hci_dev *hdev)
963 struct mgmt_pending_cmd *cmd;
965 /* If there's a pending mgmt command the flag will not yet have
966 * it's final value, so check for this first.
968 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
970 struct mgmt_mode *cp = cmd->param;
975 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* hci_cmd_sync callback: flush cached EIR data and class of device to the
 * controller.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
        hci_update_eir_sync(hdev);
        hci_update_class_sync(hdev);

        return 0;
}
986 static void service_cache_off(struct work_struct *work)
988 struct hci_dev *hdev = container_of(work, struct hci_dev,
991 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
994 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
997 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
999 /* The generation of a new RPA and programming it into the
1000 * controller happens in the hci_req_enable_advertising()
1003 if (ext_adv_capable(hdev))
1004 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1006 return hci_enable_advertising_sync(hdev);
1009 static void rpa_expired(struct work_struct *work)
1011 struct hci_dev *hdev = container_of(work, struct hci_dev,
1014 bt_dev_dbg(hdev, "");
1016 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1018 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1021 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1024 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1026 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1029 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1030 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1032 /* Non-mgmt controlled devices get this bit set
1033 * implicitly so that pairing works for them, however
1034 * for mgmt we require user-space to explicitly enable
1037 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1040 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1041 void *data, u16 data_len)
1043 struct mgmt_rp_read_info rp;
1045 bt_dev_dbg(hdev, "sock %p", sk);
1049 memset(&rp, 0, sizeof(rp));
1051 bacpy(&rp.bdaddr, &hdev->bdaddr);
1053 rp.version = hdev->hci_ver;
1054 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1056 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1057 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1059 memcpy(rp.dev_class, hdev->dev_class, 3);
1061 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1062 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1064 hci_dev_unlock(hdev);
1066 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1070 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1075 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1076 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1077 hdev->dev_class, 3);
1079 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1080 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1083 name_len = strlen(hdev->dev_name);
1084 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1085 hdev->dev_name, name_len);
1087 name_len = strlen(hdev->short_name);
1088 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1089 hdev->short_name, name_len);
1094 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1095 void *data, u16 data_len)
1098 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1101 bt_dev_dbg(hdev, "sock %p", sk);
1103 memset(&buf, 0, sizeof(buf));
1107 bacpy(&rp->bdaddr, &hdev->bdaddr);
1109 rp->version = hdev->hci_ver;
1110 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1112 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1113 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1116 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1117 rp->eir_len = cpu_to_le16(eir_len);
1119 hci_dev_unlock(hdev);
1121 /* If this command is called at least once, then the events
1122 * for class of device and local name changes are disabled
1123 * and only the new extended controller information event
1126 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1127 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1128 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1130 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1131 sizeof(*rp) + eir_len);
1134 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1137 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1140 memset(buf, 0, sizeof(buf));
1142 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1143 ev->eir_len = cpu_to_le16(eir_len);
1145 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1146 sizeof(*ev) + eir_len,
1147 HCI_MGMT_EXT_INFO_EVENTS, skip);
1150 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1152 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1154 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1158 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1160 struct mgmt_ev_advertising_added ev;
1162 ev.instance = instance;
1164 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1167 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1170 struct mgmt_ev_advertising_removed ev;
1172 ev.instance = instance;
1174 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1177 static void cancel_adv_timeout(struct hci_dev *hdev)
1179 if (hdev->adv_instance_timeout) {
1180 hdev->adv_instance_timeout = 0;
1181 cancel_delayed_work(&hdev->adv_instance_expire);
1185 /* This function requires the caller holds hdev->lock */
1186 static void restart_le_actions(struct hci_dev *hdev)
1188 struct hci_conn_params *p;
1190 list_for_each_entry(p, &hdev->le_conn_params, list) {
1191 /* Needed for AUTO_OFF case where might not "really"
1192 * have been powered off.
1194 list_del_init(&p->action);
1196 switch (p->auto_connect) {
1197 case HCI_AUTO_CONN_DIRECT:
1198 case HCI_AUTO_CONN_ALWAYS:
1199 list_add(&p->action, &hdev->pend_le_conns);
1201 case HCI_AUTO_CONN_REPORT:
1202 list_add(&p->action, &hdev->pend_le_reports);
1210 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1212 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1214 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1215 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* hci_cmd_sync completion handler for MGMT_OP_SET_POWERED.
 * Bails out if the pending command was already cancelled; on success it
 * restarts LE auto-connect actions, refreshes passive scanning and sends
 * the settings response, otherwise it reports the mapped status. The
 * conditional branches are partially elided in this view.
 */
1218 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1220 struct mgmt_pending_cmd *cmd = data;
1221 struct mgmt_mode *cp;
1223 /* Make sure cmd still outstanding. */
1224 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1229 bt_dev_dbg(hdev, "err %d", err);
1234 restart_le_actions(hdev);
1235 hci_update_passive_scan(hdev);
1236 hci_dev_unlock(hdev);
1239 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1241 /* Only call new_setting for power on as power off is deferred
1242 * to hdev->power_off work which does call hci_dev_do_close.
1245 new_settings(hdev, cmd->sk);
1247 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1251 mgmt_pending_remove(cmd);
/* hci_cmd_sync worker: apply the requested power state (cp->val) taken
 * from the pending command's stored parameters.
 */
1254 static int set_powered_sync(struct hci_dev *hdev, void *data)
1256 struct mgmt_pending_cmd *cmd = data;
1257 struct mgmt_mode *cp = cmd->param;
1259 BT_DBG("%s", hdev->name);
1261 return hci_set_powered_sync(hdev, cp->val);
/* Handler for MGMT_OP_SET_POWERED.
 * Validates the mode byte, rejects a duplicate in-flight request, short-
 * circuits when the device is already in the requested state, and
 * otherwise queues set_powered_sync() with mgmt_set_powered_complete()
 * as the completion callback. (Lock/unlock and error labels are
 * partially elided in this view.)
 */
1264 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1267 struct mgmt_mode *cp = data;
1268 struct mgmt_pending_cmd *cmd;
1271 bt_dev_dbg(hdev, "sock %p", sk);
1273 if (cp->val != 0x00 && cp->val != 0x01)
1274 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1275 MGMT_STATUS_INVALID_PARAMS);
1279 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1280 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1285 if (!!cp->val == hdev_is_powered(hdev)) {
1286 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1290 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1296 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1297 mgmt_set_powered_complete);
1300 mgmt_pending_remove(cmd);
1303 hci_dev_unlock(hdev);
/* Public wrapper: broadcast the current settings to every listener
 * (no socket skipped).
 */
1307 int mgmt_new_settings(struct hci_dev *hdev)
1309 return new_settings(hdev, NULL);
1314 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer one pending command with the
 * current settings, remember the first originating socket in the
 * cmd_lookup match (taking a reference), then free the command.
 */
1318 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1320 struct cmd_lookup *match = data;
1322 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1324 list_del(&cmd->list);
1326 if (match->sk == NULL) {
1327 match->sk = cmd->sk;
1328 sock_hold(match->sk);
1331 mgmt_pending_free(cmd);
/* Foreach callback: fail a pending command with *status and drop it. */
1334 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1338 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1339 mgmt_pending_remove(cmd);
/* Foreach callback: prefer the command's own cmd_complete handler when
 * set; otherwise fall back to a plain status response.
 */
1342 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1344 if (cmd->cmd_complete) {
1347 cmd->cmd_complete(cmd, *status);
1348 mgmt_pending_remove(cmd);
1353 cmd_status_rsp(cmd, data);
/* Default cmd_complete: echo the command's stored parameters back. */
1356 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1358 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1359 cmd->param, cmd->param_len);
/* cmd_complete variant for commands whose reply is just the leading
 * mgmt_addr_info of the stored parameters.
 */
1362 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1364 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1365 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR capability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled.
 */
1368 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1370 if (!lmp_bredr_capable(hdev))
1371 return MGMT_STATUS_NOT_SUPPORTED;
1372 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1373 return MGMT_STATUS_REJECTED;
1375 return MGMT_STATUS_SUCCESS;
/* Same mapping for the LE transport. */
1378 static u8 mgmt_le_support(struct hci_dev *hdev)
1380 if (!lmp_le_capable(hdev))
1381 return MGMT_STATUS_NOT_SUPPORTED;
1382 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1383 return MGMT_STATUS_REJECTED;
1385 return MGMT_STATUS_SUCCESS;
/* hci_cmd_sync completion handler for MGMT_OP_SET_DISCOVERABLE.
 * On error: report the mapped status and clear limited discoverable.
 * On success: (re)arm the discoverable timeout if one is configured,
 * send the settings response and broadcast the change. Some branch
 * structure is elided in this view.
 */
1388 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1391 struct mgmt_pending_cmd *cmd = data;
1393 bt_dev_dbg(hdev, "err %d", err);
1395 /* Make sure cmd still outstanding. */
1396 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1402 u8 mgmt_err = mgmt_status(err);
1403 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1404 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1408 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1409 hdev->discov_timeout > 0) {
1410 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1411 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1414 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1415 new_settings(hdev, cmd->sk);
1418 mgmt_pending_remove(cmd);
1419 hci_dev_unlock(hdev);
/* hci_cmd_sync worker: push the already-updated discoverable flags down
 * to the controller.
 */
1422 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1424 BT_DBG("%s", hdev->name);
1426 return hci_update_discoverable_sync(hdev);
/* Handler for MGMT_OP_SET_DISCOVERABLE.
 * val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable (which additionally requires a non-zero timeout).
 * Validation is followed by three fast paths (powered-off flag change,
 * timeout-only update, duplicate/busy rejection) before the real work is
 * queued to set_discoverable_sync(). Several error labels and closing
 * braces are elided in this view.
 */
1429 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1432 struct mgmt_cp_set_discoverable *cp = data;
1433 struct mgmt_pending_cmd *cmd;
1437 bt_dev_dbg(hdev, "sock %p", sk);
/* Need at least one enabled transport to be discoverable at all. */
1439 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1440 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1441 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1442 MGMT_STATUS_REJECTED);
1444 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1445 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1446 MGMT_STATUS_INVALID_PARAMS);
1448 timeout = __le16_to_cpu(cp->timeout);
1450 /* Disabling discoverable requires that no timeout is set,
1451 * and enabling limited discoverable requires a timeout.
1453 if ((cp->val == 0x00 && timeout > 0) ||
1454 (cp->val == 0x02 && timeout == 0))
1455 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1456 MGMT_STATUS_INVALID_PARAMS);
1460 if (!hdev_is_powered(hdev) && timeout > 0) {
1461 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1462 MGMT_STATUS_NOT_POWERED);
1466 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1467 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1468 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable; reject otherwise. */
1473 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1474 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1475 MGMT_STATUS_REJECTED);
1479 if (hdev->advertising_paused) {
1480 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Powered off: just toggle the flag and report, no HCI traffic. */
1485 if (!hdev_is_powered(hdev)) {
1486 bool changed = false;
1488 /* Setting limited discoverable when powered off is
1489 * not a valid operation since it requires a timeout
1490 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1492 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1493 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1497 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1502 err = new_settings(hdev, sk);
1507 /* If the current mode is the same, then just update the timeout
1508 * value with the new value. And if only the timeout gets updated,
1509 * then no need for any HCI transactions.
1511 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1512 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1513 HCI_LIMITED_DISCOVERABLE)) {
1514 cancel_delayed_work(&hdev->discov_off);
1515 hdev->discov_timeout = timeout;
1517 if (cp->val && hdev->discov_timeout > 0) {
1518 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1519 queue_delayed_work(hdev->req_workqueue,
1520 &hdev->discov_off, to);
1523 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1527 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1533 /* Cancel any potential discoverable timeout that might be
1534 * still active and store new timeout value. The arming of
1535 * the timeout happens in the complete handler.
1537 cancel_delayed_work(&hdev->discov_off);
1538 hdev->discov_timeout = timeout;
1541 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1543 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1545 /* Limited discoverable mode */
1546 if (cp->val == 0x02)
1547 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1549 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1551 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1552 mgmt_set_discoverable_complete);
1555 mgmt_pending_remove(cmd);
1558 hci_dev_unlock(hdev);
/* hci_cmd_sync completion handler for MGMT_OP_SET_CONNECTABLE: report
 * failure via mgmt_cmd_status(), or on success send the settings reply
 * and broadcast the new settings to other listeners.
 */
1562 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1565 struct mgmt_pending_cmd *cmd = data;
1567 bt_dev_dbg(hdev, "err %d", err);
1569 /* Make sure cmd still outstanding. */
1570 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1576 u8 mgmt_err = mgmt_status(err);
1577 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1581 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1582 new_settings(hdev, cmd->sk);
1586 mgmt_pending_remove(cmd);
1588 hci_dev_unlock(hdev);
/* Flag-only path for SET_CONNECTABLE (used when no HCI traffic is
 * needed): update HCI_CONNECTABLE, clear discoverable when turning
 * connectable off, reply, and if anything changed refresh scanning and
 * broadcast new settings.
 */
1591 static int set_connectable_update_settings(struct hci_dev *hdev,
1592 struct sock *sk, u8 val)
1594 bool changed = false;
1597 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1601 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1603 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Not connectable implies not discoverable either. */
1604 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1607 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1612 hci_req_update_scan(hdev);
1613 hci_update_passive_scan(hdev);
1614 return new_settings(hdev, sk);
/* hci_cmd_sync worker: apply the already-updated connectable flags to
 * the controller.
 */
1620 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1622 BT_DBG("%s", hdev->name);
1624 return hci_update_connectable_sync(hdev);
/* Handler for MGMT_OP_SET_CONNECTABLE.
 * Rejects when neither transport is enabled or the mode byte is invalid;
 * powered-off devices take the flag-only path; an in-flight
 * discoverable/connectable command returns busy; otherwise flags are
 * pre-set and set_connectable_sync() is queued. Error labels and braces
 * are partially elided in this view.
 */
1627 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1630 struct mgmt_mode *cp = data;
1631 struct mgmt_pending_cmd *cmd;
1634 bt_dev_dbg(hdev, "sock %p", sk);
1636 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1637 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1638 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1639 MGMT_STATUS_REJECTED);
1641 if (cp->val != 0x00 && cp->val != 0x01)
1642 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1643 MGMT_STATUS_INVALID_PARAMS);
1647 if (!hdev_is_powered(hdev)) {
1648 err = set_connectable_update_settings(hdev, sk, cp->val);
1652 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1653 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1654 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1659 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1666 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off also tears down discoverable state. */
1668 if (hdev->discov_timeout > 0)
1669 cancel_delayed_work(&hdev->discov_off);
1671 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1672 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1673 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1676 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1677 mgmt_set_connectable_complete);
1680 mgmt_pending_remove(cmd);
1683 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BONDABLE: a pure flag toggle — set or clear
 * HCI_BONDABLE, reply with current settings, and on an actual change
 * refresh discoverable state (the bondable bit can affect the local
 * advertising address in limited privacy mode) and broadcast.
 */
1687 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1690 struct mgmt_mode *cp = data;
1694 bt_dev_dbg(hdev, "sock %p", sk);
1696 if (cp->val != 0x00 && cp->val != 0x01)
1697 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1698 MGMT_STATUS_INVALID_PARAMS);
1703 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1705 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1707 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1712 /* In limited privacy mode the change of bondable mode
1713 * may affect the local advertising address.
1715 hci_update_discoverable(hdev);
1717 err = new_settings(hdev, sk);
1721 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LINK_SECURITY.
 * Requires BR/EDR support; powered-off devices just flip the
 * HCI_LINK_SECURITY flag; otherwise a HCI_OP_WRITE_AUTH_ENABLE command
 * is sent unless the controller's HCI_AUTH bit already matches.
 */
1725 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1728 struct mgmt_mode *cp = data;
1729 struct mgmt_pending_cmd *cmd;
1733 bt_dev_dbg(hdev, "sock %p", sk);
1735 status = mgmt_bredr_support(hdev);
1737 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1740 if (cp->val != 0x00 && cp->val != 0x01)
1741 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1742 MGMT_STATUS_INVALID_PARAMS);
1746 if (!hdev_is_powered(hdev)) {
1747 bool changed = false;
1749 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1750 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1754 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1759 err = new_settings(hdev, sk);
1764 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1765 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in requested auth state — nothing to send. */
1772 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1773 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1777 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1783 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1785 mgmt_pending_remove(cmd);
1790 hci_dev_unlock(hdev);
/* hci_cmd_sync completion handler for MGMT_OP_SET_SSP.
 * On error: roll back SSP/HS flags (when enabling failed) and fail all
 * pending SET_SSP commands. On success: reconcile the SSP flag with the
 * requested value, drop HS when SSP went off, answer all pending
 * commands with the settings, broadcast, and refresh EIR. Branch
 * structure is partially elided in this view.
 */
1794 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1796 struct cmd_lookup match = { NULL, hdev };
1797 struct mgmt_pending_cmd *cmd = data;
1798 struct mgmt_mode *cp = cmd->param;
1799 u8 enable = cp->val;
1802 /* Make sure cmd still outstanding. */
1803 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1807 u8 mgmt_err = mgmt_status(err);
1809 if (enable && hci_dev_test_and_clear_flag(hdev,
1811 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1812 new_settings(hdev, NULL);
1815 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1821 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1823 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1826 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP; disable it together. */
1829 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1832 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1835 new_settings(hdev, match.sk);
1840 hci_update_eir_sync(hdev);
/* hci_cmd_sync worker for SET_SSP: optimistically set HCI_SSP_ENABLED
 * when enabling, write the mode to the controller, and undo the flag
 * if the write failed.
 */
1843 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1845 struct mgmt_pending_cmd *cmd = data;
1846 struct mgmt_mode *cp = cmd->param;
1847 bool changed = false;
1851 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1853 err = hci_write_ssp_mode_sync(hdev, cp->val);
1855 if (!err && changed)
1856 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing).
 * Requires BR/EDR and controller SSP support. Powered-off devices just
 * toggle flags (clearing HS when disabling); otherwise set_ssp_sync()
 * is queued unless already busy or already in the requested state.
 */
1861 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1863 struct mgmt_mode *cp = data;
1864 struct mgmt_pending_cmd *cmd;
1868 bt_dev_dbg(hdev, "sock %p", sk);
1870 status = mgmt_bredr_support(hdev);
1872 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1874 if (!lmp_ssp_capable(hdev))
1875 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1876 MGMT_STATUS_NOT_SUPPORTED);
1878 if (cp->val != 0x00 && cp->val != 0x01)
1879 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1880 MGMT_STATUS_INVALID_PARAMS);
1884 if (!hdev_is_powered(hdev)) {
1888 changed = !hci_dev_test_and_set_flag(hdev,
1891 changed = hci_dev_test_and_clear_flag(hdev,
1894 changed = hci_dev_test_and_clear_flag(hdev,
1897 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1900 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1905 err = new_settings(hdev, sk);
1910 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1911 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1916 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1917 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1921 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1925 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
1929 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1930 MGMT_STATUS_FAILED);
1933 mgmt_pending_remove(cmd);
1937 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_HS (High Speed / AMP).
 * Only available when CONFIG_BT_HS is built in, BR/EDR works, the
 * controller is SSP-capable and SSP is enabled. Pure flag toggle:
 * enabling while powered is rejected in the branch visible below;
 * otherwise HCI_HS_ENABLED is set/cleared and settings are reported.
 */
1941 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1943 struct mgmt_mode *cp = data;
1948 bt_dev_dbg(hdev, "sock %p", sk);
1950 if (!IS_ENABLED(CONFIG_BT_HS))
1951 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1952 MGMT_STATUS_NOT_SUPPORTED);
1954 status = mgmt_bredr_support(hdev);
1956 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1958 if (!lmp_ssp_capable(hdev))
1959 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1960 MGMT_STATUS_NOT_SUPPORTED);
1962 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1963 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1964 MGMT_STATUS_REJECTED);
1966 if (cp->val != 0x00 && cp->val != 0x01)
1967 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1968 MGMT_STATUS_INVALID_PARAMS);
/* Raced with a pending SSP change — state would be ambiguous. */
1972 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1973 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1979 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1981 if (hdev_is_powered(hdev)) {
1982 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1983 MGMT_STATUS_REJECTED);
1987 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1990 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1995 err = new_settings(hdev, sk);
1998 hci_dev_unlock(hdev);
/* hci_cmd_sync completion handler for MGMT_OP_SET_LE: fail all pending
 * SET_LE commands on error, otherwise answer them with the settings and
 * broadcast the change.
 */
2002 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2004 struct cmd_lookup match = { NULL, hdev };
2005 u8 status = mgmt_status(err);
2007 bt_dev_dbg(hdev, "err %d", err);
2010 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2015 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2017 new_settings(hdev, match.sk);
/* hci_cmd_sync worker for SET_LE.
 * When disabling: stop advertising and remove the default extended
 * advertising instance. When enabling: set HCI_LE_ENABLED. Then write
 * the LE host support bit and, if LE ended up enabled, refresh the
 * default instance's advertising/scan-response data and the passive
 * scan state. Branch structure is partially elided in this view.
 */
2023 static int set_le_sync(struct hci_dev *hdev, void *data)
2025 struct mgmt_pending_cmd *cmd = data;
2026 struct mgmt_mode *cp = cmd->param;
2031 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2032 hci_disable_advertising_sync(hdev);
2034 if (ext_adv_capable(hdev))
2035 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2037 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2040 err = hci_write_le_host_supported_sync(hdev, val, 0);
2042 /* Make sure the controller has a good default for
2043 * advertising data. Restrict the update to when LE
2044 * has actually been enabled. During power on, the
2045 * update in powered_update_hci will take care of it.
2047 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2048 if (ext_adv_capable(hdev)) {
2051 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2053 hci_update_scan_rsp_data_sync(hdev, 0x00);
2055 hci_update_adv_data_sync(hdev, 0x00);
2056 hci_update_scan_rsp_data_sync(hdev, 0x00);
2059 hci_update_passive_scan(hdev);
/* Handler for MGMT_OP_SET_LE.
 * LE-only controllers cannot have LE switched off (enable is answered
 * gracefully, disable is rejected). When unpowered or already in the
 * requested host state only flags change; otherwise set_le_sync() is
 * queued unless a conflicting SET_LE/SET_ADVERTISING is pending.
 */
2065 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2067 struct mgmt_mode *cp = data;
2068 struct mgmt_pending_cmd *cmd;
2072 bt_dev_dbg(hdev, "sock %p", sk);
2074 if (!lmp_le_capable(hdev))
2075 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2076 MGMT_STATUS_NOT_SUPPORTED);
2078 if (cp->val != 0x00 && cp->val != 0x01)
2079 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2080 MGMT_STATUS_INVALID_PARAMS);
2082 /* Bluetooth single mode LE only controllers or dual-mode
2083 * controllers configured as LE only devices, do not allow
2084 * switching LE off. These have either LE enabled explicitly
2085 * or BR/EDR has been previously switched off.
2087 * When trying to enable an already enabled LE, then gracefully
2088 * send a positive response. Trying to disable it however will
2089 * result into rejection.
2091 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2092 if (cp->val == 0x01)
2093 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2095 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2096 MGMT_STATUS_REJECTED);
2102 enabled = lmp_host_le_capable(hdev);
/* Disabling LE drops all advertising instances. */
2105 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
2107 if (!hdev_is_powered(hdev) || val == enabled) {
2108 bool changed = false;
2110 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2111 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2115 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2116 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2120 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2125 err = new_settings(hdev, sk);
2130 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2131 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2132 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2137 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2141 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2145 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2146 MGMT_STATUS_FAILED);
2149 mgmt_pending_remove(cmd);
2153 hci_dev_unlock(hdev);
2157 /* This is a helper function to test for pending mgmt commands that can
2158 * cause CoD or EIR HCI commands. We can only allow one such pending
2159 * mgmt command at a time since otherwise we cannot easily track what
2160 * the current values are, will be, and based on that calculate if a new
2161 * HCI command needs to be sent and if yes with what value.
2163 static bool pending_eir_or_class(struct hci_dev *hdev)
2165 struct mgmt_pending_cmd *cmd;
2167 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2168 switch (cmd->opcode) {
/* Opcodes that may trigger Class-of-Device or EIR updates. */
2169 case MGMT_OP_ADD_UUID:
2170 case MGMT_OP_REMOVE_UUID:
2171 case MGMT_OP_SET_DEV_CLASS:
2172 case MGMT_OP_SET_POWERED:
/* The Bluetooth base UUID in little-endian byte order; 16- and 32-bit
 * UUIDs are the base UUID with only the top four bytes varying.
 */
2180 static const u8 bluetooth_base_uuid[] = {
2181 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2182 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: anything not built on the base UUID is a
 * full 128-bit UUID; otherwise the top 32 bits decide 16 vs 32 bit
 * (return statements elided in this view).
 */
2185 static u8 get_uuid_size(const u8 *uuid)
2189 if (memcmp(uuid, bluetooth_base_uuid, 12))
2192 val = get_unaligned_le32(&uuid[12]);
/* Shared completion handler for the class/EIR-affecting commands
 * (add/remove UUID, set device class): reply with the mapped status and
 * the 3-byte device class, then free the pending command.
 */
2199 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2201 struct mgmt_pending_cmd *cmd = data;
2203 bt_dev_dbg(hdev, "err %d", err);
2205 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2206 mgmt_status(err), hdev->dev_class, 3);
2208 mgmt_pending_free(cmd);
/* hci_cmd_sync worker for ADD_UUID: push the updated Class of Device,
 * then the updated EIR data, to the controller.
 */
2211 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2215 err = hci_update_class_sync(hdev);
2219 return hci_update_eir_sync(hdev);
/* Handler for MGMT_OP_ADD_UUID.
 * Serialised against other CoD/EIR-affecting commands via
 * pending_eir_or_class(); allocates a bt_uuid entry, appends it to
 * hdev->uuids and queues add_uuid_sync() to refresh class and EIR.
 */
2222 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2224 struct mgmt_cp_add_uuid *cp = data;
2225 struct mgmt_pending_cmd *cmd;
2226 struct bt_uuid *uuid;
2229 bt_dev_dbg(hdev, "sock %p", sk);
2233 if (pending_eir_or_class(hdev)) {
2234 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2239 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2245 memcpy(uuid->uuid, cp->uuid, 16);
2246 uuid->svc_hint = cp->svc_hint;
2247 uuid->size = get_uuid_size(cp->uuid);
2249 list_add_tail(&uuid->list, &hdev->uuids);
2251 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2257 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2259 mgmt_pending_free(cmd);
2264 hci_dev_unlock(hdev);
/* Arm the service cache on a powered device: set HCI_SERVICE_CACHE and
 * schedule the flush work if the flag was not already set. Return
 * values are elided in this view.
 */
2268 static bool enable_service_cache(struct hci_dev *hdev)
2270 if (!hdev_is_powered(hdev))
2273 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2274 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_cmd_sync worker for REMOVE_UUID: refresh Class of Device, then
 * EIR data (same sequence as add_uuid_sync).
 */
2282 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2286 err = hci_update_class_sync(hdev);
2290 return hci_update_eir_sync(hdev);
/* Handler for MGMT_OP_REMOVE_UUID.
 * An all-zero UUID clears the whole list (optionally deferring the HCI
 * update via the service cache); otherwise matching entries are removed
 * and INVALID_PARAMS is returned if none matched. The actual controller
 * update is queued to remove_uuid_sync().
 */
2293 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2296 struct mgmt_cp_remove_uuid *cp = data;
2297 struct mgmt_pending_cmd *cmd;
2298 struct bt_uuid *match, *tmp;
2299 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2302 bt_dev_dbg(hdev, "sock %p", sk);
2306 if (pending_eir_or_class(hdev)) {
2307 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard: remove every registered UUID. */
2312 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2313 hci_uuids_clear(hdev);
2315 if (enable_service_cache(hdev)) {
2316 err = mgmt_cmd_complete(sk, hdev->id,
2317 MGMT_OP_REMOVE_UUID,
2318 0, hdev->dev_class, 3);
2327 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2328 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2331 list_del(&match->list);
2337 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2338 MGMT_STATUS_INVALID_PARAMS);
2343 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2349 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2350 mgmt_class_complete);
2352 mgmt_pending_free(cmd);
2355 hci_dev_unlock(hdev);
/* hci_cmd_sync worker for SET_DEV_CLASS: if the service cache was
 * active, flush it (cancel the work and resync EIR) before writing the
 * new Class of Device.
 */
2359 static int set_class_sync(struct hci_dev *hdev, void *data)
2363 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2364 cancel_delayed_work_sync(&hdev->service_cache);
2365 err = hci_update_eir_sync(hdev);
2371 return hci_update_class_sync(hdev);
/* Handler for MGMT_OP_SET_DEV_CLASS.
 * BR/EDR only. The low two bits of minor and high three bits of major
 * are reserved and must be zero. The new class is stored immediately;
 * the controller write is skipped when unpowered and otherwise queued
 * to set_class_sync().
 */
2374 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2377 struct mgmt_cp_set_dev_class *cp = data;
2378 struct mgmt_pending_cmd *cmd;
2381 bt_dev_dbg(hdev, "sock %p", sk);
2383 if (!lmp_bredr_capable(hdev))
2384 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2385 MGMT_STATUS_NOT_SUPPORTED);
2389 if (pending_eir_or_class(hdev)) {
2390 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2395 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2396 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2397 MGMT_STATUS_INVALID_PARAMS);
2401 hdev->major_class = cp->major;
2402 hdev->minor_class = cp->minor;
2404 if (!hdev_is_powered(hdev)) {
2405 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2406 hdev->dev_class, 3);
2410 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2416 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2417 mgmt_class_complete);
2419 mgmt_pending_free(cmd);
2422 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_LINK_KEYS.
 * Validates count against the wire-format maximum and the exact
 * struct_size()-derived length, checks every key's address type and key
 * type, then replaces the whole link-key store. Debug combination keys
 * and keys on the blocked-key list are skipped.
 */
2426 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2429 struct mgmt_cp_load_link_keys *cp = data;
2430 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2431 sizeof(struct mgmt_link_key_info));
2432 u16 key_count, expected_len;
2436 bt_dev_dbg(hdev, "sock %p", sk);
2438 if (!lmp_bredr_capable(hdev))
2439 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2440 MGMT_STATUS_NOT_SUPPORTED);
2442 key_count = __le16_to_cpu(cp->key_count);
2443 if (key_count > max_key_count) {
2444 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2446 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2447 MGMT_STATUS_INVALID_PARAMS);
2450 expected_len = struct_size(cp, keys, key_count);
2451 if (expected_len != len) {
2452 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2454 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2455 MGMT_STATUS_INVALID_PARAMS);
2458 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2459 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2460 MGMT_STATUS_INVALID_PARAMS);
2462 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: validate every entry before touching any state. */
2465 for (i = 0; i < key_count; i++) {
2466 struct mgmt_link_key_info *key = &cp->keys[i];
2468 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2469 return mgmt_cmd_status(sk, hdev->id,
2470 MGMT_OP_LOAD_LINK_KEYS,
2471 MGMT_STATUS_INVALID_PARAMS);
/* Load replaces the store wholesale. */
2476 hci_link_keys_clear(hdev);
2479 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2481 changed = hci_dev_test_and_clear_flag(hdev,
2482 HCI_KEEP_DEBUG_KEYS);
2485 new_settings(hdev, NULL);
2487 for (i = 0; i < key_count; i++) {
2488 struct mgmt_link_key_info *key = &cp->keys[i];
2490 if (hci_is_blocked_key(hdev,
2491 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2493 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2498 /* Always ignore debug keys and require a new pairing if
2499 * the user wants to use them.
2501 if (key->type == HCI_LK_DEBUG_COMBINATION)
2504 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2505 key->type, key->pin_len, NULL);
2508 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2510 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for the given address, skipping the
 * socket that issued the unpair command.
 */
2515 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2516 u8 addr_type, struct sock *skip_sk)
2518 struct mgmt_ev_device_unpaired ev;
2520 bacpy(&ev.addr.bdaddr, bdaddr);
2521 ev.addr.type = addr_type;
2523 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Handler for MGMT_OP_UNPAIR_DEVICE.
 * BR/EDR: drop the stored link key (NOT_PAIRED when none existed).
 * LE: cancel any in-progress SMP pairing (removing LTK/IRK) and disable
 * auto-connect parameters. When cp->disconnect is set and a live
 * connection exists, the link is aborted and the reply is deferred to
 * the pending command; otherwise the success reply and the
 * DEVICE_UNPAIRED event are sent immediately. Branch structure is
 * partially elided in this view.
 */
2527 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2530 struct mgmt_cp_unpair_device *cp = data;
2531 struct mgmt_rp_unpair_device rp;
2532 struct hci_conn_params *params;
2533 struct mgmt_pending_cmd *cmd;
2534 struct hci_conn *conn;
2538 memset(&rp, 0, sizeof(rp));
2539 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2540 rp.addr.type = cp->addr.type;
2542 if (!bdaddr_type_is_valid(cp->addr.type))
2543 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2544 MGMT_STATUS_INVALID_PARAMS,
2547 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2548 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2549 MGMT_STATUS_INVALID_PARAMS,
2554 if (!hdev_is_powered(hdev)) {
2555 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2556 MGMT_STATUS_NOT_POWERED, &rp,
2561 if (cp->addr.type == BDADDR_BREDR) {
2562 /* If disconnection is requested, then look up the
2563 * connection. If the remote device is connected, it
2564 * will be later used to terminate the link.
2566 * Setting it to NULL explicitly will cause no
2567 * termination of the link.
2570 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2575 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2577 err = mgmt_cmd_complete(sk, hdev->id,
2578 MGMT_OP_UNPAIR_DEVICE,
2579 MGMT_STATUS_NOT_PAIRED, &rp,
2587 /* LE address type */
2588 addr_type = le_addr_type(cp->addr.type);
2590 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2591 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2593 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2594 MGMT_STATUS_NOT_PAIRED, &rp,
2599 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2601 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2606 /* Defer clearing up the connection parameters until closing to
2607 * give a chance of keeping them if a repairing happens.
2609 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2611 /* Disable auto-connection parameters if present */
2612 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2614 if (params->explicit_connect)
2615 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2617 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2620 /* If disconnection is not requested, then clear the connection
2621 * variable so that the link is not terminated.
2623 if (!cp->disconnect)
2627 /* If the connection variable is set, then termination of the
2628 * link is requested.
2631 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2633 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2637 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2644 cmd->cmd_complete = addr_cmd_complete;
2646 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2648 mgmt_pending_remove(cmd);
2651 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_DISCONNECT.
 * Looks up the ACL (BR/EDR) or LE connection for the given address and
 * requests a remote-user-terminated disconnect; the final reply is
 * delivered by the pending command's generic_cmd_complete when the
 * disconnect finishes.
 */
2655 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2658 struct mgmt_cp_disconnect *cp = data;
2659 struct mgmt_rp_disconnect rp;
2660 struct mgmt_pending_cmd *cmd;
2661 struct hci_conn *conn;
2664 bt_dev_dbg(hdev, "sock %p", sk);
2666 memset(&rp, 0, sizeof(rp));
2667 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2668 rp.addr.type = cp->addr.type;
2670 if (!bdaddr_type_is_valid(cp->addr.type))
2671 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2672 MGMT_STATUS_INVALID_PARAMS,
2677 if (!test_bit(HCI_UP, &hdev->flags)) {
2678 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2679 MGMT_STATUS_NOT_POWERED, &rp,
2684 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2685 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2686 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2690 if (cp->addr.type == BDADDR_BREDR)
2691 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2694 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2695 le_addr_type(cp->addr.type));
2697 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2698 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2699 MGMT_STATUS_NOT_CONNECTED, &rp,
2704 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2710 cmd->cmd_complete = generic_cmd_complete;
2712 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2714 mgmt_pending_remove(cmd);
2717 hci_dev_unlock(hdev);
/* Translate an HCI link type + address type pair into the mgmt BDADDR_*
 * address type used on the wire (outer switch cases are elided in this
 * view; non-public LE address types fall back to BDADDR_LE_RANDOM and
 * everything else to BDADDR_BREDR).
 */
2721 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2723 switch (link_type) {
2725 switch (addr_type) {
2726 case ADDR_LE_DEV_PUBLIC:
2727 return BDADDR_LE_PUBLIC;
2730 /* Fallback to LE Random address type */
2731 return BDADDR_LE_RANDOM;
2735 /* Fallback to BR/EDR type */
2736 return BDADDR_BREDR;
/* Handler for MGMT_OP_GET_CONNECTIONS.
 * Two passes over the connection hash: count mgmt-visible connections
 * to size the reply, then fill in address/type entries, skipping
 * SCO/eSCO links. The response length is recomputed from the final
 * count in case entries were filtered.
 */
2740 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2743 struct mgmt_rp_get_connections *rp;
2748 bt_dev_dbg(hdev, "sock %p", sk);
2752 if (!hdev_is_powered(hdev)) {
2753 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2754 MGMT_STATUS_NOT_POWERED);
2759 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2760 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2764 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2771 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2772 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2774 bacpy(&rp->addr[i].bdaddr, &c->dst);
2775 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2776 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2781 rp->conn_count = cpu_to_le16(i);
2783 /* Recalculate length in case of filtered SCO connections, etc */
2784 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2785 struct_size(rp, addr, i));
2790 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY command and send the corresponding
 * HCI negative reply for the given address; the pending command is
 * dropped again if the HCI send fails.
 */
2794 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2795 struct mgmt_cp_pin_code_neg_reply *cp)
2797 struct mgmt_pending_cmd *cmd;
2800 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2805 cmd->cmd_complete = addr_cmd_complete;
2807 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2808 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2810 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller via HCI_OP_PIN_CODE_REPLY. Requires a powered
 * controller and an existing ACL connection to the address. If the
 * connection demands BT_SECURITY_HIGH, only a full 16-byte PIN is
 * accepted; otherwise a negative reply is sent on the user's behalf.
 */
2815 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2818 struct hci_conn *conn;
2819 struct mgmt_cp_pin_code_reply *cp = data;
2820 struct hci_cp_pin_code_reply reply;
2821 struct mgmt_pending_cmd *cmd;
2824 bt_dev_dbg(hdev, "sock %p", sk);
2828 if (!hdev_is_powered(hdev)) {
2829 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2830 MGMT_STATUS_NOT_POWERED);
2834 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2836 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2837 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; reject shorter ones by
 * sending a negative reply to the controller on the user's behalf.
 */
2841 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2842 struct mgmt_cp_pin_code_neg_reply ncp;
2844 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2846 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2848 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2850 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2851 MGMT_STATUS_INVALID_PARAMS);
2856 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2862 cmd->cmd_complete = addr_cmd_complete;
2864 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2865 reply.pin_len = cp->pin_len;
2866 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2868 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
/* On send failure the pending command is discarded */
2870 mgmt_pending_remove(cmd);
2873 hci_dev_unlock(hdev);
2877 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2880 struct mgmt_cp_set_io_capability *cp = data;
2882 bt_dev_dbg(hdev, "sock %p", sk);
2884 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2885 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2886 MGMT_STATUS_INVALID_PARAMS);
2890 hdev->io_capability = cp->io_capability;
2892 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2894 hci_dev_unlock(hdev);
2896 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2900 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2902 struct hci_dev *hdev = conn->hdev;
2903 struct mgmt_pending_cmd *cmd;
2905 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2906 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2909 if (cmd->user_data != conn)
/* Complete a pending Pair Device command with the given mgmt status,
 * detach all pairing callbacks from the connection so no further
 * notifications arrive, and drop the command's connection reference.
 */
2918 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2920 struct mgmt_rp_pair_device rp;
2921 struct hci_conn *conn = cmd->user_data;
2924 bacpy(&rp.addr.bdaddr, &conn->dst);
2925 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2927 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2928 status, &rp, sizeof(rp));
2930 /* So we don't get further callbacks for this connection */
2931 conn->connect_cfm_cb = NULL;
2932 conn->security_cfm_cb = NULL;
2933 conn->disconn_cfm_cb = NULL;
2935 hci_conn_drop(conn);
2937 /* The device is paired so there is no need to remove
2938 * its connection parameters anymore.
2940 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2947 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2949 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2950 struct mgmt_pending_cmd *cmd;
2952 cmd = find_pairing(conn);
2954 cmd->cmd_complete(cmd, status);
2955 mgmt_pending_remove(cmd);
/* BR/EDR connection callback: translate the HCI status and complete
 * the pending Pair Device command, if any.
 */
2959 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2961 struct mgmt_pending_cmd *cmd;
2963 BT_DBG("status %u", status);
2965 cmd = find_pairing(conn);
2967 BT_DBG("Unable to find a pending command");
2971 cmd->cmd_complete(cmd, mgmt_status(status));
2972 mgmt_pending_remove(cmd);
/* LE connection callback: like pairing_complete_cb() but for LE links.
 * NOTE(review): an early-return condition (original lines 2981-2983)
 * is not visible in this excerpt.
 */
2975 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2977 struct mgmt_pending_cmd *cmd;
2979 BT_DBG("status %u", status);
2984 cmd = find_pairing(conn);
2986 BT_DBG("Unable to find a pending command");
2990 cmd->cmd_complete(cmd, mgmt_status(status));
2991 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device.
 * Validates the address type and IO capability, refuses when unpowered
 * or already paired, establishes an ACL (BR/EDR) or LE connection, and
 * installs pairing callbacks so the command completes once pairing
 * finishes (or immediately if the link is already secure).
 */
2994 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2997 struct mgmt_cp_pair_device *cp = data;
2998 struct mgmt_rp_pair_device rp;
2999 struct mgmt_pending_cmd *cmd;
3000 u8 sec_level, auth_type;
3001 struct hci_conn *conn;
3004 bt_dev_dbg(hdev, "sock %p", sk);
3006 memset(&rp, 0, sizeof(rp));
3007 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3008 rp.addr.type = cp->addr.type;
3010 if (!bdaddr_type_is_valid(cp->addr.type))
3011 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3012 MGMT_STATUS_INVALID_PARAMS,
3015 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3016 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3017 MGMT_STATUS_INVALID_PARAMS,
3022 if (!hdev_is_powered(hdev)) {
3023 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3024 MGMT_STATUS_NOT_POWERED, &rp,
3029 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3030 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3031 MGMT_STATUS_ALREADY_PAIRED, &rp,
3036 sec_level = BT_SECURITY_MEDIUM;
3037 auth_type = HCI_AT_DEDICATED_BONDING;
3039 if (cp->addr.type == BDADDR_BREDR) {
3040 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3041 auth_type, CONN_REASON_PAIR_DEVICE);
3043 u8 addr_type = le_addr_type(cp->addr.type);
3044 struct hci_conn_params *p;
3046 /* When pairing a new device, it is expected to remember
3047 * this device for future connections. Adding the connection
3048 * parameter information ahead of time allows tracking
3049 * of the peripheral preferred values and will speed up any
3050 * further connection establishment.
3052 * If connection parameters already exist, then they
3053 * will be kept and this function does nothing.
3055 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
/* NOTE(review): hci_conn_params_add() can return NULL on allocation
 * failure, and p is dereferenced below without a visible check —
 * confirm against the upstream NULL-deref fix for pair_device().
 */
3057 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3058 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3060 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3061 sec_level, HCI_LE_CONN_TIMEOUT,
3062 CONN_REASON_PAIR_DEVICE);
/* Translate connect errors into mgmt status codes */
3068 if (PTR_ERR(conn) == -EBUSY)
3069 status = MGMT_STATUS_BUSY;
3070 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3071 status = MGMT_STATUS_NOT_SUPPORTED;
3072 else if (PTR_ERR(conn) == -ECONNREFUSED)
3073 status = MGMT_STATUS_REJECTED;
3075 status = MGMT_STATUS_CONNECT_FAILED;
3077 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3078 status, &rp, sizeof(rp));
/* An existing connect callback means pairing is already in progress */
3082 if (conn->connect_cfm_cb) {
3083 hci_conn_drop(conn);
3084 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3085 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3089 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3092 hci_conn_drop(conn);
3096 cmd->cmd_complete = pairing_complete;
3098 /* For LE, just connecting isn't a proof that the pairing finished */
3099 if (cp->addr.type == BDADDR_BREDR) {
3100 conn->connect_cfm_cb = pairing_complete_cb;
3101 conn->security_cfm_cb = pairing_complete_cb;
3102 conn->disconn_cfm_cb = pairing_complete_cb;
3104 conn->connect_cfm_cb = le_pairing_complete_cb;
3105 conn->security_cfm_cb = le_pairing_complete_cb;
3106 conn->disconn_cfm_cb = le_pairing_complete_cb;
3109 conn->io_capability = cp->io_cap;
3110 cmd->user_data = hci_conn_get(conn);
/* If the link is already up and secure, complete immediately */
3112 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3113 hci_conn_security(conn, sec_level, auth_type, true)) {
3114 cmd->cmd_complete(cmd, 0);
3115 mgmt_pending_remove(cmd);
3121 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * command for the given address. Completes the pending command with
 * MGMT_STATUS_CANCELLED, removes any stored link key / SMP state, and
 * tears down the link if it was created solely for pairing.
 */
3125 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3128 struct mgmt_addr_info *addr = data;
3129 struct mgmt_pending_cmd *cmd;
3130 struct hci_conn *conn;
3133 bt_dev_dbg(hdev, "sock %p", sk);
3137 if (!hdev_is_powered(hdev)) {
3138 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3139 MGMT_STATUS_NOT_POWERED);
3143 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3145 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3146 MGMT_STATUS_INVALID_PARAMS);
3150 conn = cmd->user_data;
/* The address must match the one the pending pairing targets */
3152 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3153 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3154 MGMT_STATUS_INVALID_PARAMS);
3158 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3159 mgmt_pending_remove(cmd);
3161 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3162 addr, sizeof(*addr));
3164 /* Since user doesn't want to proceed with the connection, abort any
3165 * ongoing pairing and then terminate the link if it was created
3166 * because of the pair device action.
3168 if (addr->type == BDADDR_BREDR)
3169 hci_remove_link_key(hdev, &addr->bdaddr);
3171 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3172 le_addr_type(addr->type));
3174 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3175 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3178 hci_dev_unlock(hdev);
/* Common implementation for all user pairing responses (PIN neg reply,
 * user confirm/passkey replies and their negatives). LE addresses are
 * answered through SMP directly; BR/EDR responses are forwarded to the
 * controller via the given hci_op, tracked as a pending mgmt command.
 */
3182 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3183 struct mgmt_addr_info *addr, u16 mgmt_op,
3184 u16 hci_op, __le32 passkey)
3186 struct mgmt_pending_cmd *cmd;
3187 struct hci_conn *conn;
3192 if (!hdev_is_powered(hdev)) {
3193 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3194 MGMT_STATUS_NOT_POWERED, addr,
3199 if (addr->type == BDADDR_BREDR)
3200 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3202 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3203 le_addr_type(addr->type));
3206 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3207 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely inside SMP */
3212 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3213 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3215 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3216 MGMT_STATUS_SUCCESS, addr,
3219 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3220 MGMT_STATUS_FAILED, addr,
3226 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3232 cmd->cmd_complete = addr_cmd_complete;
3234 /* Continue with pairing via HCI */
3235 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3236 struct hci_cp_user_passkey_reply cp;
3238 bacpy(&cp.bdaddr, &addr->bdaddr);
3239 cp.passkey = passkey;
3240 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3242 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3246 mgmt_pending_remove(cmd);
3249 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper around user_pairing_resp()
 * with the PIN Code Negative Reply HCI opcode and no passkey.
 */
3253 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3254 void *data, u16 len)
3256 struct mgmt_cp_pin_code_neg_reply *cp = data;
3258 bt_dev_dbg(hdev, "sock %p", sk);
3260 return user_pairing_resp(sk, hdev, &cp->addr,
3261 MGMT_OP_PIN_CODE_NEG_REPLY,
3262 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY: validate the fixed-size parameter block,
 * then delegate to user_pairing_resp() with no passkey.
 */
3265 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3268 struct mgmt_cp_user_confirm_reply *cp = data;
3270 bt_dev_dbg(hdev, "sock %p", sk);
3272 if (len != sizeof(*cp))
3273 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3274 MGMT_STATUS_INVALID_PARAMS);
3276 return user_pairing_resp(sk, hdev, &cp->addr,
3277 MGMT_OP_USER_CONFIRM_REPLY,
3278 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper around
 * user_pairing_resp() with the negative confirm HCI opcode.
 */
3281 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3282 void *data, u16 len)
3284 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3286 bt_dev_dbg(hdev, "sock %p", sk);
3288 return user_pairing_resp(sk, hdev, &cp->addr,
3289 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3290 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY: forward the user-entered passkey via
 * user_pairing_resp().
 */
3293 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3296 struct mgmt_cp_user_passkey_reply *cp = data;
3298 bt_dev_dbg(hdev, "sock %p", sk);
3300 return user_pairing_resp(sk, hdev, &cp->addr,
3301 MGMT_OP_USER_PASSKEY_REPLY,
3302 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper around
 * user_pairing_resp() with the negative passkey HCI opcode.
 */
3305 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3306 void *data, u16 len)
3308 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3310 bt_dev_dbg(hdev, "sock %p", sk);
3312 return user_pairing_resp(sk, hdev, &cp->addr,
3313 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3314 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* If the current advertising instance carries any of the given flags
 * (e.g. local name / appearance), cancel its timeout and reschedule
 * the next instance so the updated data takes effect.
 */
3317 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3319 struct adv_info *adv_instance;
3321 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3325 /* stop if current instance doesn't need to be changed */
3326 if (!(adv_instance->flags & flags))
3329 cancel_adv_timeout(hdev);
3331 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3335 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
/* hci_cmd_sync callback: refresh advertising after a local name change */
3340 static int name_changed_sync(struct hci_dev *hdev, void *data)
3342 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
/* Completion callback for Set Local Name: report status to the caller
 * and, if advertising is active, queue a refresh of the advertised name.
 */
3345 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3347 struct mgmt_pending_cmd *cmd = data;
3348 struct mgmt_cp_set_local_name *cp = cmd->param;
3349 u8 status = mgmt_status(err);
3351 bt_dev_dbg(hdev, "err %d", err);
/* Bail out if this command is no longer the pending one */
3353 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3357 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3360 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3363 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3364 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3367 mgmt_pending_remove(cmd);
/* hci_cmd_sync work: push the new local name to the controller (name +
 * EIR for BR/EDR, scan response data for LE advertising).
 */
3370 static int set_name_sync(struct hci_dev *hdev, void *data)
3372 if (lmp_bredr_capable(hdev)) {
3373 hci_update_name_sync(hdev);
3374 hci_update_eir_sync(hdev);
3377 /* The name is stored in the scan response data and so
3378 * no need to update the advertising data here.
3380 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3381 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_LOCAL_NAME handler: store the new long and short names.
 * Unchanged names and the unpowered case complete immediately (the
 * latter also emits a Local Name Changed event); otherwise the update
 * is queued through hci_cmd_sync with set_name_complete() as callback.
 */
3386 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3389 struct mgmt_cp_set_local_name *cp = data;
3390 struct mgmt_pending_cmd *cmd;
3393 bt_dev_dbg(hdev, "sock %p", sk);
3397 /* If the old values are the same as the new ones just return a
3398 * direct command complete event.
3400 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3401 !memcmp(hdev->short_name, cp->short_name,
3402 sizeof(hdev->short_name))) {
3403 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3408 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3410 if (!hdev_is_powered(hdev)) {
3411 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3413 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3418 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3419 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3420 ext_info_changed(hdev, sk);
3425 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3429 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3433 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3434 MGMT_STATUS_FAILED);
3437 mgmt_pending_remove(cmd);
/* Store the name once the sync command has been queued successfully */
3442 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3445 hci_dev_unlock(hdev);
/* hci_cmd_sync callback: refresh advertising after an appearance change */
3449 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3451 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
/* MGMT_OP_SET_APPEARANCE handler (LE only): store the new appearance
 * value and, if it changed while advertising, queue an advertising
 * refresh and notify extended-info listeners.
 */
3454 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3457 struct mgmt_cp_set_appearance *cp = data;
3461 bt_dev_dbg(hdev, "sock %p", sk);
3463 if (!lmp_le_capable(hdev))
3464 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3465 MGMT_STATUS_NOT_SUPPORTED);
3467 appearance = le16_to_cpu(cp->appearance);
3471 if (hdev->appearance != appearance) {
3472 hdev->appearance = appearance;
3474 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3475 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3478 ext_info_changed(hdev, sk);
3481 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3484 hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report the supported,
 * selected and configurable PHY bitmasks of the controller.
 */
3489 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3490 void *data, u16 len)
3492 struct mgmt_rp_get_phy_configuration rp;
3494 bt_dev_dbg(hdev, "sock %p", sk);
3498 memset(&rp, 0, sizeof(rp));
3500 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3501 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3502 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3504 hci_dev_unlock(hdev);
3506 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3510 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3512 struct mgmt_ev_phy_configuration_changed ev;
3514 memset(&ev, 0, sizeof(ev));
3516 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3518 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion callback for Set PHY Configuration: derive the mgmt
 * status from the err code or the HCI command response skb, report it,
 * broadcast the change on success, and free skb + pending command.
 */
3522 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3524 struct mgmt_pending_cmd *cmd = data;
3525 struct sk_buff *skb = cmd->skb;
3526 u8 status = mgmt_status(err);
3528 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3533 status = MGMT_STATUS_FAILED;
3534 else if (IS_ERR(skb))
3535 status = mgmt_status(PTR_ERR(skb));
/* First byte of the response is the HCI status code */
3537 status = mgmt_status(skb->data[0]);
3540 bt_dev_dbg(hdev, "status %d", status);
3543 mgmt_cmd_status(cmd->sk, hdev->id,
3544 MGMT_OP_SET_PHY_CONFIGURATION, status);
3546 mgmt_cmd_complete(cmd->sk, hdev->id,
3547 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3550 mgmt_phy_configuration_changed(hdev, cmd->sk);
3553 if (skb && !IS_ERR(skb))
3556 mgmt_pending_remove(cmd);
/* hci_cmd_sync work: translate the mgmt PHY selection bitmask into an
 * HCI LE Set Default PHY command. all_phys bits tell the controller
 * "no preference" for TX (0x01) / RX (0x02) when no LE PHY was chosen.
 */
3559 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3561 struct mgmt_pending_cmd *cmd = data;
3562 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3563 struct hci_cp_le_set_default_phy cp_phy;
3564 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3566 memset(&cp_phy, 0, sizeof(cp_phy));
3568 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3569 cp_phy.all_phys |= 0x01;
3571 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3572 cp_phy.all_phys |= 0x02;
3574 if (selected_phys & MGMT_PHY_LE_1M_TX)
3575 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3577 if (selected_phys & MGMT_PHY_LE_2M_TX)
3578 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3580 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3581 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3583 if (selected_phys & MGMT_PHY_LE_1M_RX)
3584 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3586 if (selected_phys & MGMT_PHY_LE_2M_RX)
3587 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3589 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3590 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
/* Response skb is kept on the pending command for the completion cb */
3592 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3593 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
/* MGMT_OP_SET_PHY_CONFIGURATION handler: validate the requested PHY
 * bitmask against supported/configurable PHYs, apply the BR/EDR part
 * immediately by recomputing hdev->pkt_type, and queue the LE part as
 * an HCI LE Set Default PHY command via hci_cmd_sync.
 */
3598 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3599 void *data, u16 len)
3601 struct mgmt_cp_set_phy_configuration *cp = data;
3602 struct mgmt_pending_cmd *cmd;
3603 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3604 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3605 bool changed = false;
3608 bt_dev_dbg(hdev, "sock %p", sk);
3610 configurable_phys = get_configurable_phys(hdev);
3611 supported_phys = get_supported_phys(hdev);
3612 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Selecting an unsupported PHY is an error */
3614 if (selected_phys & ~supported_phys)
3615 return mgmt_cmd_status(sk, hdev->id,
3616 MGMT_OP_SET_PHY_CONFIGURATION,
3617 MGMT_STATUS_INVALID_PARAMS);
3619 unconfigure_phys = supported_phys & ~configurable_phys;
/* Non-configurable PHYs must always stay selected */
3621 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3622 return mgmt_cmd_status(sk, hdev->id,
3623 MGMT_OP_SET_PHY_CONFIGURATION,
3624 MGMT_STATUS_INVALID_PARAMS);
3626 if (selected_phys == get_selected_phys(hdev))
3627 return mgmt_cmd_complete(sk, hdev->id,
3628 MGMT_OP_SET_PHY_CONFIGURATION,
3633 if (!hdev_is_powered(hdev)) {
3634 err = mgmt_cmd_status(sk, hdev->id,
3635 MGMT_OP_SET_PHY_CONFIGURATION,
3636 MGMT_STATUS_REJECTED);
3640 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3641 err = mgmt_cmd_status(sk, hdev->id,
3642 MGMT_OP_SET_PHY_CONFIGURATION,
/* Build the BR/EDR packet-type mask. Multi-slot basic-rate packets are
 * enabled by setting their bits; EDR packet types use inverted logic
 * ("2DH1 not allowed" style bits), hence the clear-on-select below.
 */
3647 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3648 pkt_type |= (HCI_DH3 | HCI_DM3);
3650 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3652 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3653 pkt_type |= (HCI_DH5 | HCI_DM5);
3655 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3657 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3658 pkt_type &= ~HCI_2DH1;
3660 pkt_type |= HCI_2DH1;
3662 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3663 pkt_type &= ~HCI_2DH3;
3665 pkt_type |= HCI_2DH3;
3667 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3668 pkt_type &= ~HCI_2DH5;
3670 pkt_type |= HCI_2DH5;
3672 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3673 pkt_type &= ~HCI_3DH1;
3675 pkt_type |= HCI_3DH1;
3677 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3678 pkt_type &= ~HCI_3DH3;
3680 pkt_type |= HCI_3DH3;
3682 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3683 pkt_type &= ~HCI_3DH5;
3685 pkt_type |= HCI_3DH5;
3687 if (pkt_type != hdev->pkt_type) {
3688 hdev->pkt_type = pkt_type;
/* If the LE selection is unchanged, only the BR/EDR part applied:
 * report completion (and a changed event if pkt_type changed).
 */
3692 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3693 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3695 mgmt_phy_configuration_changed(hdev, sk);
3697 err = mgmt_cmd_complete(sk, hdev->id,
3698 MGMT_OP_SET_PHY_CONFIGURATION,
3704 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3709 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
3710 set_default_phy_complete);
3713 err = mgmt_cmd_status(sk, hdev->id,
3714 MGMT_OP_SET_PHY_CONFIGURATION,
3715 MGMT_STATUS_FAILED);
3718 mgmt_pending_remove(cmd);
3722 hci_dev_unlock(hdev);
3727 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3730 int err = MGMT_STATUS_SUCCESS;
3731 struct mgmt_cp_set_blocked_keys *keys = data;
3732 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3733 sizeof(struct mgmt_blocked_key_info));
3734 u16 key_count, expected_len;
3737 bt_dev_dbg(hdev, "sock %p", sk);
3739 key_count = __le16_to_cpu(keys->key_count);
3740 if (key_count > max_key_count) {
3741 bt_dev_err(hdev, "too big key_count value %u", key_count);
3742 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3743 MGMT_STATUS_INVALID_PARAMS);
3746 expected_len = struct_size(keys, keys, key_count);
3747 if (expected_len != len) {
3748 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3750 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3751 MGMT_STATUS_INVALID_PARAMS);
3756 hci_blocked_keys_clear(hdev);
3758 for (i = 0; i < keys->key_count; ++i) {
3759 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3762 err = MGMT_STATUS_NO_RESOURCES;
3766 b->type = keys->keys[i].type;
3767 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3768 list_add_rcu(&b->list, &hdev->blocked_keys);
3770 hci_dev_unlock(hdev);
3772 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband speech
 * setting. Only available when the controller declares the quirk;
 * changing the value while powered is rejected. Emits New Settings
 * when the flag actually changed.
 */
3776 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3777 void *data, u16 len)
3779 struct mgmt_mode *cp = data;
3781 bool changed = false;
3783 bt_dev_dbg(hdev, "sock %p", sk);
3785 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3786 return mgmt_cmd_status(sk, hdev->id,
3787 MGMT_OP_SET_WIDEBAND_SPEECH,
3788 MGMT_STATUS_NOT_SUPPORTED);
3790 if (cp->val != 0x00 && cp->val != 0x01)
3791 return mgmt_cmd_status(sk, hdev->id,
3792 MGMT_OP_SET_WIDEBAND_SPEECH,
3793 MGMT_STATUS_INVALID_PARAMS);
/* Value changes are only permitted while the controller is down */
3797 if (hdev_is_powered(hdev) &&
3798 !!cp->val != hci_dev_test_flag(hdev,
3799 HCI_WIDEBAND_SPEECH_ENABLED)) {
3800 err = mgmt_cmd_status(sk, hdev->id,
3801 MGMT_OP_SET_WIDEBAND_SPEECH,
3802 MGMT_STATUS_REJECTED);
3807 changed = !hci_dev_test_and_set_flag(hdev,
3808 HCI_WIDEBAND_SPEECH_ENABLED);
3810 changed = hci_dev_test_and_clear_flag(hdev,
3811 HCI_WIDEBAND_SPEECH_ENABLED);
3813 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3818 err = new_settings(hdev, sk);
3821 hci_dev_unlock(hdev);
/* MGMT_OP_READ_CONTROLLER_CAP handler: build an EIR-style capability
 * list (security flags, max encryption key sizes, LE TX power range)
 * based on which HCI commands the controller supports.
 */
3825 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3826 void *data, u16 data_len)
3829 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3832 u8 tx_power_range[2];
3834 bt_dev_dbg(hdev, "sock %p", sk);
3836 memset(&buf, 0, sizeof(buf));
3840 /* When the Read Simple Pairing Options command is supported, then
3841 * the remote public key validation is supported.
3843 * Alternatively, when Microsoft extensions are available, they can
3844 * indicate support for public key validation as well.
3846 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3847 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3849 flags |= 0x02; /* Remote public key validation (LE) */
3851 /* When the Read Encryption Key Size command is supported, then the
3852 * encryption key size is enforced.
3854 if (hdev->commands[20] & 0x10)
3855 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3857 flags |= 0x08; /* Encryption key size enforcement (LE) */
3859 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3862 /* When the Read Simple Pairing Options command is supported, then
3863 * also max encryption key size information is provided.
3865 if (hdev->commands[41] & 0x08)
3866 cap_len = eir_append_le16(rp->cap, cap_len,
3867 MGMT_CAP_MAX_ENC_KEY_SIZE,
3868 hdev->max_enc_key_size)
3870 cap_len = eir_append_le16(rp->cap, cap_len,
3871 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3872 SMP_MAX_ENC_KEY_SIZE);
3874 /* Append the min/max LE tx power parameters if we were able to fetch
3875 * it from the controller
3877 if (hdev->commands[38] & 0x80) {
3878 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3879 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3880 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3884 rp->cap_len = cpu_to_le16(cap_len);
3886 hci_dev_unlock(hdev);
3888 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3889 rp, sizeof(*rp) + cap_len);
/* Experimental feature UUIDs. Each 16-byte array stores the UUID shown
 * in the preceding comment in reversed (little-endian) byte order, as
 * used on the wire by the mgmt protocol.
 */
3892 #ifdef CONFIG_BT_FEATURE_DEBUG
3893 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3894 static const u8 debug_uuid[16] = {
3895 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3896 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3900 /* 330859bc-7506-492d-9370-9a6f0614037f */
3901 static const u8 quality_report_uuid[16] = {
3902 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
3903 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
3906 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
3907 static const u8 offload_codecs_uuid[16] = {
3908 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
3909 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
3912 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3913 static const u8 le_simultaneous_roles_uuid[16] = {
3914 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3915 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3918 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3919 static const u8 rpa_resolution_uuid[16] = {
3920 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3921 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: list the experimental
 * features available on this controller (or globally when hdev is
 * NULL), with BIT(0) of each feature's flags indicating enabled state.
 * Also opts the socket into future exp-feature-changed events.
 */
3924 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3925 void *data, u16 data_len)
3927 char buf[102]; /* Enough space for 5 features: 2 + 20 * 5 */
3928 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3932 bt_dev_dbg(hdev, "sock %p", sk);
3934 memset(&buf, 0, sizeof(buf));
3936 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Debug feature is global (no controller index) */
3938 flags = bt_dbg_get() ? BIT(0) : 0;
3940 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3941 rp->features[idx].flags = cpu_to_le32(flags);
3946 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
3947 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
3952 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
3953 rp->features[idx].flags = cpu_to_le32(flags);
3957 if (hdev && ll_privacy_capable(hdev)) {
/* BIT(1) signals that toggling changes supported settings */
3958 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3959 flags = BIT(0) | BIT(1);
3963 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3964 rp->features[idx].flags = cpu_to_le32(flags);
3968 if (hdev && (aosp_has_quality_report(hdev) ||
3969 hdev->set_quality_report)) {
3970 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
3975 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
3976 rp->features[idx].flags = cpu_to_le32(flags);
3980 if (hdev && hdev->get_data_path_id) {
3981 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
3986 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
3987 rp->features[idx].flags = cpu_to_le32(flags);
3991 rp->feature_count = cpu_to_le16(idx);
3993 /* After reading the experimental features information, enable
3994 * the events to update client on any future change.
3996 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3998 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3999 MGMT_OP_READ_EXP_FEATURES_INFO,
4000 0, rp, sizeof(*rp) + (20 * idx));
/* Emit an Experimental Feature Changed event for the LL privacy (RPA
 * resolution) feature and keep the device-privacy connection flag in
 * sync with the new state. BIT(1) marks "supported settings changed".
 */
4003 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4006 struct mgmt_ev_exp_feature_changed ev;
4008 memset(&ev, 0, sizeof(ev));
4009 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4010 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4012 if (enabled && privacy_mode_capable(hdev))
4013 set_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
4015 clear_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
4017 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4019 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4023 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4024 bool enabled, struct sock *skip)
4026 struct mgmt_ev_exp_feature_changed ev;
4028 memset(&ev, 0, sizeof(ev));
4029 memcpy(ev.uuid, uuid, 16);
4030 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4032 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4034 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Table-entry helper pairing an experimental feature UUID with its
 * set-handler function.
 */
4037 #define EXP_FEAT(_uuid, _set_func) \
4040 .set_func = _set_func, \
4043 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Setting the all-zero UUID disables experimental features in bulk:
 * the debug feature (when built in) and LL privacy (when supported and
 * the controller is powered down), emitting change events as needed.
 */
4044 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4045 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4047 struct mgmt_rp_set_exp_feature rp;
4049 memset(rp.uuid, 0, 16);
4050 rp.flags = cpu_to_le32(0);
4052 #ifdef CONFIG_BT_FEATURE_DEBUG
4054 bool changed = bt_dbg_get();
4059 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4063 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4066 changed = hci_dev_test_and_clear_flag(hdev,
4067 HCI_ENABLE_LL_PRIVACY);
4069 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4073 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4075 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4076 MGMT_OP_SET_EXP_FEATURE, 0,
4080 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Set handler for the debug experimental feature: toggles the global
 * bt_dbg state. Must be addressed without a controller index, takes a
 * single boolean octet, and broadcasts a change event on transitions.
 */
4081 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4082 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4084 struct mgmt_rp_set_exp_feature rp;
4089 /* Command requires to use the non-controller index */
4091 return mgmt_cmd_status(sk, hdev->id,
4092 MGMT_OP_SET_EXP_FEATURE,
4093 MGMT_STATUS_INVALID_INDEX);
4095 /* Parameters are limited to a single octet */
4096 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4097 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4098 MGMT_OP_SET_EXP_FEATURE,
4099 MGMT_STATUS_INVALID_PARAMS);
4101 /* Only boolean on/off is supported */
4102 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4103 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4104 MGMT_OP_SET_EXP_FEATURE,
4105 MGMT_STATUS_INVALID_PARAMS);
4107 val = !!cp->param[0];
4108 changed = val ? !bt_dbg_get() : bt_dbg_get();
4111 memcpy(rp.uuid, debug_uuid, 16);
4112 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4114 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4116 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4117 MGMT_OP_SET_EXP_FEATURE, 0,
4121 exp_feature_changed(hdev, debug_uuid, val, sk);
/* Set handler for the LL privacy (RPA resolution) experimental
 * feature. Requires a controller index, a powered-down controller and
 * a single boolean octet. Enabling also clears HCI_ADVERTISING since
 * software advertising rotation conflicts with controller-based
 * address resolution. BIT(1) in the flags signals that the supported
 * settings changed.
 */
4127 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4128 struct mgmt_cp_set_exp_feature *cp,
4131 struct mgmt_rp_set_exp_feature rp;
4136 /* Command requires to use the controller index */
4138 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4139 MGMT_OP_SET_EXP_FEATURE,
4140 MGMT_STATUS_INVALID_INDEX);
4142 /* Changes can only be made when controller is powered down */
4143 if (hdev_is_powered(hdev))
4144 return mgmt_cmd_status(sk, hdev->id,
4145 MGMT_OP_SET_EXP_FEATURE,
4146 MGMT_STATUS_REJECTED);
4148 /* Parameters are limited to a single octet */
4149 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4150 return mgmt_cmd_status(sk, hdev->id,
4151 MGMT_OP_SET_EXP_FEATURE,
4152 MGMT_STATUS_INVALID_PARAMS);
4154 /* Only boolean on/off is supported */
4155 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4156 return mgmt_cmd_status(sk, hdev->id,
4157 MGMT_OP_SET_EXP_FEATURE,
4158 MGMT_STATUS_INVALID_PARAMS);
4160 val = !!cp->param[0];
4163 changed = !hci_dev_test_and_set_flag(hdev,
4164 HCI_ENABLE_LL_PRIVACY);
4165 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4167 /* Enable LL privacy + supported settings changed */
4168 flags = BIT(0) | BIT(1);
4170 changed = hci_dev_test_and_clear_flag(hdev,
4171 HCI_ENABLE_LL_PRIVACY);
4173 /* Disable LL privacy + supported settings changed */
4177 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4178 rp.flags = cpu_to_le32(flags);
4180 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4182 err = mgmt_cmd_complete(sk, hdev->id,
4183 MGMT_OP_SET_EXP_FEATURE, 0,
4187 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Experimental-feature handler: toggle controller quality reporting
 * (quality_report_uuid). Works through either a driver-provided
 * hdev->set_quality_report hook or the AOSP vendor extension; fails
 * with NOT_SUPPORTED when neither is available.
 */
4192 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4193 struct mgmt_cp_set_exp_feature *cp,
4196 struct mgmt_rp_set_exp_feature rp;
4200 /* Command requires to use a valid controller index */
4202 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4203 MGMT_OP_SET_EXP_FEATURE,
4204 MGMT_STATUS_INVALID_INDEX);
4206 /* Parameters are limited to a single octet */
4207 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4208 return mgmt_cmd_status(sk, hdev->id,
4209 MGMT_OP_SET_EXP_FEATURE,
4210 MGMT_STATUS_INVALID_PARAMS);
4212 /* Only boolean on/off is supported */
4213 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4214 return mgmt_cmd_status(sk, hdev->id,
4215 MGMT_OP_SET_EXP_FEATURE,
4216 MGMT_STATUS_INVALID_PARAMS);
/* Serialize against other synchronous HCI request users */
4218 hci_req_sync_lock(hdev);
4220 val = !!cp->param[0];
4221 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4223 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4224 err = mgmt_cmd_status(sk, hdev->id,
4225 MGMT_OP_SET_EXP_FEATURE,
4226 MGMT_STATUS_NOT_SUPPORTED);
4227 goto unlock_quality_report;
/* Prefer the driver hook over the AOSP vendor command */
4231 if (hdev->set_quality_report)
4232 err = hdev->set_quality_report(hdev, val);
4234 err = aosp_set_quality_report(hdev, val);
/* Controller-side toggle failed; report FAILED and bail out
 * (the `if (err)` guard line is elided in this excerpt).
 */
4237 err = mgmt_cmd_status(sk, hdev->id,
4238 MGMT_OP_SET_EXP_FEATURE,
4239 MGMT_STATUS_FAILED);
4240 goto unlock_quality_report;
/* Mirror the new state into the device flag */
4244 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4246 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4249 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4251 memcpy(rp.uuid, quality_report_uuid, 16);
4252 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4253 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4255 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4259 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4261 unlock_quality_report:
4262 hci_req_sync_unlock(hdev);
/* Experimental-feature handler: toggle use of offloaded codecs
 * (offload_codecs_uuid). Requires the driver to implement
 * get_data_path_id; otherwise NOT_SUPPORTED.
 */
4266 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4267 struct mgmt_cp_set_exp_feature *cp,
4272 struct mgmt_rp_set_exp_feature rp;
4274 /* Command requires to use a valid controller index */
4276 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4277 MGMT_OP_SET_EXP_FEATURE,
4278 MGMT_STATUS_INVALID_INDEX);
4280 /* Parameters are limited to a single octet */
4281 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4282 return mgmt_cmd_status(sk, hdev->id,
4283 MGMT_OP_SET_EXP_FEATURE,
4284 MGMT_STATUS_INVALID_PARAMS);
4286 /* Only boolean on/off is supported */
4287 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4288 return mgmt_cmd_status(sk, hdev->id,
4289 MGMT_OP_SET_EXP_FEATURE,
4290 MGMT_STATUS_INVALID_PARAMS);
4292 val = !!cp->param[0];
4293 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
/* Codec offload needs a data-path id callback from the driver */
4295 if (!hdev->get_data_path_id) {
4296 return mgmt_cmd_status(sk, hdev->id,
4297 MGMT_OP_SET_EXP_FEATURE,
4298 MGMT_STATUS_NOT_SUPPORTED);
/* Record the requested state in the device flags */
4303 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4305 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4308 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4311 memcpy(rp.uuid, offload_codecs_uuid, 16);
4312 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4313 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4314 err = mgmt_cmd_complete(sk, hdev->id,
4315 MGMT_OP_SET_EXP_FEATURE, 0,
4319 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
/* Experimental-feature handler: allow the controller to act as LE
 * central and peripheral at the same time
 * (le_simultaneous_roles_uuid). Rejected with NOT_SUPPORTED when the
 * controller's LE states don't permit simultaneous roles.
 */
4324 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4325 struct mgmt_cp_set_exp_feature *cp,
4330 struct mgmt_rp_set_exp_feature rp;
4332 /* Command requires to use a valid controller index */
4334 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4335 MGMT_OP_SET_EXP_FEATURE,
4336 MGMT_STATUS_INVALID_INDEX);
4338 /* Parameters are limited to a single octet */
4339 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4340 return mgmt_cmd_status(sk, hdev->id,
4341 MGMT_OP_SET_EXP_FEATURE,
4342 MGMT_STATUS_INVALID_PARAMS);
4344 /* Only boolean on/off is supported */
4345 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4346 return mgmt_cmd_status(sk, hdev->id,
4347 MGMT_OP_SET_EXP_FEATURE,
4348 MGMT_STATUS_INVALID_PARAMS);
4350 val = !!cp->param[0];
4351 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4353 if (!hci_dev_le_state_simultaneous(hdev)) {
4354 return mgmt_cmd_status(sk, hdev->id,
4355 MGMT_OP_SET_EXP_FEATURE,
4356 MGMT_STATUS_NOT_SUPPORTED);
/* Record the requested state in the device flags */
4361 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4363 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4366 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4369 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4370 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4371 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4372 err = mgmt_cmd_complete(sk, hdev->id,
4373 MGMT_OP_SET_EXP_FEATURE, 0,
4377 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
/* Dispatch table mapping experimental-feature UUIDs to their setter
 * functions; terminated by a NULL entry. The debug entry is only
 * compiled in with CONFIG_BT_FEATURE_DEBUG.
 */
4382 static const struct mgmt_exp_feature {
4384 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4385 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4386 } exp_features[] = {
4387 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4388 #ifdef CONFIG_BT_FEATURE_DEBUG
4389 EXP_FEAT(debug_uuid, set_debug_func),
4391 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4392 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4393 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4394 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4396 /* end with a null feature */
4397 EXP_FEAT(NULL, NULL)
/* MGMT_OP_SET_EXP_FEATURE entry point: look the requested UUID up in
 * exp_features[] and delegate to the matching setter; unknown UUIDs get
 * MGMT_STATUS_NOT_SUPPORTED. hdev may be NULL for index-less commands.
 */
4400 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4401 void *data, u16 data_len)
4403 struct mgmt_cp_set_exp_feature *cp = data;
4406 bt_dev_dbg(hdev, "sock %p", sk);
4408 for (i = 0; exp_features[i].uuid; i++) {
4409 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4410 return exp_features[i].set_func(sk, hdev, cp, data_len);
4413 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4414 MGMT_OP_SET_EXP_FEATURE,
4415 MGMT_STATUS_NOT_SUPPORTED);
/* MGMT_OP_GET_DEVICE_FLAGS handler: report supported and current
 * per-device connection flags for either a BR/EDR accept-list entry or
 * an LE connection-parameters entry. Flags are stored as bitmaps and
 * converted to a u32 for the wire format.
 */
4418 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4421 struct mgmt_cp_get_device_flags *cp = data;
4422 struct mgmt_rp_get_device_flags rp;
4423 struct bdaddr_list_with_flags *br_params;
4424 struct hci_conn_params *params;
4425 u32 supported_flags;
4426 u32 current_flags = 0;
/* Default to INVALID_PARAMS; overwritten on a successful lookup */
4427 u8 status = MGMT_STATUS_INVALID_PARAMS;
4429 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4430 &cp->addr.bdaddr, cp->addr.type);
4434 bitmap_to_arr32(&supported_flags, hdev->conn_flags,
4435 __HCI_CONN_NUM_FLAGS);
4437 memset(&rp, 0, sizeof(rp));
4439 if (cp->addr.type == BDADDR_BREDR) {
4440 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4446 bitmap_to_arr32(&current_flags, br_params->flags,
4447 __HCI_CONN_NUM_FLAGS);
/* LE path (else-branch; `else` line elided in this excerpt) */
4449 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4450 le_addr_type(cp->addr.type));
4455 bitmap_to_arr32(&current_flags, params->flags,
4456 __HCI_CONN_NUM_FLAGS);
4459 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4460 rp.addr.type = cp->addr.type;
4461 rp.supported_flags = cpu_to_le32(supported_flags);
4462 rp.current_flags = cpu_to_le32(current_flags);
4464 status = MGMT_STATUS_SUCCESS;
4467 hci_dev_unlock(hdev);
4469 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Emit an MGMT_EV_DEVICE_FLAGS_CHANGED event to all mgmt sockets except
 * the originator `sk` (which already gets a command reply).
 */
4473 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4474 bdaddr_t *bdaddr, u8 bdaddr_type,
4475 u32 supported_flags, u32 current_flags)
4477 struct mgmt_ev_device_flags_changed ev;
4479 bacpy(&ev.addr.bdaddr, bdaddr);
4480 ev.addr.type = bdaddr_type;
4481 ev.supported_flags = cpu_to_le32(supported_flags);
4482 ev.current_flags = cpu_to_le32(current_flags);
4484 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: validate the requested flags
 * against hdev->conn_flags, store them in the BR/EDR accept-list entry
 * or LE conn params, kick passive scan if device-privacy changed, and
 * broadcast a DEVICE_FLAGS_CHANGED event on success.
 */
4487 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4490 struct mgmt_cp_set_device_flags *cp = data;
4491 struct bdaddr_list_with_flags *br_params;
4492 struct hci_conn_params *params;
4493 u8 status = MGMT_STATUS_INVALID_PARAMS;
4494 u32 supported_flags;
4495 u32 current_flags = __le32_to_cpu(cp->current_flags);
4497 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4498 &cp->addr.bdaddr, cp->addr.type,
4499 __le32_to_cpu(current_flags));
4501 bitmap_to_arr32(&supported_flags, hdev->conn_flags,
4502 __HCI_CONN_NUM_FLAGS);
/* Reject any flag bit the controller does not advertise as supported */
4504 if ((supported_flags | current_flags) != supported_flags) {
4505 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4506 current_flags, supported_flags);
4512 if (cp->addr.type == BDADDR_BREDR) {
4513 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4518 bitmap_from_u64(br_params->flags, current_flags);
4519 status = MGMT_STATUS_SUCCESS;
4521 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4522 &cp->addr.bdaddr, cp->addr.type);
/* LE path (else-branch; `else` line elided in this excerpt) */
4525 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4526 le_addr_type(cp->addr.type));
4528 bitmap_from_u64(params->flags, current_flags);
4529 status = MGMT_STATUS_SUCCESS;
4531 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4534 if (test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY,
4536 hci_update_passive_scan(hdev);
4538 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4540 le_addr_type(cp->addr.type));
4545 hci_dev_unlock(hdev);
4547 if (status == MGMT_STATUS_SUCCESS)
4548 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4549 supported_flags, current_flags);
4551 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4552 &cp->addr, sizeof(cp->addr));
/* Emit MGMT_EV_ADV_MONITOR_ADDED for the given monitor handle,
 * skipping the socket that issued the add command.
 */
4555 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4558 struct mgmt_ev_adv_monitor_added ev;
4560 ev.monitor_handle = cpu_to_le16(handle);
4562 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADV_MONITOR_REMOVED for the given handle. If a
 * REMOVE_ADV_MONITOR command is pending for this handle, its
 * originating socket is skipped (it gets a command reply instead).
 */
4565 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4567 struct mgmt_ev_adv_monitor_removed ev;
4568 struct mgmt_pending_cmd *cmd;
4569 struct sock *sk_skip = NULL;
4570 struct mgmt_cp_remove_adv_monitor *cp;
4572 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* Non-zero handle means a single-monitor remove is in flight;
 * sk_skip assignment lines are elided in this excerpt.
 */
4576 if (cp->monitor_handle)
4580 ev.monitor_handle = cpu_to_le16(handle);
4582 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported/enabled
 * monitor features, limits, and the handles of all registered
 * advertisement monitors. The reply is variable-size (handle list
 * appended after the fixed part).
 */
4585 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4586 void *data, u16 len)
4588 struct adv_monitor *monitor = NULL;
4589 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4592 __u32 supported = 0;
4594 __u16 num_handles = 0;
4595 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4597 BT_DBG("request for %s", hdev->name);
/* MSFT vendor extension is currently the only monitor backend */
4601 if (msft_monitor_supported(hdev))
4602 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* Snapshot all registered monitor handles under the dev lock */
4604 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4605 handles[num_handles++] = monitor->handle;
4607 hci_dev_unlock(hdev);
4609 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4610 rp = kmalloc(rp_size, GFP_KERNEL);
4614 /* All supported features are currently enabled */
4615 enabled = supported;
4617 rp->supported_features = cpu_to_le32(supported);
4618 rp->enabled_features = cpu_to_le32(enabled);
4619 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4620 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4621 rp->num_handles = cpu_to_le16(num_handles);
4623 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4625 err = mgmt_cmd_complete(sk, hdev->id,
4626 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4627 MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion callback for ADD_ADV_PATTERNS_MONITOR(_RSSI): on success,
 * announce the new monitor, bump the monitor count, mark it
 * REGISTERED, and refresh passive scanning; then complete the pending
 * mgmt command with the monitor handle.
 */
4634 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4636 struct mgmt_rp_add_adv_patterns_monitor rp;
4637 struct mgmt_pending_cmd *cmd;
4638 struct adv_monitor *monitor;
/* Either opcode variant may be pending; check RSSI first */
4643 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4645 cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4650 monitor = cmd->user_data;
4651 rp.monitor_handle = cpu_to_le16(monitor->handle);
/* Success path; the `if (!status)` guard is elided in this excerpt */
4654 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4655 hdev->adv_monitors_cnt++;
4656 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4657 monitor->state = ADV_MONITOR_STATE_REGISTERED;
4658 hci_update_passive_scan(hdev);
4661 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4662 mgmt_status(status), &rp, sizeof(rp));
4663 mgmt_pending_remove(cmd);
4666 hci_dev_unlock(hdev);
4667 bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4668 rp.monitor_handle, status);
/* Common backend for ADD_ADV_PATTERNS_MONITOR and its RSSI variant:
 * rejects when a conflicting command is pending, queues a pending cmd,
 * and hands the monitor to hci_add_adv_monitor(). If the add does not
 * need a controller round-trip (pending == false), the command is
 * completed immediately; otherwise completion happens in
 * mgmt_add_adv_patterns_monitor_complete(). On any failure the monitor
 * is freed here (ownership of `m` is taken by this function).
 */
4673 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4674 struct adv_monitor *m, u8 status,
4675 void *data, u16 len, u16 op)
4677 struct mgmt_rp_add_adv_patterns_monitor rp;
4678 struct mgmt_pending_cmd *cmd;
4687 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4688 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4689 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4690 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4691 status = MGMT_STATUS_BUSY;
4695 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4697 status = MGMT_STATUS_NO_RESOURCES;
4702 pending = hci_add_adv_monitor(hdev, m, &err);
/* Map the hci-layer errno onto an mgmt status code */
4704 if (err == -ENOSPC || err == -ENOMEM)
4705 status = MGMT_STATUS_NO_RESOURCES;
4706 else if (err == -EINVAL)
4707 status = MGMT_STATUS_INVALID_PARAMS;
4709 status = MGMT_STATUS_FAILED;
4711 mgmt_pending_remove(cmd);
/* Synchronous-success path: no controller interaction needed, so
 * complete the command right away and register the monitor here.
 */
4716 mgmt_pending_remove(cmd);
4717 rp.monitor_handle = cpu_to_le16(m->handle);
4718 mgmt_adv_monitor_added(sk, hdev, m->handle);
4719 m->state = ADV_MONITOR_STATE_REGISTERED;
4720 hdev->adv_monitors_cnt++;
4722 hci_dev_unlock(hdev);
4723 return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4727 hci_dev_unlock(hdev);
/* Error path: the monitor was never registered, free it */
4732 hci_free_adv_monitor(hdev, m);
4733 hci_dev_unlock(hdev);
4734 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copy RSSI thresholds from the mgmt request into the monitor, or —
 * when `rssi` is NULL (the non-RSSI opcode) — fill in permissive
 * defaults so the MSFT backend behaves as if no RSSI filter was given.
 */
4737 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4738 struct mgmt_adv_rssi_thresholds *rssi)
4741 m->rssi.low_threshold = rssi->low_threshold;
4742 m->rssi.low_threshold_timeout =
4743 __le16_to_cpu(rssi->low_threshold_timeout);
4744 m->rssi.high_threshold = rssi->high_threshold;
4745 m->rssi.high_threshold_timeout =
4746 __le16_to_cpu(rssi->high_threshold_timeout);
4747 m->rssi.sampling_period = rssi->sampling_period;
4749 /* Default values. These numbers are the least constricting
4750 * parameters for MSFT API to work, so it behaves as if there
4751 * are no rssi parameter to consider. May need to be changed
4752 * if other API are to be supported.
4754 m->rssi.low_threshold = -127;
4755 m->rssi.low_threshold_timeout = 60;
4756 m->rssi.high_threshold = -127;
4757 m->rssi.high_threshold_timeout = 0;
4758 m->rssi.sampling_period = 0;
/* Validate and copy `pattern_count` advertisement patterns into the
 * monitor's pattern list. Each pattern's offset/length must fit inside
 * a single AD structure (HCI_MAX_AD_LENGTH). Returns an MGMT status
 * code; allocated patterns are owned by `m` (freed with the monitor).
 */
4762 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4763 struct mgmt_adv_pattern *patterns)
4765 u8 offset = 0, length = 0;
4766 struct adv_pattern *p = NULL;
4769 for (i = 0; i < pattern_count; i++) {
4770 offset = patterns[i].offset;
4771 length = patterns[i].length;
4772 if (offset >= HCI_MAX_AD_LENGTH ||
4773 length > HCI_MAX_AD_LENGTH ||
4774 (offset + length) > HCI_MAX_AD_LENGTH)
4775 return MGMT_STATUS_INVALID_PARAMS;
4777 p = kmalloc(sizeof(*p), GFP_KERNEL);
4779 return MGMT_STATUS_NO_RESOURCES;
4781 p->ad_type = patterns[i].ad_type;
4782 p->offset = patterns[i].offset;
4783 p->length = patterns[i].length;
4784 memcpy(p->value, patterns[i].value, p->length);
4786 INIT_LIST_HEAD(&p->list);
4787 list_add(&p->list, &m->patterns);
4790 return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI thresholds):
 * validate the variable-length pattern payload, build an adv_monitor
 * with default RSSI parameters, and delegate to
 * __add_adv_patterns_monitor() which takes ownership of `m`.
 */
4793 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4794 void *data, u16 len)
4796 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4797 struct adv_monitor *m = NULL;
4798 u8 status = MGMT_STATUS_SUCCESS;
4799 size_t expected_size = sizeof(*cp);
4801 BT_DBG("request for %s", hdev->name);
/* Must carry at least one pattern beyond the fixed header */
4803 if (len <= sizeof(*cp)) {
4804 status = MGMT_STATUS_INVALID_PARAMS;
4808 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4809 if (len != expected_size) {
4810 status = MGMT_STATUS_INVALID_PARAMS;
4814 m = kzalloc(sizeof(*m), GFP_KERNEL);
4816 status = MGMT_STATUS_NO_RESOURCES;
4820 INIT_LIST_HEAD(&m->patterns);
/* NULL rssi -> permissive defaults (see parse_adv_monitor_rssi) */
4822 parse_adv_monitor_rssi(m, NULL);
4823 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4826 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4827 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: same as
 * add_adv_patterns_monitor() but the request carries explicit RSSI
 * thresholds that are copied into the monitor before registration.
 */
4830 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4831 void *data, u16 len)
4833 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4834 struct adv_monitor *m = NULL;
4835 u8 status = MGMT_STATUS_SUCCESS;
4836 size_t expected_size = sizeof(*cp);
4838 BT_DBG("request for %s", hdev->name);
4840 if (len <= sizeof(*cp)) {
4841 status = MGMT_STATUS_INVALID_PARAMS;
4845 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4846 if (len != expected_size) {
4847 status = MGMT_STATUS_INVALID_PARAMS;
4851 m = kzalloc(sizeof(*m), GFP_KERNEL);
4853 status = MGMT_STATUS_NO_RESOURCES;
4857 INIT_LIST_HEAD(&m->patterns);
4859 parse_adv_monitor_rssi(m, &cp->rssi);
4860 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4863 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4864 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for REMOVE_ADV_MONITOR: echo the requested
 * handle back, refresh passive scanning, and complete the pending mgmt
 * command with the translated status.
 */
4867 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4869 struct mgmt_rp_remove_adv_monitor rp;
4870 struct mgmt_cp_remove_adv_monitor *cp;
4871 struct mgmt_pending_cmd *cmd;
4876 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* Reply with the same (little-endian) handle the caller sent */
4881 rp.monitor_handle = cp->monitor_handle;
4884 hci_update_passive_scan(hdev);
4886 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4887 mgmt_status(status), &rp, sizeof(rp));
4888 mgmt_pending_remove(cmd);
4891 hci_dev_unlock(hdev);
4892 bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4893 rp.monitor_handle, status);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: handle 0 removes all monitors,
 * otherwise the single named monitor. If removal completes without a
 * controller round-trip (pending == false) the command is answered
 * immediately; otherwise mgmt_remove_adv_monitor_complete() finishes
 * it later.
 */
4898 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4899 void *data, u16 len)
4901 struct mgmt_cp_remove_adv_monitor *cp = data;
4902 struct mgmt_rp_remove_adv_monitor rp;
4903 struct mgmt_pending_cmd *cmd;
4904 u16 handle = __le16_to_cpu(cp->monitor_handle);
4908 BT_DBG("request for %s", hdev->name);
4909 rp.monitor_handle = cp->monitor_handle;
/* Serialize against other monitor/LE state-changing commands */
4913 if (pending_find(MGMT_OP_SET_LE, hdev) ||
4914 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4915 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4916 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4917 status = MGMT_STATUS_BUSY;
4921 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4923 status = MGMT_STATUS_NO_RESOURCES;
/* Non-zero handle -> remove one monitor, zero -> remove them all
 * (the `if (handle)` / `else` lines are elided in this excerpt).
 */
4928 pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4930 pending = hci_remove_all_adv_monitor(hdev, &err);
4933 mgmt_pending_remove(cmd);
4936 status = MGMT_STATUS_INVALID_INDEX;
4938 status = MGMT_STATUS_FAILED;
4943 /* monitor can be removed without forwarding request to controller */
4945 mgmt_pending_remove(cmd);
4946 hci_dev_unlock(hdev);
4948 return mgmt_cmd_complete(sk, hdev->id,
4949 MGMT_OP_REMOVE_ADV_MONITOR,
4950 MGMT_STATUS_SUCCESS,
4954 hci_dev_unlock(hdev);
4958 hci_dev_unlock(hdev);
4959 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* Completion callback for READ_LOCAL_OOB_DATA: translate the HCI reply
 * skb into a mgmt reply. For non-SC controllers only the P-192
 * hash/rand are returned (reply size is trimmed); with Secure
 * Connections the extended reply carries the P-256 values too.
 */
4963 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
4965 struct mgmt_rp_read_local_oob_data mgmt_rp;
4966 size_t rp_size = sizeof(mgmt_rp);
4967 struct mgmt_pending_cmd *cmd = data;
4968 struct sk_buff *skb = cmd->skb;
4969 u8 status = mgmt_status(err);
/* Derive a status from err/skb; the first guard line is elided here */
4973 status = MGMT_STATUS_FAILED;
4974 else if (IS_ERR(skb))
4975 status = mgmt_status(PTR_ERR(skb));
/* First byte of a plain HCI reply is the command status */
4977 status = mgmt_status(skb->data[0]);
4980 bt_dev_dbg(hdev, "status %d", status);
4983 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
4987 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4989 if (!bredr_sc_enabled(hdev)) {
4990 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
4992 if (skb->len < sizeof(*rp)) {
4993 mgmt_cmd_status(cmd->sk, hdev->id,
4994 MGMT_OP_READ_LOCAL_OOB_DATA,
4995 MGMT_STATUS_FAILED);
4999 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5000 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* No P-256 data without Secure Connections: shrink the reply */
5002 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5004 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5006 if (skb->len < sizeof(*rp)) {
5007 mgmt_cmd_status(cmd->sk, hdev->id,
5008 MGMT_OP_READ_LOCAL_OOB_DATA,
5009 MGMT_STATUS_FAILED);
5013 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5014 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5016 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5017 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5020 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5021 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
/* Free the skb only when it is a real buffer, not an ERR_PTR */
5024 if (skb && !IS_ERR(skb))
5027 mgmt_pending_free(cmd);
/* hci_cmd_sync worker for READ_LOCAL_OOB_DATA: issue the (extended,
 * when BR/EDR SC is enabled) OOB read and stash the reply skb on the
 * pending command for the completion callback.
 */
5030 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5032 struct mgmt_pending_cmd *cmd = data;
5034 if (bredr_sc_enabled(hdev))
5035 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5037 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5039 if (IS_ERR(cmd->skb))
5040 return PTR_ERR(cmd->skb);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller; queues read_local_oob_data_sync() on the cmd_sync
 * machinery with read_local_oob_data_complete() as its callback.
 */
5045 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5046 void *data, u16 data_len)
5048 struct mgmt_pending_cmd *cmd;
5051 bt_dev_dbg(hdev, "sock %p", sk);
5055 if (!hdev_is_powered(hdev)) {
5056 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5057 MGMT_STATUS_NOT_POWERED);
5061 if (!lmp_ssp_capable(hdev)) {
5062 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5063 MGMT_STATUS_NOT_SUPPORTED);
5067 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5071 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5072 read_local_oob_data_complete);
/* Queueing failed: report and free the never-queued pending cmd */
5075 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5076 MGMT_STATUS_FAILED);
5079 mgmt_pending_free(cmd);
5083 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler. Two wire formats are accepted:
 * the legacy size carries only P-192 hash/rand (BR/EDR only), the
 * extended size carries both P-192 and P-256 values. All-zero values
 * mean "no OOB data for that curve". For LE addresses the P-192
 * parameters must be zero (legacy SMP OOB is not implemented).
 */
5087 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5088 void *data, u16 len)
5090 struct mgmt_addr_info *addr = data;
5093 bt_dev_dbg(hdev, "sock %p", sk);
5095 if (!bdaddr_type_is_valid(addr->type))
5096 return mgmt_cmd_complete(sk, hdev->id,
5097 MGMT_OP_ADD_REMOTE_OOB_DATA,
5098 MGMT_STATUS_INVALID_PARAMS,
5099 addr, sizeof(*addr));
/* Legacy request: P-192 data only, valid for BR/EDR addresses */
5103 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5104 struct mgmt_cp_add_remote_oob_data *cp = data;
5107 if (cp->addr.type != BDADDR_BREDR) {
5108 err = mgmt_cmd_complete(sk, hdev->id,
5109 MGMT_OP_ADD_REMOTE_OOB_DATA,
5110 MGMT_STATUS_INVALID_PARAMS,
5111 &cp->addr, sizeof(cp->addr));
5115 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5116 cp->addr.type, cp->hash,
5117 cp->rand, NULL, NULL);
5119 status = MGMT_STATUS_FAILED;
5121 status = MGMT_STATUS_SUCCESS;
5123 err = mgmt_cmd_complete(sk, hdev->id,
5124 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5125 &cp->addr, sizeof(cp->addr));
5126 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5127 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5128 u8 *rand192, *hash192, *rand256, *hash256;
5131 if (bdaddr_type_is_le(cp->addr.type)) {
5132 /* Enforce zero-valued 192-bit parameters as
5133 * long as legacy SMP OOB isn't implemented.
5135 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5136 memcmp(cp->hash192, ZERO_KEY, 16)) {
5137 err = mgmt_cmd_complete(sk, hdev->id,
5138 MGMT_OP_ADD_REMOTE_OOB_DATA,
5139 MGMT_STATUS_INVALID_PARAMS,
5140 addr, sizeof(*addr));
5147 /* In case one of the P-192 values is set to zero,
5148 * then just disable OOB data for P-192.
5150 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5151 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5155 rand192 = cp->rand192;
5156 hash192 = cp->hash192;
5160 /* In case one of the P-256 values is set to zero, then just
5161 * disable OOB data for P-256.
5163 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5164 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5168 rand256 = cp->rand256;
5169 hash256 = cp->hash256;
5172 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5173 cp->addr.type, hash192, rand192,
5176 status = MGMT_STATUS_FAILED;
5178 status = MGMT_STATUS_SUCCESS;
5180 err = mgmt_cmd_complete(sk, hdev->id,
5181 MGMT_OP_ADD_REMOTE_OOB_DATA,
5182 status, &cp->addr, sizeof(cp->addr));
/* Any other length is malformed */
5184 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5186 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5187 MGMT_STATUS_INVALID_PARAMS);
5191 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BDADDR_ANY clears all stored
 * remote OOB data, otherwise the entry for the given BR/EDR address is
 * removed (INVALID_PARAMS when no such entry exists).
 */
5195 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5196 void *data, u16 len)
5198 struct mgmt_cp_remove_remote_oob_data *cp = data;
5202 bt_dev_dbg(hdev, "sock %p", sk);
5204 if (cp->addr.type != BDADDR_BREDR)
5205 return mgmt_cmd_complete(sk, hdev->id,
5206 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5207 MGMT_STATUS_INVALID_PARAMS,
5208 &cp->addr, sizeof(cp->addr));
5212 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5213 hci_remote_oob_data_clear(hdev);
5214 status = MGMT_STATUS_SUCCESS;
5218 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5220 status = MGMT_STATUS_INVALID_PARAMS;
5222 status = MGMT_STATUS_SUCCESS;
5225 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5226 status, &cp->addr, sizeof(cp->addr));
5228 hci_dev_unlock(hdev);
/* Complete whichever start-discovery variant (plain, service, limited)
 * is pending for this controller with the translated HCI status.
 */
5232 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5234 struct mgmt_pending_cmd *cmd;
5236 bt_dev_dbg(hdev, "status %u", status);
5240 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5242 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5245 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5248 cmd->cmd_complete(cmd, mgmt_status(status));
5249 mgmt_pending_remove(cmd);
5252 hci_dev_unlock(hdev);
/* Check whether the controller supports the requested discovery type,
 * filling *mgmt_status with the reason when it does not (LE support
 * for LE/interleaved, BR/EDR support for BR/EDR; anything else is
 * INVALID_PARAMS).
 */
5255 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5256 uint8_t *mgmt_status)
5259 case DISCOV_TYPE_LE:
5260 *mgmt_status = mgmt_le_support(hdev);
5264 case DISCOV_TYPE_INTERLEAVED:
5265 *mgmt_status = mgmt_le_support(hdev);
5269 case DISCOV_TYPE_BREDR:
5270 *mgmt_status = mgmt_bredr_support(hdev);
5275 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* cmd_sync completion for start-discovery: ignore stale callbacks (cmd
 * no longer pending), reply to the caller, and move discovery state to
 * FINDING on success or back to STOPPED on error.
 */
5282 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5284 struct mgmt_pending_cmd *cmd = data;
5286 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5287 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5288 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5291 bt_dev_dbg(hdev, "err %d", err);
5293 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5295 mgmt_pending_remove(cmd);
5297 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
/* Thin cmd_sync adapter: kick off discovery on the sync path */
5301 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5303 return hci_start_discovery_sync(hdev);
/* Shared implementation of START_DISCOVERY and START_LIMITED_DISCOVERY:
 * validates power state, discovery state, type, and pause state, then
 * stores the discovery parameters on hdev and queues the sync request.
 */
5306 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5307 u16 op, void *data, u16 len)
5309 struct mgmt_cp_start_discovery *cp = data;
5310 struct mgmt_pending_cmd *cmd;
5314 bt_dev_dbg(hdev, "sock %p", sk);
5318 if (!hdev_is_powered(hdev)) {
5319 err = mgmt_cmd_complete(sk, hdev->id, op,
5320 MGMT_STATUS_NOT_POWERED,
5321 &cp->type, sizeof(cp->type));
/* Only one discovery session at a time, and not during periodic
 * inquiry.
 */
5325 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5326 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5327 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5328 &cp->type, sizeof(cp->type));
5332 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5333 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5334 &cp->type, sizeof(cp->type));
5338 /* Can't start discovery when it is paused */
5339 if (hdev->discovery_paused) {
5340 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5341 &cp->type, sizeof(cp->type));
5345 /* Clear the discovery filter first to free any previously
5346 * allocated memory for the UUID list.
5348 hci_discovery_filter_clear(hdev);
5350 hdev->discovery.type = cp->type;
5351 hdev->discovery.report_invalid_rssi = false;
5352 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5353 hdev->discovery.limited = true;
5355 hdev->discovery.limited = false;
5357 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5363 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5364 start_discovery_complete);
5366 mgmt_pending_remove(cmd);
5370 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5373 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY: unlimited-discovery wrapper */
5377 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5378 void *data, u16 len)
5380 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY: limited-discovery wrapper */
5384 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5385 void *data, u16 len)
5387 return start_discovery_internal(sk, hdev,
5388 MGMT_OP_START_LIMITED_DISCOVERY,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but
 * with result filtering by RSSI and an optional UUID list appended to
 * the request. The UUID count is bounded so the length computation
 * cannot overflow u16, and the list is copied into
 * hdev->discovery.uuids.
 */
5392 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5393 void *data, u16 len)
5395 struct mgmt_cp_start_service_discovery *cp = data;
5396 struct mgmt_pending_cmd *cmd;
/* Upper bound keeps sizeof(*cp) + 16*count within u16 range */
5397 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5398 u16 uuid_count, expected_len;
5402 bt_dev_dbg(hdev, "sock %p", sk);
5406 if (!hdev_is_powered(hdev)) {
5407 err = mgmt_cmd_complete(sk, hdev->id,
5408 MGMT_OP_START_SERVICE_DISCOVERY,
5409 MGMT_STATUS_NOT_POWERED,
5410 &cp->type, sizeof(cp->type));
5414 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5415 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5416 err = mgmt_cmd_complete(sk, hdev->id,
5417 MGMT_OP_START_SERVICE_DISCOVERY,
5418 MGMT_STATUS_BUSY, &cp->type,
5423 if (hdev->discovery_paused) {
5424 err = mgmt_cmd_complete(sk, hdev->id,
5425 MGMT_OP_START_SERVICE_DISCOVERY,
5426 MGMT_STATUS_BUSY, &cp->type,
5431 uuid_count = __le16_to_cpu(cp->uuid_count);
5432 if (uuid_count > max_uuid_count) {
5433 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5435 err = mgmt_cmd_complete(sk, hdev->id,
5436 MGMT_OP_START_SERVICE_DISCOVERY,
5437 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The request length must match the fixed part plus the UUID list */
5442 expected_len = sizeof(*cp) + uuid_count * 16;
5443 if (expected_len != len) {
5444 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5446 err = mgmt_cmd_complete(sk, hdev->id,
5447 MGMT_OP_START_SERVICE_DISCOVERY,
5448 MGMT_STATUS_INVALID_PARAMS, &cp->type,
5453 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5454 err = mgmt_cmd_complete(sk, hdev->id,
5455 MGMT_OP_START_SERVICE_DISCOVERY,
5456 status, &cp->type, sizeof(cp->type));
5460 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5467 /* Clear the discovery filter first to free any previously
5468 * allocated memory for the UUID list.
5470 hci_discovery_filter_clear(hdev);
5472 hdev->discovery.result_filtering = true;
5473 hdev->discovery.type = cp->type;
5474 hdev->discovery.rssi = cp->rssi;
5475 hdev->discovery.uuid_count = uuid_count;
5477 if (uuid_count > 0) {
5478 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5480 if (!hdev->discovery.uuids) {
5481 err = mgmt_cmd_complete(sk, hdev->id,
5482 MGMT_OP_START_SERVICE_DISCOVERY,
5484 &cp->type, sizeof(cp->type));
5485 mgmt_pending_remove(cmd);
5490 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5491 start_discovery_complete);
5493 mgmt_pending_remove(cmd);
5497 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5500 hci_dev_unlock(hdev);
/* Complete a pending STOP_DISCOVERY command with the translated HCI
 * status, if one is pending.
 */
5504 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5506 struct mgmt_pending_cmd *cmd;
5508 bt_dev_dbg(hdev, "status %u", status);
5512 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5514 cmd->cmd_complete(cmd, mgmt_status(status));
5515 mgmt_pending_remove(cmd);
5518 hci_dev_unlock(hdev);
/* cmd_sync completion for stop-discovery: ignore stale callbacks,
 * reply to the caller, and mark discovery STOPPED.
 */
5521 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
5523 struct mgmt_pending_cmd *cmd = data;
5525 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
5528 bt_dev_dbg(hdev, "err %d", err);
5530 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5532 mgmt_pending_remove(cmd);
5535 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Synchronous worker queued by stop_discovery(): simply delegates to the
 * hci_sync helper that performs the actual HCI traffic to stop scanning.
 */
5538 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
5540 return hci_stop_discovery_sync(hdev);
/* MGMT_OP_STOP_DISCOVERY handler.  Rejects the request if no discovery
 * is active (MGMT_STATUS_REJECTED) or if the requested discovery type
 * does not match the one currently running (MGMT_STATUS_INVALID_PARAMS).
 * Otherwise queues stop_discovery_sync and marks the state machine as
 * DISCOVERY_STOPPING.
 */
5543 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5546 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5547 struct mgmt_pending_cmd *cmd;
5550 bt_dev_dbg(hdev, "sock %p", sk);
/* No discovery in progress: nothing to stop. */
5554 if (!hci_discovery_active(hdev)) {
5555 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5556 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5557 sizeof(mgmt_cp->type));
/* Type must match the discovery that was started. */
5561 if (hdev->discovery.type != mgmt_cp->type) {
5562 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5563 MGMT_STATUS_INVALID_PARAMS,
5564 &mgmt_cp->type, sizeof(mgmt_cp->type));
5568 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5574 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
5575 stop_discovery_complete);
/* Queueing failed: drop the pending command again. */
5577 mgmt_pending_remove(cmd);
5581 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5584 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler.  User space confirms whether the remote
 * device name is already known for an inquiry-cache entry.  Requires an
 * active discovery; the address must refer to an entry whose name state
 * is still unknown.  name_known moves the entry to NAME_KNOWN, otherwise
 * it is flagged NAME_NEEDED and the resolve list updated.
 */
5588 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5591 struct mgmt_cp_confirm_name *cp = data;
5592 struct inquiry_entry *e;
5595 bt_dev_dbg(hdev, "sock %p", sk);
5599 if (!hci_discovery_active(hdev)) {
5600 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5601 MGMT_STATUS_FAILED, &cp->addr,
/* Look up an entry with unresolved name for this address. */
5606 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5608 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5609 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5614 if (cp->name_known) {
5615 e->name_state = NAME_KNOWN;
5618 e->name_state = NAME_NEEDED;
5619 hci_inquiry_cache_update_resolve(hdev, e);
5622 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5623 &cp->addr, sizeof(cp->addr));
5626 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler.  Validates the address type, adds the
 * address to hdev->reject_list and, on success, broadcasts
 * MGMT_EV_DEVICE_BLOCKED to other management sockets before answering
 * the caller.
 */
5630 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5633 struct mgmt_cp_block_device *cp = data;
5637 bt_dev_dbg(hdev, "sock %p", sk);
5639 if (!bdaddr_type_is_valid(cp->addr.type))
5640 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5641 MGMT_STATUS_INVALID_PARAMS,
5642 &cp->addr, sizeof(cp->addr));
5646 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
/* List insertion failed (e.g. duplicate): report failure. */
5649 status = MGMT_STATUS_FAILED;
5653 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5655 status = MGMT_STATUS_SUCCESS;
5658 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5659 &cp->addr, sizeof(cp->addr));
5661 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler.  Mirror of block_device(): removes the
 * address from hdev->reject_list; a failed delete means the address was
 * never blocked, so INVALID_PARAMS is returned.  On success broadcasts
 * MGMT_EV_DEVICE_UNBLOCKED.
 */
5666 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5669 struct mgmt_cp_unblock_device *cp = data;
5673 bt_dev_dbg(hdev, "sock %p", sk);
5675 if (!bdaddr_type_is_valid(cp->addr.type))
5676 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5677 MGMT_STATUS_INVALID_PARAMS,
5678 &cp->addr, sizeof(cp->addr));
5682 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
/* Not found in the reject list: invalid request. */
5685 status = MGMT_STATUS_INVALID_PARAMS;
5689 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5691 status = MGMT_STATUS_SUCCESS;
5694 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5695 &cp->addr, sizeof(cp->addr));
5697 hci_dev_unlock(hdev);
/* Sync worker for Set Device ID: refresh the EIR data so the new Device
 * ID record is advertised.
 */
5702 static int set_device_id_sync(struct hci_dev *hdev, void *data)
5704 return hci_update_eir_sync(hdev);
/* MGMT_OP_SET_DEVICE_ID handler.  Stores the DI source/vendor/product/
 * version in hdev (source must be 0x0000-0x0002 per the DI profile) and
 * queues an EIR update; the command is answered before the EIR refresh
 * completes.
 */
5707 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5710 struct mgmt_cp_set_device_id *cp = data;
5714 bt_dev_dbg(hdev, "sock %p", sk);
5716 source = __le16_to_cpu(cp->source);
/* 0x0000 = disabled, 0x0001 = Bluetooth SIG, 0x0002 = USB IF. */
5718 if (source > 0x0002)
5719 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5720 MGMT_STATUS_INVALID_PARAMS);
5724 hdev->devid_source = source;
5725 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5726 hdev->devid_product = __le16_to_cpu(cp->product);
5727 hdev->devid_version = __le16_to_cpu(cp->version);
5729 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5732 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
5734 hci_dev_unlock(hdev);
/* Log-only helper: report the outcome of re-enabling an advertising
 * instance (error on failure, debug on success).
 */
5739 static void enable_advertising_instance(struct hci_dev *hdev, int err)
5742 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
5744 bt_dev_dbg(hdev, "status %d", err);
/* Completion callback for Set Advertising.  On error, fail all pending
 * SET_ADVERTISING commands; on success, sync HCI_ADVERTISING with the
 * controller's HCI_LE_ADV state, answer pending commands, emit New
 * Settings, and — if global advertising was just disabled while
 * instances exist — reschedule instance advertising.
 */
5747 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
5749 struct cmd_lookup match = { NULL, hdev };
5751 struct adv_info *adv_instance;
5752 u8 status = mgmt_status(err);
/* Error path: every pending SET_ADVERTISING gets the status. */
5755 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5756 cmd_status_rsp, &status);
5760 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5761 hci_dev_set_flag(hdev, HCI_ADVERTISING)
5763 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5765 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5768 new_settings(hdev, match.sk);
5773 /* If "Set Advertising" was just disabled and instance advertising was
5774 * set up earlier, then re-enable multi-instance advertising.
5776 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5777 list_empty(&hdev->adv_instances))
5780 instance = hdev->cur_adv_instance;
/* No current instance selected: fall back to the first one. */
5782 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5783 struct adv_info, list);
5787 instance = adv_instance->instance;
5790 err = hci_schedule_adv_instance_sync(hdev, instance, true);
5792 enable_advertising_instance(hdev, err);
/* Sync worker for Set Advertising.  val 0x02 means "connectable
 * advertising"; updates HCI_ADVERTISING_CONNECTABLE accordingly, cancels
 * any instance timeout, switches to instance 0, then either starts
 * extended advertising or refreshes adv/scan-rsp data and enables legacy
 * advertising — or disables advertising when turning off.
 */
5795 static int set_adv_sync(struct hci_dev *hdev, void *data)
5797 struct mgmt_pending_cmd *cmd = data;
5798 struct mgmt_mode *cp = cmd->param;
5801 if (cp->val == 0x02)
5802 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5804 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5806 cancel_adv_timeout(hdev);
5809 /* Switch to instance "0" for the Set Advertising setting.
5810 * We cannot use update_[adv|scan_rsp]_data() here as the
5811 * HCI_ADVERTISING flag is not yet set.
5813 hdev->cur_adv_instance = 0x00;
5815 if (ext_adv_capable(hdev)) {
5816 hci_start_ext_adv_sync(hdev, 0x00);
5818 hci_update_adv_data_sync(hdev, 0x00);
5819 hci_update_scan_rsp_data_sync(hdev, 0x00);
5820 hci_enable_advertising_sync(hdev);
/* cp->val == 0x00: turn advertising off. */
5823 hci_disable_advertising_sync(hdev);
/* MGMT_OP_SET_ADVERTISING handler.  val: 0x00 off, 0x01 on,
 * 0x02 on + connectable.  Requires LE support and advertising not
 * paused.  When no HCI traffic is needed (powered off, no change, LE
 * link up, or active LE scan running) only the flags are toggled and
 * settings are replied directly; otherwise the change is queued through
 * set_adv_sync.
 */
5829 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5832 struct mgmt_mode *cp = data;
5833 struct mgmt_pending_cmd *cmd;
5837 bt_dev_dbg(hdev, "sock %p", sk);
5839 status = mgmt_le_support(hdev);
5841 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5844 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5845 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5846 MGMT_STATUS_INVALID_PARAMS);
/* Advertising is suspended (e.g. during system suspend): busy. */
5848 if (hdev->advertising_paused)
5849 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5856 /* The following conditions are ones which mean that we should
5857 * not do any HCI communication but directly send a mgmt
5858 * response to user space (after toggling the flag if
5861 if (!hdev_is_powered(hdev) ||
5862 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5863 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5864 hci_conn_num(hdev, LE_LINK) > 0 ||
5865 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5866 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5870 hdev->cur_adv_instance = 0x00;
5871 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5872 if (cp->val == 0x02)
5873 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5875 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5877 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5878 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5881 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5886 err = new_settings(hdev, sk);
/* Only one SET_ADVERTISING / SET_LE operation at a time. */
5891 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5892 pending_find(MGMT_OP_SET_LE, hdev)) {
5893 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5898 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5902 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
5903 set_advertising_complete);
5906 mgmt_pending_remove(cmd);
5909 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler.  Only allowed on LE-capable,
 * powered-off controllers.  A non-ANY address must not be BDADDR_NONE
 * and must have the two most significant bits set, as required for an
 * LE static random address.  Stores the address and emits New Settings.
 */
5913 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5914 void *data, u16 len)
5916 struct mgmt_cp_set_static_address *cp = data;
5919 bt_dev_dbg(hdev, "sock %p", sk);
5921 if (!lmp_le_capable(hdev))
5922 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5923 MGMT_STATUS_NOT_SUPPORTED);
5925 if (hdev_is_powered(hdev))
5926 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5927 MGMT_STATUS_REJECTED);
5929 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5930 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5931 return mgmt_cmd_status(sk, hdev->id,
5932 MGMT_OP_SET_STATIC_ADDRESS,
5933 MGMT_STATUS_INVALID_PARAMS);
5935 /* Two most significant bits shall be set */
5936 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5937 return mgmt_cmd_status(sk, hdev->id,
5938 MGMT_OP_SET_STATIC_ADDRESS,
5939 MGMT_STATUS_INVALID_PARAMS);
5944 bacpy(&hdev->static_addr, &cp->bdaddr);
5946 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5950 err = new_settings(hdev, sk);
5953 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler.  Interval and window must both be in
 * 0x0004..0x4000 (HCI LE scan range) and window must not exceed the
 * interval.  Stores the values and, if passive background scanning is
 * currently running (no user discovery), restarts it so the new
 * parameters take effect.
 */
5957 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5958 void *data, u16 len)
5960 struct mgmt_cp_set_scan_params *cp = data;
5961 __u16 interval, window;
5964 bt_dev_dbg(hdev, "sock %p", sk);
5966 if (!lmp_le_capable(hdev))
5967 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5968 MGMT_STATUS_NOT_SUPPORTED);
5970 interval = __le16_to_cpu(cp->interval);
5972 if (interval < 0x0004 || interval > 0x4000)
5973 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5974 MGMT_STATUS_INVALID_PARAMS);
5976 window = __le16_to_cpu(cp->window);
5978 if (window < 0x0004 || window > 0x4000)
5979 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5980 MGMT_STATUS_INVALID_PARAMS);
5982 if (window > interval)
5983 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5984 MGMT_STATUS_INVALID_PARAMS);
5988 hdev->le_scan_interval = interval;
5989 hdev->le_scan_window = window;
5991 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5994 /* If background scan is running, restart it so new parameters are
5997 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5998 hdev->discovery.state == DISCOVERY_STOPPED)
5999 hci_update_passive_scan(hdev);
6001 hci_dev_unlock(hdev);
/* Completion callback for Set Fast Connectable.  On error, answer with a
 * failure status; on success, sync HCI_FAST_CONNECTABLE with cp->val,
 * reply with the current settings and emit New Settings.  The command
 * was allocated with mgmt_pending_new(), hence mgmt_pending_free().
 */
6006 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6008 struct mgmt_pending_cmd *cmd = data;
6010 bt_dev_dbg(hdev, "err %d", err);
6013 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6016 struct mgmt_mode *cp = cmd->param;
6019 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6021 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6023 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6024 new_settings(hdev, cmd->sk);
6027 mgmt_pending_free(cmd);
/* Sync worker: write the requested fast-connectable mode (cp->val) to
 * the controller via the hci_sync helper.
 */
6030 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6032 struct mgmt_pending_cmd *cmd = data;
6033 struct mgmt_mode *cp = cmd->param;
6035 return hci_write_fast_connectable_sync(hdev, cp->val);
/* MGMT_OP_SET_FAST_CONNECTABLE handler.  Needs BR/EDR enabled and a
 * controller of Bluetooth 1.2 or later; val is strictly 0x00/0x01.
 * No-ops when the flag already matches; when powered off only the flag
 * is toggled.  Otherwise the write is queued through
 * write_fast_connectable_sync.
 */
6038 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6039 void *data, u16 len)
6041 struct mgmt_mode *cp = data;
6042 struct mgmt_pending_cmd *cmd;
6045 bt_dev_dbg(hdev, "sock %p", sk);
6047 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6048 hdev->hci_ver < BLUETOOTH_VER_1_2)
6049 return mgmt_cmd_status(sk, hdev->id,
6050 MGMT_OP_SET_FAST_CONNECTABLE,
6051 MGMT_STATUS_NOT_SUPPORTED);
6053 if (cp->val != 0x00 && cp->val != 0x01)
6054 return mgmt_cmd_status(sk, hdev->id,
6055 MGMT_OP_SET_FAST_CONNECTABLE,
6056 MGMT_STATUS_INVALID_PARAMS);
/* Already in the requested state: just reply with settings. */
6060 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6061 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6065 if (!hdev_is_powered(hdev)) {
6066 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6067 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6068 new_settings(hdev, sk);
6072 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6077 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6078 fast_connectable_complete);
6081 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6082 MGMT_STATUS_FAILED);
6085 mgmt_pending_free(cmd);
6089 hci_dev_unlock(hdev);
/* Completion callback for Set BR/EDR.  On error, roll back the
 * HCI_BREDR_ENABLED flag (it was flipped optimistically in set_bredr)
 * and return the translated status; on success, reply with settings and
 * emit New Settings.
 */
6094 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6096 struct mgmt_pending_cmd *cmd = data;
6098 bt_dev_dbg(hdev, "err %d", err);
6101 u8 mgmt_err = mgmt_status(err);
6103 /* We need to restore the flag if related HCI commands
6106 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6108 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6110 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6111 new_settings(hdev, cmd->sk);
6114 mgmt_pending_free(cmd);
/* Sync worker for Set BR/EDR: turn off fast connectable, refresh the
 * page/inquiry scan state, and update the advertising data (the
 * BR/EDR-support flags change there; scan response is unaffected).
 */
6117 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6121 status = hci_write_fast_connectable_sync(hdev, false);
6124 status = hci_update_scan_sync(hdev);
6126 /* Since only the advertising data flags will change, there
6127 * is no need to update the scan response data.
6130 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_BREDR handler.  Enables/disables BR/EDR on a dual-mode
 * controller.  Requires both BR/EDR and LE capability and LE currently
 * enabled.  Powered-off changes only toggle flags (disabling also clears
 * the BR/EDR-dependent flags).  While powered, disabling is rejected,
 * and re-enabling is rejected when a static address or Secure
 * Connections is in use (see the long comment below).  Otherwise the
 * HCI_BREDR_ENABLED flag is set optimistically and set_bredr_sync is
 * queued; set_bredr_complete rolls the flag back on failure.
 */
6135 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6137 struct mgmt_mode *cp = data;
6138 struct mgmt_pending_cmd *cmd;
6141 bt_dev_dbg(hdev, "sock %p", sk);
6143 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6144 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6145 MGMT_STATUS_NOT_SUPPORTED);
6147 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6148 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6149 MGMT_STATUS_REJECTED);
6151 if (cp->val != 0x00 && cp->val != 0x01)
6152 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6153 MGMT_STATUS_INVALID_PARAMS);
/* Already in requested state: reply with current settings. */
6157 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6158 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6162 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR: clear every flag that depends on it. */
6164 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6165 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6166 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6167 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6168 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6171 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6173 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6177 err = new_settings(hdev, sk);
6181 /* Reject disabling when powered on */
6183 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6184 MGMT_STATUS_REJECTED);
6187 /* When configuring a dual-mode controller to operate
6188 * with LE only and using a static address, then switching
6189 * BR/EDR back on is not allowed.
6191 * Dual-mode controllers shall operate with the public
6192 * address as its identity address for BR/EDR and LE. So
6193 * reject the attempt to create an invalid configuration.
6195 * The same restrictions applies when secure connections
6196 * has been enabled. For BR/EDR this is a controller feature
6197 * while for LE it is a host stack feature. This means that
6198 * switching BR/EDR back on when secure connections has been
6199 * enabled is not a supported transaction.
6201 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6202 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6203 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6204 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6205 MGMT_STATUS_REJECTED);
6210 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6214 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6215 set_bredr_complete);
6218 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6219 MGMT_STATUS_FAILED);
6221 mgmt_pending_free(cmd);
6226 /* We need to flip the bit already here so that
6227 * hci_req_update_adv_data generates the correct flags.
6229 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6232 hci_dev_unlock(hdev);
/* Completion callback for Set Secure Connections.  On error, answer
 * with a failure status.  On success, set HCI_SC_ENABLED / HCI_SC_ONLY
 * according to the requested value (0x00 off, 0x01 SC enabled,
 * 0x02 SC-only mode), then reply with settings and emit New Settings.
 */
6236 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6238 struct mgmt_pending_cmd *cmd = data;
6239 struct mgmt_mode *cp;
6241 bt_dev_dbg(hdev, "err %d", err);
6244 u8 mgmt_err = mgmt_status(err);
6246 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
/* val 0x00: SC fully disabled. */
6254 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6255 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val 0x01: SC enabled, mixed mode. */
6258 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6259 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val 0x02: SC-only mode. */
6262 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6263 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6267 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6268 new_settings(hdev, cmd->sk);
6271 mgmt_pending_free(cmd);
/* Sync worker for Set Secure Connections: force HCI_SC_ENABLED so the
 * write is actually issued, then write SC support to the controller.
 */
6274 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6276 struct mgmt_pending_cmd *cmd = data;
6277 struct mgmt_mode *cp = cmd->param;
6280 /* Force write of val */
6281 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6283 return hci_write_sc_support_sync(hdev, val);
/* MGMT_OP_SET_SECURE_CONN handler.  val: 0x00 off, 0x01 on, 0x02
 * SC-only.  Needs controller SC support or LE enabled; with BR/EDR up
 * and controller SC support, SSP must be enabled first.  Flag-only path
 * when powered off, SC not controller-supported, or BR/EDR disabled;
 * no-op when the requested state already matches; otherwise queue
 * set_secure_conn_sync.
 */
6286 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6287 void *data, u16 len)
6289 struct mgmt_mode *cp = data;
6290 struct mgmt_pending_cmd *cmd;
6294 bt_dev_dbg(hdev, "sock %p", sk);
6296 if (!lmp_sc_capable(hdev) &&
6297 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6298 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6299 MGMT_STATUS_NOT_SUPPORTED);
6301 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6302 lmp_sc_capable(hdev) &&
6303 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6304 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6305 MGMT_STATUS_REJECTED);
6307 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6308 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6309 MGMT_STATUS_INVALID_PARAMS);
/* LE-only / powered-off path: flags only, no HCI traffic. */
6313 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6314 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6318 changed = !hci_dev_test_and_set_flag(hdev,
6320 if (cp->val == 0x02)
6321 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6323 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6325 changed = hci_dev_test_and_clear_flag(hdev,
6327 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6330 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6335 err = new_settings(hdev, sk);
/* Already in requested SC / SC-only state: no HCI write needed. */
6342 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6343 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6344 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6348 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6352 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6353 set_secure_conn_complete);
6356 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6357 MGMT_STATUS_FAILED);
6359 mgmt_pending_free(cmd);
6363 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler.  val: 0x00 discard debug keys, 0x01
 * keep them (HCI_KEEP_DEBUG_KEYS), 0x02 additionally generate/use debug
 * keys (HCI_USE_DEBUG_KEYS).  When powered with SSP enabled and the
 * "use" state changed, the SSP debug mode is written to the controller
 * directly.
 */
6367 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6368 void *data, u16 len)
6370 struct mgmt_mode *cp = data;
6371 bool changed, use_changed;
6374 bt_dev_dbg(hdev, "sock %p", sk);
6376 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6377 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6378 MGMT_STATUS_INVALID_PARAMS);
6383 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6385 changed = hci_dev_test_and_clear_flag(hdev,
6386 HCI_KEEP_DEBUG_KEYS);
6388 if (cp->val == 0x02)
6389 use_changed = !hci_dev_test_and_set_flag(hdev,
6390 HCI_USE_DEBUG_KEYS);
6392 use_changed = hci_dev_test_and_clear_flag(hdev,
6393 HCI_USE_DEBUG_KEYS);
/* Push the new SSP debug mode to the controller when relevant. */
6395 if (hdev_is_powered(hdev) && use_changed &&
6396 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6397 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6398 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6399 sizeof(mode), &mode);
6402 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6407 err = new_settings(hdev, sk);
6410 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler.  privacy: 0x00 off, 0x01 on, 0x02
 * limited privacy.  LE required; only allowed while powered off.
 * Enabling stores the caller-supplied IRK, marks the RPA expired so a
 * fresh one is generated, and implies RPA resolving support in user
 * space.  Disabling wipes the IRK and clears all privacy flags.
 */
6414 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6417 struct mgmt_cp_set_privacy *cp = cp_data;
6421 bt_dev_dbg(hdev, "sock %p", sk);
6423 if (!lmp_le_capable(hdev))
6424 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6425 MGMT_STATUS_NOT_SUPPORTED);
6427 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6428 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6429 MGMT_STATUS_INVALID_PARAMS);
6431 if (hdev_is_powered(hdev))
6432 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6433 MGMT_STATUS_REJECTED);
6437 /* If user space supports this command it is also expected to
6438 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6440 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6443 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6444 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force a new RPA to be generated on next use. */
6445 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6446 hci_adv_instances_set_rpa_expired(hdev, true);
6447 if (cp->privacy == 0x02)
6448 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6450 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6452 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6453 memset(hdev->irk, 0, sizeof(hdev->irk));
6454 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6455 hci_adv_instances_set_rpa_expired(hdev, false);
6456 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6459 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6464 err = new_settings(hdev, sk);
6467 hci_dev_unlock(hdev);
/* Validate a single IRK entry from Load IRKs: public LE addresses are
 * always valid; random LE addresses must be static (two most
 * significant bits set).
 */
6471 static bool irk_is_valid(struct mgmt_irk_info *irk)
6473 switch (irk->addr.type) {
6474 case BDADDR_LE_PUBLIC:
6477 case BDADDR_LE_RANDOM:
6478 /* Two most significant bits shall be set */
6479 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler.  Validates irk_count against the maximum
 * that fits in a u16-sized packet and against the actual payload length
 * (struct_size guards against overflow), rejects any invalid entry,
 * then atomically replaces the SMP IRK list — skipping keys on the
 * blocked-key list — and enables RPA resolving.
 */
6487 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6490 struct mgmt_cp_load_irks *cp = cp_data;
6491 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6492 sizeof(struct mgmt_irk_info));
6493 u16 irk_count, expected_len;
6496 bt_dev_dbg(hdev, "sock %p", sk);
6498 if (!lmp_le_capable(hdev))
6499 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6500 MGMT_STATUS_NOT_SUPPORTED);
6502 irk_count = __le16_to_cpu(cp->irk_count);
6503 if (irk_count > max_irk_count) {
6504 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6506 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6507 MGMT_STATUS_INVALID_PARAMS);
6510 expected_len = struct_size(cp, irks, irk_count);
6511 if (expected_len != len) {
6512 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6514 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6515 MGMT_STATUS_INVALID_PARAMS);
6518 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Reject the whole load if any single entry is malformed. */
6520 for (i = 0; i < irk_count; i++) {
6521 struct mgmt_irk_info *key = &cp->irks[i];
6523 if (!irk_is_valid(key))
6524 return mgmt_cmd_status(sk, hdev->id,
6526 MGMT_STATUS_INVALID_PARAMS);
6531 hci_smp_irks_clear(hdev);
6533 for (i = 0; i < irk_count; i++) {
6534 struct mgmt_irk_info *irk = &cp->irks[i];
6536 if (hci_is_blocked_key(hdev,
6537 HCI_BLOCKED_KEY_TYPE_IRK,
6539 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6544 hci_add_irk(hdev, &irk->addr.bdaddr,
6545 le_addr_type(irk->addr.type), irk->val,
6549 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6551 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6553 hci_dev_unlock(hdev);
/* Validate a single LTK entry from Load Long Term Keys: initiator must
 * be 0x00/0x01; public LE addresses are valid, random LE addresses must
 * be static (two most significant bits set).
 */
6558 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6560 if (key->initiator != 0x00 && key->initiator != 0x01)
6563 switch (key->addr.type) {
6564 case BDADDR_LE_PUBLIC:
6567 case BDADDR_LE_RANDOM:
6568 /* Two most significant bits shall be set */
6569 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.  Same validation scheme as
 * load_irks (count bound, struct_size length check, per-entry
 * validation), then replaces the SMP LTK store.  Each entry's MGMT key
 * type is mapped to an SMP key type plus an "authenticated" flag;
 * legacy keys distinguish initiator vs responder role, P-256 keys do
 * not.  Keys on the blocked-key list are skipped with a warning.
 */
6577 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6578 void *cp_data, u16 len)
6580 struct mgmt_cp_load_long_term_keys *cp = cp_data;
6581 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6582 sizeof(struct mgmt_ltk_info));
6583 u16 key_count, expected_len;
6586 bt_dev_dbg(hdev, "sock %p", sk);
6588 if (!lmp_le_capable(hdev))
6589 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6590 MGMT_STATUS_NOT_SUPPORTED);
6592 key_count = __le16_to_cpu(cp->key_count);
6593 if (key_count > max_key_count) {
6594 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6596 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6597 MGMT_STATUS_INVALID_PARAMS);
6600 expected_len = struct_size(cp, keys, key_count);
6601 if (expected_len != len) {
6602 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6604 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6605 MGMT_STATUS_INVALID_PARAMS);
6608 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Reject the whole load if any single entry is malformed. */
6610 for (i = 0; i < key_count; i++) {
6611 struct mgmt_ltk_info *key = &cp->keys[i];
6613 if (!ltk_is_valid(key))
6614 return mgmt_cmd_status(sk, hdev->id,
6615 MGMT_OP_LOAD_LONG_TERM_KEYS,
6616 MGMT_STATUS_INVALID_PARAMS);
6621 hci_smp_ltks_clear(hdev);
6623 for (i = 0; i < key_count; i++) {
6624 struct mgmt_ltk_info *key = &cp->keys[i];
6625 u8 type, authenticated;
6627 if (hci_is_blocked_key(hdev,
6628 HCI_BLOCKED_KEY_TYPE_LTK,
6630 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map the MGMT key type to SMP key type + authentication level. */
6635 switch (key->type) {
6636 case MGMT_LTK_UNAUTHENTICATED:
6637 authenticated = 0x00;
6638 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6640 case MGMT_LTK_AUTHENTICATED:
6641 authenticated = 0x01;
6642 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6644 case MGMT_LTK_P256_UNAUTH:
6645 authenticated = 0x00;
6646 type = SMP_LTK_P256;
6648 case MGMT_LTK_P256_AUTH:
6649 authenticated = 0x01;
6650 type = SMP_LTK_P256;
6652 case MGMT_LTK_P256_DEBUG:
6653 authenticated = 0x00;
6654 type = SMP_LTK_P256_DEBUG;
6660 hci_add_ltk(hdev, &key->addr.bdaddr,
6661 le_addr_type(key->addr.type), type, authenticated,
6662 key->val, key->enc_size, key->ediv, key->rand);
6665 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6668 hci_dev_unlock(hdev);
/* Completion callback for Get Connection Information.  Builds the reply
 * from the connection's cached RSSI / TX power values on success, or
 * invalid markers on failure, then answers the originating socket and
 * releases the connection reference taken in get_conn_info().
 */
6673 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
6675 struct mgmt_pending_cmd *cmd = data;
6676 struct hci_conn *conn = cmd->user_data;
6677 struct mgmt_cp_get_conn_info *cp = cmd->param;
6678 struct mgmt_rp_get_conn_info rp;
6681 bt_dev_dbg(hdev, "err %d", err);
6683 memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
6685 status = mgmt_status(err);
6686 if (status == MGMT_STATUS_SUCCESS) {
6687 rp.rssi = conn->rssi;
6688 rp.tx_power = conn->tx_power;
6689 rp.max_tx_power = conn->max_tx_power;
/* Failure: report sentinel "invalid" values. */
6691 rp.rssi = HCI_RSSI_INVALID;
6692 rp.tx_power = HCI_TX_POWER_INVALID;
6693 rp.max_tx_power = HCI_TX_POWER_INVALID;
6696 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
6700 hci_conn_drop(conn);
6704 mgmt_pending_free(cmd);
/* Sync worker for Get Connection Information.  Re-validates that the
 * connection still exists and matches the one captured at request time
 * (dropping the stale reference otherwise), then refreshes RSSI always,
 * current TX power when unknown or on BR/EDR (LE TX power is static so
 * a cached value is reused), and max TX power once per connection.
 */
6707 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
6709 struct mgmt_pending_cmd *cmd = data;
6710 struct mgmt_cp_get_conn_info *cp = cmd->param;
6711 struct hci_conn *conn;
6715 /* Make sure we are still connected */
6716 if (cp->addr.type == BDADDR_BREDR)
6717 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6720 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6722 if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
6723 if (cmd->user_data) {
6724 hci_conn_drop(cmd->user_data);
6725 hci_conn_put(cmd->user_data);
6726 cmd->user_data = NULL;
6728 return MGMT_STATUS_NOT_CONNECTED;
6731 handle = cpu_to_le16(conn->handle);
6733 /* Refresh RSSI each time */
6734 err = hci_read_rssi_sync(hdev, handle);
6736 /* For LE links TX power does not change thus we don't need to
6737 * query for it once value is known.
6739 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
6740 conn->tx_power == HCI_TX_POWER_INVALID))
6741 err = hci_read_tx_power_sync(hdev, handle, 0x00);
6743 /* Max TX power needs to be read only once per connection */
6744 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
6745 err = hci_read_tx_power_sync(hdev, handle, 0x01);
/* MGMT_OP_GET_CONN_INFO handler.  Returns RSSI / TX power for an active
 * connection.  Cached values are returned directly while they are
 * fresh; otherwise a refresh is queued via get_conn_info_sync, with a
 * randomized cache lifetime (between conn_info_min_age and
 * conn_info_max_age) so clients cannot synchronize their polling.
 */
6750 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6753 struct mgmt_cp_get_conn_info *cp = data;
6754 struct mgmt_rp_get_conn_info rp;
6755 struct hci_conn *conn;
6756 unsigned long conn_info_age;
6759 bt_dev_dbg(hdev, "sock %p", sk);
6761 memset(&rp, 0, sizeof(rp));
6762 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6763 rp.addr.type = cp->addr.type;
6765 if (!bdaddr_type_is_valid(cp->addr.type))
6766 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6767 MGMT_STATUS_INVALID_PARAMS,
6772 if (!hdev_is_powered(hdev)) {
6773 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6774 MGMT_STATUS_NOT_POWERED, &rp,
6779 if (cp->addr.type == BDADDR_BREDR)
6780 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6783 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6785 if (!conn || conn->state != BT_CONNECTED) {
6786 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6787 MGMT_STATUS_NOT_CONNECTED, &rp,
6792 /* To avoid client trying to guess when to poll again for information we
6793 * calculate conn info age as random value between min/max set in hdev.
6795 conn_info_age = hdev->conn_info_min_age +
6796 prandom_u32_max(hdev->conn_info_max_age -
6797 hdev->conn_info_min_age);
6799 /* Query controller to refresh cached values if they are too old or were
6802 if (time_after(jiffies, conn->conn_info_timestamp +
6803 msecs_to_jiffies(conn_info_age)) ||
6804 !conn->conn_info_timestamp) {
6805 struct mgmt_pending_cmd *cmd;
6807 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
6812 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
6813 cmd, get_conn_info_complete);
6816 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6817 MGMT_STATUS_FAILED, &rp, sizeof(rp));
6820 mgmt_pending_free(cmd);
/* Hold the connection until the refresh completes. */
6825 hci_conn_hold(conn);
6826 cmd->user_data = hci_conn_get(conn);
6828 conn->conn_info_timestamp = jiffies;
6830 /* Cache is valid, just reply with values cached in hci_conn */
6831 rp.rssi = conn->rssi;
6832 rp.tx_power = conn->tx_power;
6833 rp.max_tx_power = conn->max_tx_power;
6835 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6836 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6840 hci_dev_unlock(hdev);
/* Completion callback for Get Clock Information.  Fills in the local
 * clock and — when a connection was involved — the piconet clock and
 * its accuracy, then answers the caller and drops the connection
 * reference taken in get_clock_info().
 */
6844 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
6846 struct mgmt_pending_cmd *cmd = data;
6847 struct mgmt_cp_get_clock_info *cp = cmd->param;
6848 struct mgmt_rp_get_clock_info rp;
6849 struct hci_conn *conn = cmd->user_data;
6850 u8 status = mgmt_status(err);
6852 bt_dev_dbg(hdev, "err %d", err);
6854 memset(&rp, 0, sizeof(rp));
6855 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6856 rp.addr.type = cp->addr.type;
6861 rp.local_clock = cpu_to_le32(hdev->clock);
6864 rp.piconet_clock = cpu_to_le32(conn->clock);
6865 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6866 hci_conn_drop(conn);
6871 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6874 mgmt_pending_free(cmd);
/* Sync worker for Get Clock Information.  Reads the local clock first
 * (hci_cp zeroed => which == 0x00), then, if the captured connection is
 * still the same and connected, reads the piconet clock
 * (which == 0x01) for its handle; a stale connection reference is
 * dropped instead.
 */
6877 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
6879 struct mgmt_pending_cmd *cmd = data;
6880 struct mgmt_cp_get_clock_info *cp = cmd->param;
6881 struct hci_cp_read_clock hci_cp;
6882 struct hci_conn *conn = cmd->user_data;
6885 memset(&hci_cp, 0, sizeof(hci_cp));
6886 err = hci_read_clock_sync(hdev, &hci_cp);
6889 /* Make sure connection still exists */
6890 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6893 if (conn && conn == cmd->user_data &&
6894 conn->state == BT_CONNECTED) {
6895 hci_cp.handle = cpu_to_le16(conn->handle);
6896 hci_cp.which = 0x01; /* Piconet clock */
6897 err = hci_read_clock_sync(hdev, &hci_cp);
6898 } else if (cmd->user_data) {
6899 hci_conn_drop(cmd->user_data);
6900 hci_conn_put(cmd->user_data);
6901 cmd->user_data = NULL;
/* MGMT_OP_GET_CLOCK_INFO handler.  BR/EDR only.  A non-ANY address must
 * refer to a connected device (its piconet clock is then read as well);
 * BDADDR_ANY requests just the local clock.  Queues get_clock_info_sync
 * and, when a connection is involved, holds a reference to it for the
 * duration of the operation.
 */
6908 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
6911 struct mgmt_cp_get_clock_info *cp = data;
6912 struct mgmt_rp_get_clock_info rp;
6913 struct mgmt_pending_cmd *cmd;
6914 struct hci_conn *conn;
6917 bt_dev_dbg(hdev, "sock %p", sk);
6919 memset(&rp, 0, sizeof(rp));
6920 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6921 rp.addr.type = cp->addr.type;
6923 if (cp->addr.type != BDADDR_BREDR)
6924 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6925 MGMT_STATUS_INVALID_PARAMS,
6930 if (!hdev_is_powered(hdev)) {
6931 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6932 MGMT_STATUS_NOT_POWERED, &rp,
6937 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6938 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6940 if (!conn || conn->state != BT_CONNECTED) {
6941 err = mgmt_cmd_complete(sk, hdev->id,
6942 MGMT_OP_GET_CLOCK_INFO,
6943 MGMT_STATUS_NOT_CONNECTED,
6951 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
6955 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
6956 get_clock_info_complete);
6959 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6960 MGMT_STATUS_FAILED, &rp, sizeof(rp));
6963 mgmt_pending_free(cmd);
/* Keep the connection alive while the clock read runs. */
6966 hci_conn_hold(conn);
6967 cmd->user_data = hci_conn_get(conn);
6972 hci_dev_unlock(hdev);
/* Return true iff an LE connection to @addr with destination address type
 * @type exists and is in BT_CONNECTED state.
 * NOTE(review): the early-return lines are missing from this excerpt.
 */
6976 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6978 struct hci_conn *conn;
6980 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6984 if (conn->dst_type != type)
6987 if (conn->state != BT_CONNECTED)
6993 /* This function requires the caller holds hdev->lock.
 *
 * Set the auto-connect policy for @addr/@addr_type, creating the
 * connection parameters if needed, and move params->action onto the
 * matching pending list (pend_le_conns / pend_le_reports).
 *
 * Fix: the "&params" expressions below had been mangled into "¶ms"
 * by stray HTML-entity decoding ("&para" -> U+00B6); restored.
 */
6994 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
6995 u8 addr_type, u8 auto_connect)
6997 struct hci_conn_params *params;
6999 params = hci_conn_params_add(hdev, addr, addr_type);
7003 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the params currently sit on */
7006 list_del_init(&params->action);
7008 switch (auto_connect) {
7009 case HCI_AUTO_CONN_DISABLED:
7010 case HCI_AUTO_CONN_LINK_LOSS:
7011 /* If auto connect is being disabled when we're trying to
7012 * connect to device, keep connecting.
7014 if (params->explicit_connect)
7015 list_add(&params->action, &hdev->pend_le_conns);
7017 case HCI_AUTO_CONN_REPORT:
7018 if (params->explicit_connect)
7019 list_add(&params->action, &hdev->pend_le_conns);
7021 list_add(&params->action, &hdev->pend_le_reports);
7023 case HCI_AUTO_CONN_DIRECT:
7024 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if we are not already connected */
7025 if (!is_connected(hdev, addr, addr_type))
7026 list_add(&params->action, &hdev->pend_le_conns);
7030 params->auto_connect = auto_connect;
7032 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7033 addr, addr_type, auto_connect);
7038 static void device_added(struct sock *sk, struct hci_dev *hdev,
7039 bdaddr_t *bdaddr, u8 type, u8 action)
7041 struct mgmt_ev_device_added ev;
7043 bacpy(&ev.addr.bdaddr, bdaddr);
7044 ev.addr.type = type;
7047 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* cmd_sync worker for Add Device: refresh passive scanning so the new
 * entry on the accept/report lists takes effect.
 */
7050 static int add_device_sync(struct hci_dev *hdev, void *data)
7052 return hci_update_passive_scan_sync(hdev);
/* Handler for MGMT_OP_ADD_DEVICE: add a BR/EDR address to the accept list
 * or configure LE auto-connect policy for an identity address, then queue
 * a passive-scan update and emit Device Added / Device Flags Changed.
 *
 * Fix: "&current_flags" below had been mangled into "¤t_flags" by
 * stray HTML-entity decoding ("&curren" -> U+00A4); restored.
 */
7055 static int add_device(struct sock *sk, struct hci_dev *hdev,
7056 void *data, u16 len)
7058 struct mgmt_cp_add_device *cp = data;
7059 u8 auto_conn, addr_type;
7060 struct hci_conn_params *params;
7062 u32 current_flags = 0;
7063 u32 supported_flags;
7065 bt_dev_dbg(hdev, "sock %p", sk);
7067 if (!bdaddr_type_is_valid(cp->addr.type) ||
7068 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7069 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7070 MGMT_STATUS_INVALID_PARAMS,
7071 &cp->addr, sizeof(cp->addr));
/* action: 0x00 background scan, 0x01 allow incoming, 0x02 auto-connect */
7073 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7074 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7075 MGMT_STATUS_INVALID_PARAMS,
7076 &cp->addr, sizeof(cp->addr));
7080 if (cp->addr.type == BDADDR_BREDR) {
7081 /* Only incoming connections action is supported for now */
7082 if (cp->action != 0x01) {
7083 err = mgmt_cmd_complete(sk, hdev->id,
7085 MGMT_STATUS_INVALID_PARAMS,
7086 &cp->addr, sizeof(cp->addr));
7090 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7096 hci_req_update_scan(hdev);
7101 addr_type = le_addr_type(cp->addr.type);
7103 if (cp->action == 0x02)
7104 auto_conn = HCI_AUTO_CONN_ALWAYS;
7105 else if (cp->action == 0x01)
7106 auto_conn = HCI_AUTO_CONN_DIRECT;
7108 auto_conn = HCI_AUTO_CONN_REPORT;
7110 /* Kernel internally uses conn_params with resolvable private
7111 * address, but Add Device allows only identity addresses.
7112 * Make sure it is enforced before calling
7113 * hci_conn_params_lookup.
7115 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7116 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7117 MGMT_STATUS_INVALID_PARAMS,
7118 &cp->addr, sizeof(cp->addr));
7122 /* If the connection parameters don't exist for this device,
7123 * they will be created and configured with defaults.
7125 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7127 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7128 MGMT_STATUS_FAILED, &cp->addr,
7132 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7135 bitmap_to_arr32(&current_flags, params->flags,
7136 __HCI_CONN_NUM_FLAGS);
7139 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7144 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7145 bitmap_to_arr32(&supported_flags, hdev->conn_flags,
7146 __HCI_CONN_NUM_FLAGS);
7147 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7148 supported_flags, current_flags);
7150 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7151 MGMT_STATUS_SUCCESS, &cp->addr,
7155 hci_dev_unlock(hdev);
/* Emit the MGMT Device Removed event for @bdaddr to all sockets except
 * @sk (the originator).
 */
7159 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7160 bdaddr_t *bdaddr, u8 type)
7162 struct mgmt_ev_device_removed ev;
7164 bacpy(&ev.addr.bdaddr, bdaddr);
7165 ev.addr.type = type;
7167 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* cmd_sync worker for Remove Device: refresh passive scanning so the
 * removed entries no longer influence scanning/connecting.
 */
7170 static int remove_device_sync(struct hci_dev *hdev, void *data)
7172 return hci_update_passive_scan_sync(hdev);
/* Handler for MGMT_OP_REMOVE_DEVICE: remove a single BR/EDR or LE entry,
 * or — when BDADDR_ANY is given — wipe the whole accept list and all
 * removable LE connection parameters, then queue a passive-scan update.
 *
 * Fix: the "&params" expressions in the list_del() calls below had been
 * mangled into "¶ms" by stray HTML-entity decoding; restored.
 */
7175 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7176 void *data, u16 len)
7178 struct mgmt_cp_remove_device *cp = data;
7181 bt_dev_dbg(hdev, "sock %p", sk);
/* Non-ANY address: remove exactly one device entry */
7185 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7186 struct hci_conn_params *params;
7189 if (!bdaddr_type_is_valid(cp->addr.type)) {
7190 err = mgmt_cmd_complete(sk, hdev->id,
7191 MGMT_OP_REMOVE_DEVICE,
7192 MGMT_STATUS_INVALID_PARAMS,
7193 &cp->addr, sizeof(cp->addr));
7197 if (cp->addr.type == BDADDR_BREDR) {
7198 err = hci_bdaddr_list_del(&hdev->accept_list,
7202 err = mgmt_cmd_complete(sk, hdev->id,
7203 MGMT_OP_REMOVE_DEVICE,
7204 MGMT_STATUS_INVALID_PARAMS,
7210 hci_req_update_scan(hdev);
7212 device_removed(sk, hdev, &cp->addr.bdaddr,
7217 addr_type = le_addr_type(cp->addr.type);
7219 /* Kernel internally uses conn_params with resolvable private
7220 * address, but Remove Device allows only identity addresses.
7221 * Make sure it is enforced before calling
7222 * hci_conn_params_lookup.
7224 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7225 err = mgmt_cmd_complete(sk, hdev->id,
7226 MGMT_OP_REMOVE_DEVICE,
7227 MGMT_STATUS_INVALID_PARAMS,
7228 &cp->addr, sizeof(cp->addr));
7232 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7235 err = mgmt_cmd_complete(sk, hdev->id,
7236 MGMT_OP_REMOVE_DEVICE,
7237 MGMT_STATUS_INVALID_PARAMS,
7238 &cp->addr, sizeof(cp->addr));
/* Disabled/explicit entries are not owned by Add Device — refuse */
7242 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7243 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7244 err = mgmt_cmd_complete(sk, hdev->id,
7245 MGMT_OP_REMOVE_DEVICE,
7246 MGMT_STATUS_INVALID_PARAMS,
7247 &cp->addr, sizeof(cp->addr));
7251 list_del(&params->action);
7252 list_del(&params->list);
7255 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: remove every removable entry */
7257 struct hci_conn_params *p, *tmp;
7258 struct bdaddr_list *b, *btmp;
/* Wildcard removal requires addr.type == BDADDR_BREDR (0x00) */
7260 if (cp->addr.type) {
7261 err = mgmt_cmd_complete(sk, hdev->id,
7262 MGMT_OP_REMOVE_DEVICE,
7263 MGMT_STATUS_INVALID_PARAMS,
7264 &cp->addr, sizeof(cp->addr));
7268 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7269 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7274 hci_req_update_scan(hdev);
7276 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7277 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7279 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep params that an explicit connect attempt still needs */
7280 if (p->explicit_connect) {
7281 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7284 list_del(&p->action);
7289 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7292 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7295 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7296 MGMT_STATUS_SUCCESS, &cp->addr,
7299 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_CONN_PARAM: replace the stored LE connection
 * parameters with the list supplied by userspace. Entries with an invalid
 * address type or parameter combination are skipped, not fatal.
 *
 * Fix: the "&param" expressions below had been mangled into "¶m" by
 * stray HTML-entity decoding ("&para" -> U+00B6); restored.
 */
7303 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7306 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound keeps expected_len below from overflowing u16 */
7307 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7308 sizeof(struct mgmt_conn_param));
7309 u16 param_count, expected_len;
7312 if (!lmp_le_capable(hdev))
7313 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7314 MGMT_STATUS_NOT_SUPPORTED);
7316 param_count = __le16_to_cpu(cp->param_count);
7317 if (param_count > max_param_count) {
7318 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7320 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7321 MGMT_STATUS_INVALID_PARAMS);
7324 expected_len = struct_size(cp, params, param_count);
7325 if (expected_len != len) {
7326 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7328 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7329 MGMT_STATUS_INVALID_PARAMS);
7332 bt_dev_dbg(hdev, "param_count %u", param_count);
/* Start from a clean slate of (non-disabled) parameters */
7336 hci_conn_params_clear_disabled(hdev);
7338 for (i = 0; i < param_count; i++) {
7339 struct mgmt_conn_param *param = &cp->params[i];
7340 struct hci_conn_params *hci_param;
7341 u16 min, max, latency, timeout;
7344 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7347 if (param->addr.type == BDADDR_LE_PUBLIC) {
7348 addr_type = ADDR_LE_DEV_PUBLIC;
7349 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7350 addr_type = ADDR_LE_DEV_RANDOM;
7352 bt_dev_err(hdev, "ignoring invalid connection parameters");
7356 min = le16_to_cpu(param->min_interval);
7357 max = le16_to_cpu(param->max_interval);
7358 latency = le16_to_cpu(param->latency);
7359 timeout = le16_to_cpu(param->timeout);
7361 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7362 min, max, latency, timeout);
7364 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7365 bt_dev_err(hdev, "ignoring invalid connection parameters");
7369 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7372 bt_dev_err(hdev, "failed to add connection parameters");
7376 hci_param->conn_min_interval = min;
7377 hci_param->conn_max_interval = max;
7378 hci_param->conn_latency = latency;
7379 hci_param->supervision_timeout = timeout;
7382 hci_dev_unlock(hdev);
7384 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: toggle the HCI_EXT_CONFIGURED
 * flag on controllers with the EXTERNAL_CONFIG quirk, and move the index
 * between the configured and unconfigured lists when that changes the
 * overall is_configured() state.
 */
7388 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7389 void *data, u16 len)
7391 struct mgmt_cp_set_external_config *cp = data;
7395 bt_dev_dbg(hdev, "sock %p", sk);
/* Only allowed while powered off */
7397 if (hdev_is_powered(hdev))
7398 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7399 MGMT_STATUS_REJECTED);
7401 if (cp->config != 0x00 && cp->config != 0x01)
7402 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7403 MGMT_STATUS_INVALID_PARAMS);
7405 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7406 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7407 MGMT_STATUS_NOT_SUPPORTED);
/* test_and_* returns the previous value, so 'changed' is a real edge */
7412 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7414 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7416 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7423 err = new_options(hdev, sk);
7425 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7426 mgmt_index_removed(hdev);
7428 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7429 hci_dev_set_flag(hdev, HCI_CONFIG);
7430 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7432 queue_work(hdev->req_workqueue, &hdev->power_on);
7434 set_bit(HCI_RAW, &hdev->flags);
7435 mgmt_index_added(hdev);
7440 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: store a public address for an
 * unconfigured controller (requires a driver set_bdaddr hook and powered-
 * off state); if this completes configuration, re-register the index and
 * kick off power-on.
 */
7444 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7445 void *data, u16 len)
7447 struct mgmt_cp_set_public_address *cp = data;
7451 bt_dev_dbg(hdev, "sock %p", sk);
7453 if (hdev_is_powered(hdev))
7454 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7455 MGMT_STATUS_REJECTED);
7457 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7458 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7459 MGMT_STATUS_INVALID_PARAMS);
/* Without a driver hook the address can never reach the controller */
7461 if (!hdev->set_bdaddr)
7462 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7463 MGMT_STATUS_NOT_SUPPORTED);
7467 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7468 bacpy(&hdev->public_addr, &cp->bdaddr);
7470 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7477 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7478 err = new_options(hdev, sk);
7480 if (is_configured(hdev)) {
7481 mgmt_index_removed(hdev);
7483 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7485 hci_dev_set_flag(hdev, HCI_CONFIG);
7486 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7488 queue_work(hdev->req_workqueue, &hdev->power_on);
7492 hci_dev_unlock(hdev);
/* Completion callback for the Read Local OOB (Extended) Data HCI command
 * issued by read_local_ssp_oob_req(): build the EIR payload (class of
 * device plus P-192 and, when SC is enabled, P-256 hash/randomizer),
 * answer the pending MGMT command, and broadcast the OOB-data-updated
 * event to subscribed sockets.
 */
7496 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
7499 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7500 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7501 u8 *h192, *r192, *h256, *r256;
7502 struct mgmt_pending_cmd *cmd = data;
7503 struct sk_buff *skb = cmd->skb;
7504 u8 status = mgmt_status(err);
/* Bail out if the pending command was already cancelled/answered */
7507 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
7512 status = MGMT_STATUS_FAILED;
7513 else if (IS_ERR(skb))
7514 status = mgmt_status(PTR_ERR(skb));
7516 status = mgmt_status(skb->data[0]);
7519 bt_dev_dbg(hdev, "status %u", status);
7521 mgmt_cp = cmd->param;
7524 status = mgmt_status(status);
/* Legacy controller: only P-192 hash/randomizer are available */
7531 } else if (!bredr_sc_enabled(hdev)) {
7532 struct hci_rp_read_local_oob_data *rp;
7534 if (skb->len != sizeof(*rp)) {
7535 status = MGMT_STATUS_FAILED;
7538 status = MGMT_STATUS_SUCCESS;
7539 rp = (void *)skb->data;
/* 5 bytes class-of-dev TLV + two 18-byte hash/rand TLVs */
7541 eir_len = 5 + 18 + 18;
7548 struct hci_rp_read_local_oob_ext_data *rp;
7550 if (skb->len != sizeof(*rp)) {
7551 status = MGMT_STATUS_FAILED;
7554 status = MGMT_STATUS_SUCCESS;
7555 rp = (void *)skb->data;
/* SC-only mode exposes just the P-256 values */
7557 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7558 eir_len = 5 + 18 + 18;
7562 eir_len = 5 + 18 + 18 + 18 + 18;
7572 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7579 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7580 hdev->dev_class, 3);
7583 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7584 EIR_SSP_HASH_C192, h192, 16);
7585 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7586 EIR_SSP_RAND_R192, r192, 16);
7590 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7591 EIR_SSP_HASH_C256, h256, 16);
7592 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7593 EIR_SSP_RAND_R256, r256, 16);
7597 mgmt_rp->type = mgmt_cp->type;
7598 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7600 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7601 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7602 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7603 if (err < 0 || status)
/* Originator keeps receiving OOB data updates from now on */
7606 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7608 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7609 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7610 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7612 if (skb && !IS_ERR(skb))
7616 mgmt_pending_remove(cmd);
/* Queue the synchronous Read Local OOB Data request for the BR/EDR case
 * of MGMT_OP_READ_LOCAL_OOB_EXT_DATA; the result is delivered through
 * read_local_oob_ext_data_complete().
 */
7619 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7620 struct mgmt_cp_read_local_oob_ext_data *cp)
7622 struct mgmt_pending_cmd *cmd;
7625 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7630 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7631 read_local_oob_ext_data_complete);
7634 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA: for BR/EDR the request is
 * deferred to the controller via read_local_ssp_oob_req(); for LE the
 * EIR payload (address, role, optional SC confirm/random, flags) is built
 * here directly. On any error an empty payload with a status is returned.
 */
7641 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7642 void *data, u16 data_len)
7644 struct mgmt_cp_read_local_oob_ext_data *cp = data;
7645 struct mgmt_rp_read_local_oob_ext_data *rp;
7648 u8 status, flags, role, addr[7], hash[16], rand[16];
7651 bt_dev_dbg(hdev, "sock %p", sk);
/* cp->type is a bitmask of BDADDR_* transports */
7653 if (hdev_is_powered(hdev)) {
7655 case BIT(BDADDR_BREDR):
7656 status = mgmt_bredr_support(hdev);
7662 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7663 status = mgmt_le_support(hdev);
/* addr TLV + role TLV + SC confirm + SC random + flags TLV */
7667 eir_len = 9 + 3 + 18 + 18 + 3;
7670 status = MGMT_STATUS_INVALID_PARAMS;
7675 status = MGMT_STATUS_NOT_POWERED;
7679 rp_len = sizeof(*rp) + eir_len;
7680 rp = kmalloc(rp_len, GFP_ATOMIC);
7684 if (!status && !lmp_ssp_capable(hdev)) {
7685 status = MGMT_STATUS_NOT_SUPPORTED;
7696 case BIT(BDADDR_BREDR):
7697 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7698 err = read_local_ssp_oob_req(hdev, sk, cp);
7699 hci_dev_unlock(hdev);
7703 status = MGMT_STATUS_FAILED;
7706 eir_len = eir_append_data(rp->eir, eir_len,
7708 hdev->dev_class, 3);
7711 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7712 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7713 smp_generate_oob(hdev, hash, rand) < 0) {
7714 hci_dev_unlock(hdev);
7715 status = MGMT_STATUS_FAILED;
7719 /* This should return the active RPA, but since the RPA
7720 * is only programmed on demand, it is really hard to fill
7721 * this in at the moment. For now disallow retrieving
7722 * local out-of-band data when privacy is in use.
7724 * Returning the identity address will not help here since
7725 * pairing happens before the identity resolving key is
7726 * known and thus the connection establishment happens
7727 * based on the RPA and not the identity address.
7729 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7730 hci_dev_unlock(hdev);
7731 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address the same way advertising does */
7735 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7736 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7737 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7738 bacmp(&hdev->static_addr, BDADDR_ANY))) {
7739 memcpy(addr, &hdev->static_addr, 6);
7742 memcpy(addr, &hdev->bdaddr, 6);
7746 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7747 addr, sizeof(addr));
7749 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7754 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
7755 &role, sizeof(role));
7757 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
7758 eir_len = eir_append_data(rp->eir, eir_len,
7760 hash, sizeof(hash));
7762 eir_len = eir_append_data(rp->eir, eir_len,
7764 rand, sizeof(rand));
7767 flags = mgmt_get_adv_discov_flags(hdev);
7769 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
7770 flags |= LE_AD_NO_BREDR;
7772 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
7773 &flags, sizeof(flags));
7777 hci_dev_unlock(hdev);
7779 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
7781 status = MGMT_STATUS_SUCCESS;
7784 rp->type = cp->type;
7785 rp->eir_len = cpu_to_le16(eir_len);
7787 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
7788 status, rp, sizeof(*rp) + eir_len);
7789 if (err < 0 || status)
7792 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7793 rp, sizeof(*rp) + eir_len,
7794 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Return the bitmask of MGMT advertising flags this controller supports:
 * the baseline set is always available, TX power and the extended (SEC_*)
 * PHY flags depend on controller capabilities.
 */
7802 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7806 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7807 flags |= MGMT_ADV_FLAG_DISCOV;
7808 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7809 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7810 flags |= MGMT_ADV_FLAG_APPEARANCE;
7811 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7812 flags |= MGMT_ADV_PARAM_DURATION;
7813 flags |= MGMT_ADV_PARAM_TIMEOUT;
7814 flags |= MGMT_ADV_PARAM_INTERVALS;
7815 flags |= MGMT_ADV_PARAM_TX_POWER;
7816 flags |= MGMT_ADV_PARAM_SCAN_RSP;
7818 /* In extended adv TX_POWER returned from Set Adv Param
7819 * will be always valid.
7821 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7822 ext_adv_capable(hdev))
7823 flags |= MGMT_ADV_FLAG_TX_POWER;
7825 if (ext_adv_capable(hdev)) {
7826 flags |= MGMT_ADV_FLAG_SEC_1M;
7827 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7828 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
/* 2M/Coded PHY support is advertised via LE features page 1 */
7830 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7831 flags |= MGMT_ADV_FLAG_SEC_2M;
7833 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7834 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* Handler for MGMT_OP_READ_ADV_FEATURES: report supported advertising
 * flags, data-length limits, max/active instance counts, and the list of
 * currently registered instance IDs (one byte each, appended after rp).
 */
7840 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7841 void *data, u16 data_len)
7843 struct mgmt_rp_read_adv_features *rp;
7846 struct adv_info *adv_instance;
7847 u32 supported_flags;
7850 bt_dev_dbg(hdev, "sock %p", sk);
7852 if (!lmp_le_capable(hdev))
7853 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7854 MGMT_STATUS_REJECTED);
/* One trailing byte per registered instance */
7858 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7859 rp = kmalloc(rp_len, GFP_ATOMIC);
7861 hci_dev_unlock(hdev);
7865 supported_flags = get_supported_adv_flags(hdev);
7867 rp->supported_flags = cpu_to_le32(supported_flags);
7868 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7869 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7870 rp->max_instances = hdev->le_num_of_adv_sets;
7871 rp->num_instances = hdev->adv_instance_cnt;
7873 instance = rp->instance;
7874 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7875 *instance = adv_instance->instance;
7879 hci_dev_unlock(hdev);
7881 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7882 MGMT_STATUS_SUCCESS, rp, rp_len);
7889 static u8 calculate_name_len(struct hci_dev *hdev)
7891 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7893 return eir_append_local_name(hdev, buf, 0);
/* Compute how many bytes of user-supplied TLV data fit in an advertising
 * (or scan-response) PDU once the kernel-managed fields selected by
 * @adv_flags (flags, TX power, local name, appearance) are reserved.
 */
7896 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7899 u8 max_len = HCI_MAX_AD_LENGTH;
7902 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7903 MGMT_ADV_FLAG_LIMITED_DISCOV |
7904 MGMT_ADV_FLAG_MANAGED_FLAGS))
7907 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7910 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7911 max_len -= calculate_name_len(hdev)
7913 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7920 static bool flags_managed(u32 adv_flags)
7922 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7923 MGMT_ADV_FLAG_LIMITED_DISCOV |
7924 MGMT_ADV_FLAG_MANAGED_FLAGS);
7927 static bool tx_power_managed(u32 adv_flags)
7929 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7932 static bool name_managed(u32 adv_flags)
7934 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7937 static bool appearance_managed(u32 adv_flags)
7939 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising/scan-response TLV data: it must fit
 * within the space left by kernel-managed fields, be well-formed length-
 * prefixed TLVs, and must not include fields the kernel itself manages
 * (Flags, TX Power, Local Name, Appearance) per @adv_flags.
 */
7942 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7943 u8 len, bool is_adv_data)
7948 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7953 /* Make sure that the data is correctly formatted. */
7954 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* data[i] is the field length, data[i + 1] the AD type */
7960 if (data[i + 1] == EIR_FLAGS &&
7961 (!is_adv_data || flags_managed(adv_flags)))
7964 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7967 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7970 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7973 if (data[i + 1] == EIR_APPEARANCE &&
7974 appearance_managed(adv_flags))
7977 /* If the current field length would exceed the total data
7978 * length, then it's invalid.
7980 if (i + cur_len >= len)
/* Check that @adv_flags only contains supported bits and selects at most
 * one secondary-PHY (SEC_*) flag.
 */
7987 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7989 u32 supported_flags, phy_flags;
7991 /* The current implementation only supports a subset of the specified
7992 * flags. Also need to check mutual exclusiveness of sec flags.
7994 supported_flags = get_supported_adv_flags(hdev);
7995 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
/* x ^ (x & -x) clears the lowest set bit; nonzero means >1 SEC flag set */
7996 if (adv_flags & ~supported_flags ||
7997 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8003 static bool adv_busy(struct hci_dev *hdev)
8005 return pending_find(MGMT_OP_SET_LE, hdev);
/* Common error-path cleanup for Add Advertising / Add Ext Adv Data: on
 * failure, drop every advertising instance that is still marked pending,
 * cancelling the rotation timer when the current instance is removed, and
 * notify userspace of each removal.
 */
8008 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8011 struct adv_info *adv, *n;
8013 bt_dev_dbg(hdev, "err %d", err);
8017 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8024 adv->pending = false;
8028 instance = adv->instance;
8030 if (hdev->cur_adv_instance == instance)
8031 cancel_adv_timeout(hdev);
8033 hci_remove_adv_instance(hdev, instance);
8034 mgmt_advertising_removed(sk, hdev, instance);
8037 hci_dev_unlock(hdev);
/* cmd_sync completion for Add Advertising: report status (or the instance
 * number on success) to the requester, run the shared pending-instance
 * cleanup, and free the pending command.
 */
8040 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8042 struct mgmt_pending_cmd *cmd = data;
8043 struct mgmt_cp_add_advertising *cp = cmd->param;
8044 struct mgmt_rp_add_advertising rp;
8046 memset(&rp, 0, sizeof(rp));
8048 rp.instance = cp->instance;
8051 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8054 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8055 mgmt_status(err), &rp, sizeof(rp));
8057 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8059 mgmt_pending_free(cmd);
/* cmd_sync worker for Add Advertising: (re)schedule the requested
 * instance, forcing it to become the active one.
 */
8062 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8064 struct mgmt_pending_cmd *cmd = data;
8065 struct mgmt_cp_add_advertising *cp = cmd->param;
8067 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* Handler for MGMT_OP_ADD_ADVERTISING: validate flags and TLV payloads,
 * register (or replace) the advertising instance, pick which instance to
 * schedule next, and queue add_advertising_sync() when HCI traffic is
 * actually needed. When not powered / legacy-advertising is active, the
 * instance is stored and success is returned immediately.
 */
8070 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8071 void *data, u16 data_len)
8073 struct mgmt_cp_add_advertising *cp = data;
8074 struct mgmt_rp_add_advertising rp;
8077 u16 timeout, duration;
8078 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
8079 u8 schedule_instance = 0;
8080 struct adv_info *next_instance;
8082 struct mgmt_pending_cmd *cmd;
8084 bt_dev_dbg(hdev, "sock %p", sk);
8086 status = mgmt_le_support(hdev);
8088 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8091 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8092 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8093 MGMT_STATUS_INVALID_PARAMS);
/* Payload carries adv data immediately followed by scan-rsp data */
8095 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8096 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8097 MGMT_STATUS_INVALID_PARAMS);
8099 flags = __le32_to_cpu(cp->flags);
8100 timeout = __le16_to_cpu(cp->timeout);
8101 duration = __le16_to_cpu(cp->duration);
8103 if (!requested_adv_flags_are_valid(hdev, flags))
8104 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8105 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running clock, i.e. a powered adapter */
8109 if (timeout && !hdev_is_powered(hdev)) {
8110 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8111 MGMT_STATUS_REJECTED);
8115 if (adv_busy(hdev)) {
8116 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8121 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8122 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8123 cp->scan_rsp_len, false)) {
8124 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8125 MGMT_STATUS_INVALID_PARAMS);
8129 err = hci_add_adv_instance(hdev, cp->instance, flags,
8130 cp->adv_data_len, cp->data,
8132 cp->data + cp->adv_data_len,
8134 HCI_ADV_TX_POWER_NO_PREFERENCE,
8135 hdev->le_adv_min_interval,
8136 hdev->le_adv_max_interval);
8138 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8139 MGMT_STATUS_FAILED);
8143 /* Only trigger an advertising added event if a new instance was
8146 if (hdev->adv_instance_cnt > prev_instance_cnt)
8147 mgmt_advertising_added(sk, hdev, cp->instance);
8149 if (hdev->cur_adv_instance == cp->instance) {
8150 /* If the currently advertised instance is being changed then
8151 * cancel the current advertising and schedule the next
8152 * instance. If there is only one instance then the overridden
8153 * advertising data will be visible right away.
8155 cancel_adv_timeout(hdev);
8157 next_instance = hci_get_next_instance(hdev, cp->instance);
8159 schedule_instance = next_instance->instance;
8160 } else if (!hdev->adv_instance_timeout) {
8161 /* Immediately advertise the new instance if no other
8162 * instance is currently being advertised.
8164 schedule_instance = cp->instance;
8167 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8168 * there is no instance to be advertised then we have no HCI
8169 * communication to make. Simply return.
8171 if (!hdev_is_powered(hdev) ||
8172 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8173 !schedule_instance) {
8174 rp.instance = cp->instance;
8175 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8176 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8180 /* We're good to go, update advertising data, parameters, and start
8183 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
/* The sync worker schedules whichever instance was picked above */
8190 cp->instance = schedule_instance;
8192 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8193 add_advertising_complete);
8195 mgmt_pending_free(cmd);
8198 hci_dev_unlock(hdev);
/* cmd_sync completion for Add Extended Advertising Parameters: on success
 * report the selected TX power and remaining data capacity; on failure
 * remove the instance created by the request (announcing the removal if
 * it had previously been advertised).
 */
8203 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8206 struct mgmt_pending_cmd *cmd = data;
8207 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8208 struct mgmt_rp_add_ext_adv_params rp;
8209 struct adv_info *adv;
8212 BT_DBG("%s", hdev->name);
8216 adv = hci_find_adv_instance(hdev, cp->instance);
8220 rp.instance = cp->instance;
8221 rp.tx_power = adv->tx_power;
8223 /* While we're at it, inform userspace of the available space for this
8224 * advertisement, given the flags that will be used.
8226 flags = __le32_to_cpu(cp->flags);
8227 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8228 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8231 /* If this advertisement was previously advertising and we
8232 * failed to update it, we signal that it has been removed and
8233 * delete its structure
8236 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8238 hci_remove_adv_instance(hdev, cp->instance);
8240 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8243 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8244 mgmt_status(err), &rp, sizeof(rp));
8249 mgmt_pending_free(cmd);
8251 hci_dev_unlock(hdev);
/* cmd_sync worker for Add Extended Advertising Parameters: program the
 * extended advertising set for the requested instance.
 */
8254 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8256 struct mgmt_pending_cmd *cmd = data;
8257 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8259 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS (first half of the two-call
 * extended Add Advertising flow): validate flags, create the instance
 * with no data, and either queue the parameter setup on ext-adv capable
 * controllers or answer immediately with default TX power on legacy ones.
 */
8262 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8263 void *data, u16 data_len)
8265 struct mgmt_cp_add_ext_adv_params *cp = data;
8266 struct mgmt_rp_add_ext_adv_params rp;
8267 struct mgmt_pending_cmd *cmd = NULL;
8268 u32 flags, min_interval, max_interval;
8269 u16 timeout, duration;
8274 BT_DBG("%s", hdev->name);
8276 status = mgmt_le_support(hdev);
8278 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8281 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8282 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8283 MGMT_STATUS_INVALID_PARAMS);
8285 /* The purpose of breaking add_advertising into two separate MGMT calls
8286 * for params and data is to allow more parameters to be added to this
8287 * structure in the future. For this reason, we verify that we have the
8288 * bare minimum structure we know of when the interface was defined. Any
8289 * extra parameters we don't know about will be ignored in this request.
/* NOTE(review): this status reply uses MGMT_OP_ADD_ADVERTISING although
 * every other path in this handler replies with
 * MGMT_OP_ADD_EXT_ADV_PARAMS — looks like a copy-paste leftover; confirm
 * against the MGMT API spec before changing.
 */
8291 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8292 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8293 MGMT_STATUS_INVALID_PARAMS);
8295 flags = __le32_to_cpu(cp->flags);
8297 if (!requested_adv_flags_are_valid(hdev, flags))
8298 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8299 MGMT_STATUS_INVALID_PARAMS);
8303 /* In new interface, we require that we are powered to register */
8304 if (!hdev_is_powered(hdev)) {
8305 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8306 MGMT_STATUS_REJECTED);
8310 if (adv_busy(hdev)) {
8311 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8316 /* Parse defined parameters from request, use defaults otherwise */
8317 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8318 __le16_to_cpu(cp->timeout) : 0;
8320 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8321 __le16_to_cpu(cp->duration) :
8322 hdev->def_multi_adv_rotation_duration;
8324 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8325 __le32_to_cpu(cp->min_interval) :
8326 hdev->le_adv_min_interval;
8328 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8329 __le32_to_cpu(cp->max_interval) :
8330 hdev->le_adv_max_interval;
8332 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8334 HCI_ADV_TX_POWER_NO_PREFERENCE;
8336 /* Create advertising instance with no advertising or response data */
8337 err = hci_add_adv_instance(hdev, cp->instance, flags,
8338 0, NULL, 0, NULL, timeout, duration,
8339 tx_power, min_interval, max_interval);
8342 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8343 MGMT_STATUS_FAILED);
8347 /* Submit request for advertising params if ext adv available */
8348 if (ext_adv_capable(hdev)) {
8349 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
/* Allocation failed: undo the instance created above */
8353 hci_remove_adv_instance(hdev, cp->instance);
8357 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8358 add_ext_adv_params_complete);
8360 mgmt_pending_free(cmd);
8362 rp.instance = cp->instance;
8363 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8364 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8365 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8366 err = mgmt_cmd_complete(sk, hdev->id,
8367 MGMT_OP_ADD_EXT_ADV_PARAMS,
8368 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8372 hci_dev_unlock(hdev);
/* Completion callback for MGMT_OP_ADD_EXT_ADV_DATA: runs when the queued
 * hci_cmd_sync work (add_ext_adv_data_sync) finishes. Reports the result
 * to the issuing socket and releases the pending command.
 *
 * NOTE(review): this extract is missing lines between 8387 and 8396 (the
 * err-dependent branch). The status reply (8390) and the complete reply
 * (8393) are presumably the two arms of an "if (err)"; confirm against
 * the full source.
 */
8377 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8379 struct mgmt_pending_cmd *cmd = data;
8380 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8381 struct mgmt_rp_add_advertising rp;
/* Common per-instance bookkeeping (shared with the Add Advertising path). */
8383 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8385 memset(&rp, 0, sizeof(rp));
8387 rp.instance = cp->instance;
/* Failure: status-only reply to the caller. */
8390 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
/* Success: full reply carrying the instance number. */
8393 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8394 mgmt_status(err), &rp, sizeof(rp));
/* Command was allocated with mgmt_pending_new(), so free (not remove). */
8396 mgmt_pending_free(cmd);
/* hci_cmd_sync work for MGMT_OP_ADD_EXT_ADV_DATA: pushes the instance's
 * advertising and scan-response data to the controller.
 *
 * Extended-advertising capable controllers get the data via the ext adv
 * HCI commands and the instance is then enabled; otherwise the legacy
 * instance is handed to the software rotation scheduler.
 *
 * NOTE(review): error-check lines between the update calls are missing
 * from this extract (e.g. between 8406 and 8410) — presumably each err
 * is tested before proceeding; confirm against the full source.
 */
8399 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8401 struct mgmt_pending_cmd *cmd = data;
8402 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8405 if (ext_adv_capable(hdev)) {
8406 err = hci_update_adv_data_sync(hdev, cp->instance);
8410 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8414 return hci_enable_ext_advertising_sync(hdev, cp->instance);
/* Legacy path: let the rotation logic (re)schedule the instance. */
8417 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA: second half of the split
 * add-advertising interface. Attaches adv/scan-rsp data to an instance
 * previously created by MGMT_OP_ADD_EXT_ADV_PARAMS.
 *
 * Fails with INVALID_PARAMS if the instance does not exist or the TLV
 * data is malformed, REJECTED if the adapter is not powered, and cleans
 * up the freshly created instance (clear_new_instance) on any error
 * after the lookup succeeded.
 */
8420 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8423 struct mgmt_cp_add_ext_adv_data *cp = data;
8424 struct mgmt_rp_add_ext_adv_data rp;
8425 u8 schedule_instance = 0;
8426 struct adv_info *next_instance;
8427 struct adv_info *adv_instance;
8429 struct mgmt_pending_cmd *cmd;
8431 BT_DBG("%s", hdev->name);
/* The instance must have been registered by a prior params call. */
8435 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8437 if (!adv_instance) {
8438 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8439 MGMT_STATUS_INVALID_PARAMS);
8443 /* In new interface, we require that we are powered to register */
8444 if (!hdev_is_powered(hdev)) {
8445 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8446 MGMT_STATUS_REJECTED);
8447 goto clear_new_instance;
/* NOTE(review): the status code for the busy case (line 8452) is missing
 * from this extract — presumably MGMT_STATUS_BUSY; confirm. */
8450 if (adv_busy(hdev)) {
8451 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8453 goto clear_new_instance;
8456 /* Validate new data */
/* adv data occupies cp->data[0..adv_data_len), scan rsp follows it. */
8457 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8458 cp->adv_data_len, true) ||
8459 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8460 cp->adv_data_len, cp->scan_rsp_len, false)) {
8461 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8462 MGMT_STATUS_INVALID_PARAMS);
8463 goto clear_new_instance;
8466 /* Set the data in the advertising instance */
8467 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8468 cp->data, cp->scan_rsp_len,
8469 cp->data + cp->adv_data_len);
8471 /* If using software rotation, determine next instance to use */
8472 if (hdev->cur_adv_instance == cp->instance) {
8473 /* If the currently advertised instance is being changed
8474 * then cancel the current advertising and schedule the
8475 * next instance. If there is only one instance then the
8476 * overridden advertising data will be visible right
8479 cancel_adv_timeout(hdev);
8481 next_instance = hci_get_next_instance(hdev, cp->instance);
8483 schedule_instance = next_instance->instance;
8484 } else if (!hdev->adv_instance_timeout) {
8485 /* Immediately advertise the new instance if no other
8486 * instance is currently being advertised.
8488 schedule_instance = cp->instance;
8491 /* If the HCI_ADVERTISING flag is set or there is no instance to
8492 * be advertised then we have no HCI communication to make.
8495 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
8496 if (adv_instance->pending) {
8497 mgmt_advertising_added(sk, hdev, cp->instance);
8498 adv_instance->pending = false;
8500 rp.instance = cp->instance;
8501 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8502 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Otherwise queue async HCI work; reply comes from the completion cb. */
8506 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
8510 goto clear_new_instance;
8513 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
8514 add_ext_adv_data_complete);
8516 mgmt_pending_free(cmd);
8517 goto clear_new_instance;
8520 /* We were successful in updating data, so trigger advertising_added
8521 * event if this is an instance that wasn't previously advertising. If
8522 * a failure occurs in the requests we initiated, we will remove the
8523 * instance again in add_advertising_complete
8525 if (adv_instance->pending)
8526 mgmt_advertising_added(sk, hdev, cp->instance);
/* clear_new_instance error path: drop the half-configured instance. */
8531 hci_remove_adv_instance(hdev, cp->instance);
8534 hci_dev_unlock(hdev);
/* Completion callback for MGMT_OP_REMOVE_ADVERTISING: replies to the
 * issuing socket with the removed instance number and frees the pending
 * command.
 *
 * NOTE(review): the err-dependent branch (between 8549 and 8555) is
 * missing from this extract — status vs. complete reply is presumably
 * selected by "if (err)"; confirm against the full source.
 */
8539 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8542 struct mgmt_pending_cmd *cmd = data;
8543 struct mgmt_cp_remove_advertising *cp = cmd->param;
8544 struct mgmt_rp_remove_advertising rp;
8546 bt_dev_dbg(hdev, "err %d", err);
8548 memset(&rp, 0, sizeof(rp));
8549 rp.instance = cp->instance;
8552 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8555 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8556 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8558 mgmt_pending_free(cmd);
/* hci_cmd_sync work for MGMT_OP_REMOVE_ADVERTISING: removes the given
 * instance (0 = all, per the mgmt API) and, if that left no instances
 * registered, turns advertising off entirely.
 */
8561 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8563 struct mgmt_pending_cmd *cmd = data;
8564 struct mgmt_cp_remove_advertising *cp = cmd->param;
8567 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
/* No instances left: stop advertising at the controller. */
8571 if (list_empty(&hdev->adv_instances))
8572 err = hci_disable_advertising_sync(hdev);
/* Handler for MGMT_OP_REMOVE_ADVERTISING: validates the request and
 * queues the actual removal as hci_cmd_sync work.
 *
 * INVALID_PARAMS if a nonzero instance does not exist or there are no
 * instances at all; rejected while a Set LE command is pending (status
 * line 8597 missing from this extract — presumably MGMT_STATUS_BUSY).
 */
8577 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8578 void *data, u16 data_len)
8580 struct mgmt_cp_remove_advertising *cp = data;
8581 struct mgmt_pending_cmd *cmd;
8584 bt_dev_dbg(hdev, "sock %p", sk);
/* instance == 0 means "remove all", so only validate nonzero values. */
8588 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8589 err = mgmt_cmd_status(sk, hdev->id,
8590 MGMT_OP_REMOVE_ADVERTISING,
8591 MGMT_STATUS_INVALID_PARAMS);
8595 if (pending_find(MGMT_OP_SET_LE, hdev)) {
8596 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8601 if (list_empty(&hdev->adv_instances)) {
8602 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8603 MGMT_STATUS_INVALID_PARAMS);
8607 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8614 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
8615 remove_advertising_complete);
8617 mgmt_pending_free(cmd);
8620 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO: synchronous query that reports
 * how many bytes of advertising and scan-response data are available for
 * a given instance/flags combination (flags such as connectable or
 * tx-power consume bytes of the payload).
 *
 * REJECTED on non-LE controllers; INVALID_PARAMS for an out-of-range
 * instance or unsupported flag bits.
 */
8625 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8626 void *data, u16 data_len)
8628 struct mgmt_cp_get_adv_size_info *cp = data;
8629 struct mgmt_rp_get_adv_size_info rp;
8630 u32 flags, supported_flags;
8633 bt_dev_dbg(hdev, "sock %p", sk);
8635 if (!lmp_le_capable(hdev))
8636 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8637 MGMT_STATUS_REJECTED);
/* Valid instances are 1..le_num_of_adv_sets. */
8639 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8641 MGMT_STATUS_INVALID_PARAMS);
8643 flags = __le32_to_cpu(cp->flags);
8645 /* The current implementation only supports a subset of the specified
8648 supported_flags = get_supported_adv_flags(hdev);
8649 if (flags & ~supported_flags)
8650 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8651 MGMT_STATUS_INVALID_PARAMS);
/* Echo instance/flags; sizes depend on which flags reserve payload. */
8653 rp.instance = cp->instance;
8654 rp.flags = cp->flags;
8655 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8656 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8658 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8659 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for incoming management commands: the array index is the
 * MGMT opcode, each entry gives the handler, the expected (minimum)
 * parameter size, and optional HCI_MGMT_* flags (VAR_LEN, UNTRUSTED,
 * UNCONFIGURED, NO_HDEV, HDEV_OPTIONAL).
 *
 * NOTE(review): many entries' flag continuation lines are missing from
 * this extract (e.g. the VAR_LEN flags after load_link_keys at 8690/8691)
 * — the table below is not a complete transcription; confirm flags
 * against the full source before relying on them.
 */
8664 static const struct hci_mgmt_handler mgmt_handlers[] = {
8665 { NULL }, /* 0x0000 (no command) */
8666 { read_version, MGMT_READ_VERSION_SIZE,
8668 HCI_MGMT_UNTRUSTED },
8669 { read_commands, MGMT_READ_COMMANDS_SIZE,
8671 HCI_MGMT_UNTRUSTED },
8672 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8674 HCI_MGMT_UNTRUSTED },
8675 { read_controller_info, MGMT_READ_INFO_SIZE,
8676 HCI_MGMT_UNTRUSTED },
8677 { set_powered, MGMT_SETTING_SIZE },
8678 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8679 { set_connectable, MGMT_SETTING_SIZE },
8680 { set_fast_connectable, MGMT_SETTING_SIZE },
8681 { set_bondable, MGMT_SETTING_SIZE },
8682 { set_link_security, MGMT_SETTING_SIZE },
8683 { set_ssp, MGMT_SETTING_SIZE },
8684 { set_hs, MGMT_SETTING_SIZE },
8685 { set_le, MGMT_SETTING_SIZE },
8686 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8687 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8688 { add_uuid, MGMT_ADD_UUID_SIZE },
8689 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8690 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8692 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8694 { disconnect, MGMT_DISCONNECT_SIZE },
8695 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8696 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8697 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8698 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8699 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8700 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8701 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8702 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8703 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8704 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8705 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8706 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8707 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8709 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8710 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8711 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8712 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8713 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8714 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8715 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8716 { set_advertising, MGMT_SETTING_SIZE },
8717 { set_bredr, MGMT_SETTING_SIZE },
8718 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8719 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8720 { set_secure_conn, MGMT_SETTING_SIZE },
8721 { set_debug_keys, MGMT_SETTING_SIZE },
8722 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8723 { load_irks, MGMT_LOAD_IRKS_SIZE,
8725 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8726 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8727 { add_device, MGMT_ADD_DEVICE_SIZE },
8728 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8729 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8731 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8733 HCI_MGMT_UNTRUSTED },
8734 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8735 HCI_MGMT_UNCONFIGURED |
8736 HCI_MGMT_UNTRUSTED },
8737 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8738 HCI_MGMT_UNCONFIGURED },
8739 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8740 HCI_MGMT_UNCONFIGURED },
8741 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8743 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8744 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8746 HCI_MGMT_UNTRUSTED },
8747 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8748 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8750 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8751 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8752 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8753 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8754 HCI_MGMT_UNTRUSTED },
8755 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8756 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8757 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
8758 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
8760 { set_wideband_speech, MGMT_SETTING_SIZE },
8761 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
8762 HCI_MGMT_UNTRUSTED },
8763 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
8764 HCI_MGMT_UNTRUSTED |
8765 HCI_MGMT_HDEV_OPTIONAL },
8766 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
8768 HCI_MGMT_HDEV_OPTIONAL },
8769 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
8770 HCI_MGMT_UNTRUSTED },
8771 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
8773 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
8774 HCI_MGMT_UNTRUSTED },
8775 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
8777 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
8778 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
8779 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
8780 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
8782 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
8783 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
8785 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
8787 { add_adv_patterns_monitor_rssi,
8788 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
/* Notify userspace that a new controller index appeared. Raw devices are
 * not exposed over mgmt. Depending on dev_type and the UNCONFIGURED
 * flag, the legacy (Unconf) Index Added event goes to old-style
 * listeners, and an Extended Index Added event (carrying type/bus in
 * ev, filled in lines missing from this extract) goes to extended
 * listeners.
 */
8792 void mgmt_index_added(struct hci_dev *hdev)
8794 struct mgmt_ev_ext_index ev;
8796 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8799 switch (hdev->dev_type) {
8801 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8802 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8803 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8806 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8807 HCI_MGMT_INDEX_EVENTS);
8820 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8821 HCI_MGMT_EXT_INDEX_EVENTS);
/* Notify userspace that a controller index went away. All pending
 * commands for the index are completed with INVALID_INDEX (opcode 0 in
 * mgmt_pending_foreach matches every opcode). Mirrors mgmt_index_added:
 * legacy (Unconf) Index Removed plus an Extended Index Removed event.
 */
8824 void mgmt_index_removed(struct hci_dev *hdev)
8826 struct mgmt_ev_ext_index ev;
8827 u8 status = MGMT_STATUS_INVALID_INDEX;
8829 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8832 switch (hdev->dev_type) {
/* Fail out everything still pending against this index. */
8834 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8836 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8837 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8838 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8841 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8842 HCI_MGMT_INDEX_EVENTS);
8855 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8856 HCI_MGMT_EXT_INDEX_EVENTS);
/* Power-on bookkeeping invoked from the HCI core: restarts stored LE
 * auto-connect actions and passive scanning, then completes any pending
 * Set Powered commands and broadcasts the new settings. The err
 * parameter presumably gates the restart block (the condition line is
 * missing from this extract) — confirm against the full source.
 */
8859 void mgmt_power_on(struct hci_dev *hdev, int err)
8861 struct cmd_lookup match = { NULL, hdev };
8863 bt_dev_dbg(hdev, "err %d", err);
8868 restart_le_actions(hdev);
8869 hci_update_passive_scan(hdev);
8872 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8874 new_settings(hdev, match.sk);
8879 hci_dev_unlock(hdev);
/* Power-off bookkeeping: completes pending Set Powered commands, fails
 * all other pending commands (INVALID_INDEX when the hdev is being
 * unregistered, NOT_POWERED otherwise), broadcasts a zero class of
 * device if one was set, and finally announces the new settings.
 */
8882 void __mgmt_power_off(struct hci_dev *hdev)
8884 struct cmd_lookup match = { NULL, hdev };
8885 u8 status, zero_cod[] = { 0, 0, 0 };
8887 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8889 /* If the power off is because of hdev unregistration let
8890 * use the appropriate INVALID_INDEX status. Otherwise use
8891 * NOT_POWERED. We cover both scenarios here since later in
8892 * mgmt_index_removed() any hci_conn callbacks will have already
8893 * been triggered, potentially causing misleading DISCONNECTED
8896 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8897 status = MGMT_STATUS_INVALID_INDEX;
8899 status = MGMT_STATUS_NOT_POWERED;
/* Opcode 0 matches every remaining pending command. */
8901 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8903 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8904 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8905 zero_cod, sizeof(zero_cod),
8906 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8907 ext_info_changed(hdev, NULL);
8910 new_settings(hdev, match.sk);
/* Report a failed power-on attempt to whoever issued Set Powered:
 * RFKILLED if the radio is blocked (-ERFKILL), generic FAILED otherwise.
 * No-op if no Set Powered command is pending.
 */
8916 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8918 struct mgmt_pending_cmd *cmd;
8921 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8925 if (err == -ERFKILL)
8926 status = MGMT_STATUS_RFKILLED;
8928 status = MGMT_STATUS_FAILED;
8930 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8932 mgmt_pending_remove(cmd);
/* Emit a New Link Key event so userspace can persist a freshly created
 * BR/EDR link key. store_hint tells userspace whether the key should be
 * stored permanently.
 */
8935 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8938 struct mgmt_ev_new_link_key ev;
8940 memset(&ev, 0, sizeof(ev));
8942 ev.store_hint = persistent;
8943 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8944 ev.key.addr.type = BDADDR_BREDR;
8945 ev.key.type = key->type;
8946 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8947 ev.key.pin_len = key->pin_len;
8949 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type + authentication state to the MGMT_LTK_*
 * constant reported to userspace. Defaults to unauthenticated for
 * unknown types.
 *
 * NOTE(review): some case labels are missing from this extract (e.g. the
 * case preceding 8956 and the P256 case before 8961); confirm the exact
 * switch structure against the full source.
 */
8952 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8954 switch (ltk->type) {
8956 case SMP_LTK_RESPONDER:
8957 if (ltk->authenticated)
8958 return MGMT_LTK_AUTHENTICATED;
8959 return MGMT_LTK_UNAUTHENTICATED;
8961 if (ltk->authenticated)
8962 return MGMT_LTK_P256_AUTH;
8963 return MGMT_LTK_P256_UNAUTH;
8964 case SMP_LTK_P256_DEBUG:
8965 return MGMT_LTK_P256_DEBUG;
8968 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event. Keys belonging to (non-static) random
 * addresses without an identity are marked "don't store" since the
 * address will change; only the significant enc_size bytes of the key
 * value are copied, the rest is zeroed.
 */
8971 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8973 struct mgmt_ev_new_long_term_key ev;
8975 memset(&ev, 0, sizeof(ev));
8977 /* Devices using resolvable or non-resolvable random addresses
8978 * without providing an identity resolving key don't require
8979 * to store long term keys. Their addresses will change the
8982 * Only when a remote device provides an identity address
8983 * make sure the long term key is stored. If the remote
8984 * identity is known, the long term keys are internally
8985 * mapped to the identity address. So allow static random
8986 * and public addresses here.
/* Top two address bits != 11 means not a static random address. */
8988 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8989 (key->bdaddr.b[5] & 0xc0) != 0xc0)
8990 ev.store_hint = 0x00;
8992 ev.store_hint = persistent;
8994 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8995 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8996 ev.key.type = mgmt_ltk_type(key);
8997 ev.key.enc_size = key->enc_size;
8998 ev.key.ediv = key->ediv;
8999 ev.key.rand = key->rand;
/* SMP_LTK (initiator role) distinguishes who initiated pairing. */
9001 if (key->type == SMP_LTK)
9002 ev.key.initiator = 1;
9004 /* Make sure we copy only the significant bytes based on the
9005 * encryption key size, and set the rest of the value to zeroes.
9007 memcpy(ev.key.val, key->val, key->enc_size);
9008 memset(ev.key.val + key->enc_size, 0,
9009 sizeof(ev.key.val) - key->enc_size);
9011 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event carrying the identity resolving key plus the RPA
 * it currently resolves, so userspace can persist the identity mapping.
 */
9014 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9016 struct mgmt_ev_new_irk ev;
9018 memset(&ev, 0, sizeof(ev));
9020 ev.store_hint = persistent;
9022 bacpy(&ev.rpa, &irk->rpa);
9023 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9024 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9025 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9027 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK (signature resolving key) event. Same storage rule as
 * LTKs: keys tied to non-static random addresses are marked "don't
 * store" because the address will change.
 */
9030 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9033 struct mgmt_ev_new_csrk ev;
9035 memset(&ev, 0, sizeof(ev));
9037 /* Devices using resolvable or non-resolvable random addresses
9038 * without providing an identity resolving key don't require
9039 * to store signature resolving keys. Their addresses will change
9040 * the next time around.
9042 * Only when a remote device provides an identity address
9043 * make sure the signature resolving key is stored. So allow
9044 * static random and public addresses here.
/* Top two address bits != 11 means not a static random address. */
9046 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9047 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9048 ev.store_hint = 0x00;
9050 ev.store_hint = persistent;
9052 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9053 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9054 ev.key.type = csrk->type;
9055 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9057 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter Update event so userspace can persist
 * preferred LE connection parameters for an identity address. Dropped
 * silently for non-identity (resolvable/non-resolvable random)
 * addresses.
 */
9060 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9061 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9062 u16 max_interval, u16 latency, u16 timeout)
9064 struct mgmt_ev_new_conn_param ev;
9066 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9069 memset(&ev, 0, sizeof(ev));
9070 bacpy(&ev.addr.bdaddr, bdaddr);
9071 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9072 ev.store_hint = store_hint;
/* Wire format is little-endian per the mgmt protocol. */
9073 ev.min_interval = cpu_to_le16(min_interval);
9074 ev.max_interval = cpu_to_le16(max_interval);
9075 ev.latency = cpu_to_le16(latency);
9076 ev.timeout = cpu_to_le16(timeout);
9078 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit a Device Connected event. For LE connections the advertising data
 * received from the peer is forwarded verbatim as the event's EIR
 * payload; for BR/EDR, the remote name and class of device are appended
 * as EIR fields instead.
 *
 * NOTE(review): several lines are missing from this extract (skb NULL
 * check after allocation, the name!=NULL guard around 9114, flag
 * initialization before 9101); confirm against the full source.
 */
9081 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9082 u8 *name, u8 name_len)
9084 struct sk_buff *skb;
9085 struct mgmt_ev_device_connected *ev;
/* Size the skb for the LE adv data when present. */
9089 if (conn->le_adv_data_len > 0)
9090 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9091 conn->le_adv_data_len);
9093 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9096 ev = skb_put(skb, sizeof(*ev));
9097 bacpy(&ev->addr.bdaddr, &conn->dst);
9098 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9101 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9103 ev->flags = __cpu_to_le32(flags);
9105 /* We must ensure that the EIR Data fields are ordered and
9106 * unique. Keep it simple for now and avoid the problem by not
9107 * adding any BR/EDR data to the LE adv.
9109 if (conn->le_adv_data_len > 0) {
9110 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9111 eir_len = conn->le_adv_data_len;
/* BR/EDR: append the remote name as an EIR Complete Name field. */
9114 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
9116 skb_put(skb, eir_len);
/* Append class of device only when it is non-zero. */
9119 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) {
9120 eir_len = eir_append_data(ev->eir, eir_len,
9122 conn->dev_class, 3);
9127 ev->eir_len = cpu_to_le16(eir_len);
9129 mgmt_event_skb(skb, NULL);
/* mgmt_pending_foreach callback: complete a pending Disconnect command
 * and remember its socket (via *sk) so the disconnect event can be
 * attributed to the command originator.
 *
 * NOTE(review): the lines that assign cmd->sk into *sk (between 9136 and
 * 9141) are missing from this extract; confirm against the full source.
 */
9132 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9134 struct sock **sk = data;
9136 cmd->cmd_complete(cmd, 0);
9141 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: emit Device Unpaired for the address a
 * pending Unpair Device command targeted, then complete and remove the
 * command.
 */
9144 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9146 struct hci_dev *hdev = data;
9147 struct mgmt_cp_unpair_device *cp = cmd->param;
9149 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9151 cmd->cmd_complete(cmd, 0);
9152 mgmt_pending_remove(cmd);
/* Return whether a Set Powered (off) command is currently in progress
 * for this controller.
 *
 * NOTE(review): the function body after the pending_find (lines
 * 9161-9170, which presumably inspect cp->val and return the result) is
 * missing from this extract; confirm against the full source.
 */
9155 bool mgmt_powering_down(struct hci_dev *hdev)
9157 struct mgmt_pending_cmd *cmd;
9158 struct mgmt_mode *cp;
9160 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a Device Disconnected event. If the controller is powering down
 * and this was the last connection, the queued power-off work is run
 * immediately. Pending Disconnect commands for this device are completed
 * (their socket becomes the event's "skip" socket), and pending Unpair
 * Device commands are flushed afterwards.
 */
9171 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9172 u8 link_type, u8 addr_type, u8 reason,
9173 bool mgmt_connected)
9175 struct mgmt_ev_device_disconnected ev;
9176 struct sock *sk = NULL;
9178 /* The connection is still in hci_conn_hash so test for 1
9179 * instead of 0 to know if this is the last one.
9181 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9182 cancel_delayed_work(&hdev->power_off);
9183 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* No event if the connection was never announced over mgmt. */
9186 if (!mgmt_connected)
9189 if (link_type != ACL_LINK && link_type != LE_LINK)
9192 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9194 bacpy(&ev.addr.bdaddr, bdaddr);
9195 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9198 /* Report disconnects due to suspend */
9199 if (hdev->suspended)
9200 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
/* sk (originator of a pending Disconnect) is excluded from the event. */
9202 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9207 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Report a failed disconnect attempt: flush pending Unpair Device
 * commands, then complete the pending Disconnect command — but only if
 * its target address and type match the failing connection.
 */
9211 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9212 u8 link_type, u8 addr_type, u8 status)
9214 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9215 struct mgmt_cp_disconnect *cp;
9216 struct mgmt_pending_cmd *cmd;
9218 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9221 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only respond if the pending command targets this very device. */
9227 if (bacmp(bdaddr, &cp->addr.bdaddr))
9230 if (cp->addr.type != bdaddr_type)
9233 cmd->cmd_complete(cmd, mgmt_status(status));
9234 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event with the translated HCI status. Like the
 * disconnect path, if the controller is powering down and this was the
 * last tracked connection, run the queued power-off work immediately.
 */
9237 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9238 u8 addr_type, u8 status)
9240 struct mgmt_ev_connect_failed ev;
9242 /* The connection is still in hci_conn_hash so test for 1
9243 * instead of 0 to know if this is the last one.
9245 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9246 cancel_delayed_work(&hdev->power_off);
9247 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9250 bacpy(&ev.addr.bdaddr, bdaddr);
9251 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9252 ev.status = mgmt_status(status);
9254 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a PIN Code Request event so userspace can supply a PIN for a
 * BR/EDR pairing. The secure argument presumably populates ev.secure
 * (the assignment line is missing from this extract).
 */
9257 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9259 struct mgmt_ev_pin_code_request ev;
9261 bacpy(&ev.addr.bdaddr, bdaddr);
9262 ev.addr.type = BDADDR_BREDR;
9265 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the translated HCI
 * status. No-op when nothing is pending.
 */
9268 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9271 struct mgmt_pending_cmd *cmd;
9273 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9277 cmd->cmd_complete(cmd, mgmt_status(status));
9278 mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply command with the translated
 * HCI status. No-op when nothing is pending.
 */
9281 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9284 struct mgmt_pending_cmd *cmd;
9286 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9290 cmd->cmd_complete(cmd, mgmt_status(status));
9291 mgmt_pending_remove(cmd);
/* Emit a User Confirmation Request event carrying the numeric comparison
 * value; confirm_hint tells userspace whether to show a yes/no prompt
 * only (no value comparison needed).
 */
9294 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9295 u8 link_type, u8 addr_type, u32 value,
9298 struct mgmt_ev_user_confirm_request ev;
9300 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9302 bacpy(&ev.addr.bdaddr, bdaddr);
9303 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9304 ev.confirm_hint = confirm_hint;
9305 ev.value = cpu_to_le32(value);
9307 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a User Passkey Request event asking userspace to collect a
 * passkey from the user for the given device.
 */
9311 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9312 u8 link_type, u8 addr_type)
9314 struct mgmt_ev_user_passkey_request ev;
9316 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9318 bacpy(&ev.addr.bdaddr, bdaddr);
9319 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9321 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * commands: finds the pending command by opcode and completes it with
 * the translated HCI status.
 */
9325 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9326 u8 link_type, u8 addr_type, u8 status,
9329 struct mgmt_pending_cmd *cmd;
9331 cmd = pending_find(opcode, hdev);
9335 cmd->cmd_complete(cmd, mgmt_status(status));
9336 mgmt_pending_remove(cmd);
/* Completion for User Confirm Reply — thin wrapper over
 * user_pairing_resp_complete(). */
9341 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9342 u8 link_type, u8 addr_type, u8 status)
9344 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9345 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Completion for User Confirm Negative Reply — thin wrapper over
 * user_pairing_resp_complete(). */
9348 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9349 u8 link_type, u8 addr_type, u8 status)
9351 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9353 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Completion for User Passkey Reply — thin wrapper over
 * user_pairing_resp_complete(). */
9356 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9357 u8 link_type, u8 addr_type, u8 status)
9359 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9360 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Completion for User Passkey Negative Reply — thin wrapper over
 * user_pairing_resp_complete(). */
9363 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9364 u8 link_type, u8 addr_type, u8 status)
9366 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9368 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event: display a passkey to the user; entered
 * counts the keys the remote side has typed so far.
 */
9371 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9372 u8 link_type, u8 addr_type, u32 passkey,
9375 struct mgmt_ev_passkey_notify ev;
9377 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9379 bacpy(&ev.addr.bdaddr, bdaddr);
9380 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9381 ev.passkey = __cpu_to_le32(passkey);
9382 ev.entered = entered;
9384 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure: emit Auth Failed (skipping the
 * socket of a pending pairing command, if any) and then complete that
 * pairing command with the translated status.
 *
 * NOTE(review): the branch structure around 9399-9404 (the guard before
 * completing cmd) is partially missing from this extract; confirm
 * against the full source.
 */
9387 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9389 struct mgmt_ev_auth_failed ev;
9390 struct mgmt_pending_cmd *cmd;
9391 u8 status = mgmt_status(hci_status);
9393 bacpy(&ev.addr.bdaddr, &conn->dst);
9394 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9397 cmd = find_pairing(conn);
9399 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9400 cmd ? cmd->sk : NULL);
9403 cmd->cmd_complete(cmd, status);
9404 mgmt_pending_remove(cmd);
/* Result of toggling BR/EDR authentication (link security): on failure
 * every pending Set Link Security command gets a status reply; on
 * success the HCI_LINK_SECURITY flag is synced with the HCI_AUTH state
 * and new settings are broadcast if the flag actually changed.
 */
9408 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9410 struct cmd_lookup match = { NULL, hdev };
9414 u8 mgmt_err = mgmt_status(status);
9415 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9416 cmd_status_rsp, &mgmt_err);
/* changed tracks whether the mgmt-visible flag flipped. */
9420 if (test_bit(HCI_AUTH, &hdev->flags))
9421 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9423 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9425 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9429 new_settings(hdev, match.sk);
/* mgmt_pending_foreach callback: capture the first pending command's
 * socket into the cmd_lookup match and take a reference on it.
 */
9435 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9437 struct cmd_lookup *match = data;
9439 if (match->sk == NULL) {
9440 match->sk = cmd->sk;
9441 sock_hold(match->sk);
/* Result of a class-of-device update: find the socket that initiated it
 * (via any pending Set Dev Class / Add UUID / Remove UUID command) and
 * broadcast Class Of Device Changed plus extended controller info to
 * everyone else. The status-dependent guard before 9455 is missing from
 * this extract — the broadcast is presumably skipped on failure;
 * confirm.
 */
9445 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9448 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9450 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9451 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9452 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9455 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9456 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9457 ext_info_changed(hdev, NULL);
/* Result of a local-name update: on success without a pending Set Local
 * Name command, cache the name in hdev->dev_name. The Local Name Changed
 * broadcast is suppressed while powering on (a pending Set Powered means
 * this write came from the power-up sequence, not a user request).
 */
9464 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9466 struct mgmt_cp_set_local_name ev;
9467 struct mgmt_pending_cmd *cmd;
9472 memset(&ev, 0, sizeof(ev));
9473 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9474 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9476 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
/* NOTE(review): guard around this cache update (between 9476 and 9478)
 * is missing from this extract. */
9478 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9480 /* If this is a HCI command related to powering on the
9481 * HCI dev don't send any mgmt signals.
9483 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9487 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9488 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9489 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* Linear search: does the 128-bit uuid appear in the uuids array of
 * uuid_count entries? (The return statements fall outside this extract.)
 */
9492 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9496 for (i = 0; i < uuid_count; i++) {
9497 if (!memcmp(uuid, uuids[i], 16))
/* Walk EIR/adv data looking for any UUID (16-, 32- or 128-bit, complete
 * or incomplete lists) that matches the filter list. 16- and 32-bit
 * UUIDs are expanded into full 128-bit form against the Bluetooth base
 * UUID before comparison; byte order is reversed since EIR is
 * little-endian and the filter UUIDs are big-endian.
 *
 * NOTE(review): the terminating checks (field_len == 0 break, returns
 * after each match) fall on lines missing from this extract.
 */
9504 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9508 while (parsed < eir_len) {
9509 u8 field_len = eir[0];
/* Bail out on a field that would run past the buffer. */
9516 if (eir_len - parsed < field_len + 1)
9520 case EIR_UUID16_ALL:
9521 case EIR_UUID16_SOME:
9522 for (i = 0; i + 3 <= field_len; i += 2) {
9523 memcpy(uuid, bluetooth_base_uuid, 16);
9524 uuid[13] = eir[i + 3];
9525 uuid[12] = eir[i + 2];
9526 if (has_uuid(uuid, uuid_count, uuids))
9530 case EIR_UUID32_ALL:
9531 case EIR_UUID32_SOME:
9532 for (i = 0; i + 5 <= field_len; i += 4) {
9533 memcpy(uuid, bluetooth_base_uuid, 16);
9534 uuid[15] = eir[i + 5];
9535 uuid[14] = eir[i + 4];
9536 uuid[13] = eir[i + 3];
9537 uuid[12] = eir[i + 2];
9538 if (has_uuid(uuid, uuid_count, uuids))
9542 case EIR_UUID128_ALL:
9543 case EIR_UUID128_SOME:
9544 for (i = 0; i + 17 <= field_len; i += 16) {
9545 memcpy(uuid, eir + i + 2, 16);
9546 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field (length byte + field_len payload bytes). */
9552 parsed += field_len + 1;
9553 eir += field_len + 1;
/* Schedule an LE scan restart (used with strict duplicate filtering to
 * refresh RSSI values). Skipped when not scanning, or when the current
 * scan window would end before the restart delay elapses anyway.
 */
9559 static void restart_le_scan(struct hci_dev *hdev)
9561 /* If controller is not scanning we are done. */
9562 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9565 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9566 hdev->discovery.scan_start +
9567 hdev->discovery.scan_duration))
9570 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9571 DISCOV_LE_RESTART_DELAY);
/* Service-discovery result filter: decide whether a found device passes
 * the RSSI threshold and UUID filters configured for the current
 * discovery session. Returns via lines outside this extract; the
 * visible logic drops sub-threshold RSSI results (unless the strict
 * duplicate-filter quirk forces a rescan-based recheck) and results
 * matching none of the filter UUIDs in either EIR or scan response.
 */
9574 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9575 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9577 /* If a RSSI threshold has been specified, and
9578 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9579 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9580 * is set, let it through for further processing, as we might need to
9583 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9584 * the results are also dropped.
9586 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9587 (rssi == HCI_RSSI_INVALID ||
9588 (rssi < hdev->discovery.rssi &&
9589 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9592 if (hdev->discovery.uuid_count != 0) {
9593 /* If a list of UUIDs is provided in filter, results with no
9594 * matching UUID should be dropped.
9596 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9597 hdev->discovery.uuids) &&
9598 !eir_has_uuids(scan_rsp, scan_rsp_len,
9599 hdev->discovery.uuid_count,
9600 hdev->discovery.uuids))
9604 /* If duplicate filtering does not report RSSI changes, then restart
9605 * scanning to ensure updated result with updated RSSI values.
9607 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9608 restart_le_scan(hdev);
9610 /* Validate RSSI value against the RSSI threshold once more. */
9611 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9612 rssi < hdev->discovery.rssi)
/* Emit a MGMT_EV_DEVICE_FOUND event to management sockets for a device
 * discovered via inquiry or LE scanning, after applying the discovery
 * filters (RSSI threshold, UUID list, limited-discoverable mode).
 * @dev_class may be NULL for LE results; @eir/@scan_rsp carry the raw
 * EIR respectively scan response bytes.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* BR/EDR: limited bit lives in the Class of Device. */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: limited bit lives in the advertised Flags AD. */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a Class of Device EIR field when BR/EDR gave us one
	 * and the EIR data does not already contain it.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
9705 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9706 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9708 struct sk_buff *skb;
9709 struct mgmt_ev_device_found *ev;
9714 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 2 + name_len);
9716 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 0);
9718 ev = skb_put(skb, sizeof(*ev));
9719 bacpy(&ev->addr.bdaddr, bdaddr);
9720 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9724 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9727 skb_put(skb, eir_len);
9730 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9733 ev->eir_len = cpu_to_le16(eir_len);
9734 ev->flags = cpu_to_le32(flags);
9736 mgmt_event_skb(skb, NULL);
9739 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9741 struct mgmt_ev_discovering ev;
9743 bt_dev_dbg(hdev, "discovering %u", discovering);
9745 memset(&ev, 0, sizeof(ev));
9746 ev.type = hdev->discovery.type;
9747 ev.discovering = discovering;
9749 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9752 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9754 struct mgmt_ev_controller_suspend ev;
9756 ev.suspend_state = state;
9757 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9760 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9763 struct mgmt_ev_controller_resume ev;
9765 ev.wake_reason = reason;
9767 bacpy(&ev.addr.bdaddr, bdaddr);
9768 ev.addr.type = addr_type;
9770 memset(&ev.addr, 0, sizeof(ev.addr));
9773 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
/* Registration record for the management control channel: routes
 * HCI_CHANNEL_CONTROL socket commands through the mgmt_handlers table
 * and runs mgmt_init_hdev per controller.
 */
static struct hci_mgmt_chan chan = {
	.channel = HCI_CHANNEL_CONTROL,
	.handler_count = ARRAY_SIZE(mgmt_handlers),
	.handlers = mgmt_handlers,
	.hdev_init = mgmt_init_hdev,
};
9785 return hci_mgmt_chan_register(&chan);
/* Unregister the management channel; counterpart of mgmt_init(). */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}