2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
42 #define MGMT_VERSION 1
43 #define MGMT_REVISION 18
/* Opcodes accepted from trusted (privileged) management sockets.
 * NOTE(review): this extraction appears elided -- e.g. MGMT_OP_READ_INFO /
 * MGMT_OP_SET_POWERED and the closing brace are not visible; verify the
 * full list against the original source.
 */
45 static const u16 mgmt_commands[] = {
46 MGMT_OP_READ_INDEX_LIST,
49 MGMT_OP_SET_DISCOVERABLE,
50 MGMT_OP_SET_CONNECTABLE,
51 MGMT_OP_SET_FAST_CONNECTABLE,
53 MGMT_OP_SET_LINK_SECURITY,
57 MGMT_OP_SET_DEV_CLASS,
58 MGMT_OP_SET_LOCAL_NAME,
61 MGMT_OP_LOAD_LINK_KEYS,
62 MGMT_OP_LOAD_LONG_TERM_KEYS,
64 MGMT_OP_GET_CONNECTIONS,
65 MGMT_OP_PIN_CODE_REPLY,
66 MGMT_OP_PIN_CODE_NEG_REPLY,
67 MGMT_OP_SET_IO_CAPABILITY,
69 MGMT_OP_CANCEL_PAIR_DEVICE,
70 MGMT_OP_UNPAIR_DEVICE,
71 MGMT_OP_USER_CONFIRM_REPLY,
72 MGMT_OP_USER_CONFIRM_NEG_REPLY,
73 MGMT_OP_USER_PASSKEY_REPLY,
74 MGMT_OP_USER_PASSKEY_NEG_REPLY,
75 MGMT_OP_READ_LOCAL_OOB_DATA,
76 MGMT_OP_ADD_REMOTE_OOB_DATA,
77 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
78 MGMT_OP_START_DISCOVERY,
79 MGMT_OP_STOP_DISCOVERY,
82 MGMT_OP_UNBLOCK_DEVICE,
83 MGMT_OP_SET_DEVICE_ID,
84 MGMT_OP_SET_ADVERTISING,
86 MGMT_OP_SET_STATIC_ADDRESS,
87 MGMT_OP_SET_SCAN_PARAMS,
88 MGMT_OP_SET_SECURE_CONN,
89 MGMT_OP_SET_DEBUG_KEYS,
92 MGMT_OP_GET_CONN_INFO,
93 MGMT_OP_GET_CLOCK_INFO,
95 MGMT_OP_REMOVE_DEVICE,
96 MGMT_OP_LOAD_CONN_PARAM,
97 MGMT_OP_READ_UNCONF_INDEX_LIST,
98 MGMT_OP_READ_CONFIG_INFO,
99 MGMT_OP_SET_EXTERNAL_CONFIG,
100 MGMT_OP_SET_PUBLIC_ADDRESS,
101 MGMT_OP_START_SERVICE_DISCOVERY,
102 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
103 MGMT_OP_READ_EXT_INDEX_LIST,
104 MGMT_OP_READ_ADV_FEATURES,
105 MGMT_OP_ADD_ADVERTISING,
106 MGMT_OP_REMOVE_ADVERTISING,
107 MGMT_OP_GET_ADV_SIZE_INFO,
108 MGMT_OP_START_LIMITED_DISCOVERY,
109 MGMT_OP_READ_EXT_INFO,
110 MGMT_OP_SET_APPEARANCE,
111 MGMT_OP_SET_BLOCKED_KEYS,
112 MGMT_OP_SET_WIDEBAND_SPEECH,
113 MGMT_OP_READ_SECURITY_INFO,
114 MGMT_OP_READ_EXP_FEATURES_INFO,
115 MGMT_OP_SET_EXP_FEATURE,
116 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
117 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
118 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
119 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
120 MGMT_OP_GET_DEVICE_FLAGS,
121 MGMT_OP_SET_DEVICE_FLAGS,
122 MGMT_OP_READ_ADV_MONITOR_FEATURES,
123 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
124 MGMT_OP_REMOVE_ADV_MONITOR,
/* Events that may be delivered to trusted management sockets.
 * NOTE(review): extraction appears elided (e.g. MGMT_EV_INDEX_ADDED and the
 * closing brace are not visible) -- verify against the full source.
 */
127 static const u16 mgmt_events[] = {
128 MGMT_EV_CONTROLLER_ERROR,
130 MGMT_EV_INDEX_REMOVED,
131 MGMT_EV_NEW_SETTINGS,
132 MGMT_EV_CLASS_OF_DEV_CHANGED,
133 MGMT_EV_LOCAL_NAME_CHANGED,
134 MGMT_EV_NEW_LINK_KEY,
135 MGMT_EV_NEW_LONG_TERM_KEY,
136 MGMT_EV_DEVICE_CONNECTED,
137 MGMT_EV_DEVICE_DISCONNECTED,
138 MGMT_EV_CONNECT_FAILED,
139 MGMT_EV_PIN_CODE_REQUEST,
140 MGMT_EV_USER_CONFIRM_REQUEST,
141 MGMT_EV_USER_PASSKEY_REQUEST,
143 MGMT_EV_DEVICE_FOUND,
145 MGMT_EV_DEVICE_BLOCKED,
146 MGMT_EV_DEVICE_UNBLOCKED,
147 MGMT_EV_DEVICE_UNPAIRED,
148 MGMT_EV_PASSKEY_NOTIFY,
151 MGMT_EV_DEVICE_ADDED,
152 MGMT_EV_DEVICE_REMOVED,
153 MGMT_EV_NEW_CONN_PARAM,
154 MGMT_EV_UNCONF_INDEX_ADDED,
155 MGMT_EV_UNCONF_INDEX_REMOVED,
156 MGMT_EV_NEW_CONFIG_OPTIONS,
157 MGMT_EV_EXT_INDEX_ADDED,
158 MGMT_EV_EXT_INDEX_REMOVED,
159 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
160 MGMT_EV_ADVERTISING_ADDED,
161 MGMT_EV_ADVERTISING_REMOVED,
162 MGMT_EV_EXT_INFO_CHANGED,
163 MGMT_EV_PHY_CONFIGURATION_CHANGED,
164 MGMT_EV_EXP_FEATURE_CHANGED,
165 MGMT_EV_DEVICE_FLAGS_CHANGED,
166 MGMT_EV_CONTROLLER_SUSPEND,
167 MGMT_EV_CONTROLLER_RESUME,
/* Subset of opcodes permitted from untrusted (unprivileged) sockets --
 * read-only information queries.
 * NOTE(review): closing brace not visible in this extraction.
 */
170 static const u16 mgmt_untrusted_commands[] = {
171 MGMT_OP_READ_INDEX_LIST,
173 MGMT_OP_READ_UNCONF_INDEX_LIST,
174 MGMT_OP_READ_CONFIG_INFO,
175 MGMT_OP_READ_EXT_INDEX_LIST,
176 MGMT_OP_READ_EXT_INFO,
177 MGMT_OP_READ_SECURITY_INFO,
178 MGMT_OP_READ_EXP_FEATURES_INFO,
179 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
180 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Subset of events that untrusted sockets are allowed to receive --
 * nothing carrying keys or per-device security material.
 * NOTE(review): first entry/entries and closing brace not visible here.
 */
183 static const u16 mgmt_untrusted_events[] = {
185 MGMT_EV_INDEX_REMOVED,
186 MGMT_EV_NEW_SETTINGS,
187 MGMT_EV_CLASS_OF_DEV_CHANGED,
188 MGMT_EV_LOCAL_NAME_CHANGED,
189 MGMT_EV_UNCONF_INDEX_ADDED,
190 MGMT_EV_UNCONF_INDEX_REMOVED,
191 MGMT_EV_NEW_CONFIG_OPTIONS,
192 MGMT_EV_EXT_INDEX_ADDED,
193 MGMT_EV_EXT_INDEX_REMOVED,
194 MGMT_EV_EXT_INFO_CHANGED,
195 MGMT_EV_EXP_FEATURE_CHANGED,
196 MGMT_EV_ADV_MONITOR_ADDED,
197 MGMT_EV_ADV_MONITOR_REMOVED,
/* How long cached service/EIR data stays valid: 2 seconds, in jiffies. */
200 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* 16-byte all-zero key; presumably used to detect/reject blank link keys
 * -- confirm at the use sites (not visible in this chunk).
 */
202 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
203 "\x00\x00\x00\x00\x00\x00\x00\x00"
205 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code; see mgmt_status() below for the
 * out-of-range fallback.
 * NOTE(review): the first entry (HCI success -> MGMT_STATUS_SUCCESS) and the
 * closing brace are not visible in this extraction -- verify.
 */
206 static const u8 mgmt_status_table[] = {
208 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
209 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
210 MGMT_STATUS_FAILED, /* Hardware Failure */
211 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
212 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
213 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
214 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
215 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
216 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
217 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
218 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
219 MGMT_STATUS_BUSY, /* Command Disallowed */
220 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
221 MGMT_STATUS_REJECTED, /* Rejected Security */
222 MGMT_STATUS_REJECTED, /* Rejected Personal */
223 MGMT_STATUS_TIMEOUT, /* Host Timeout */
224 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
225 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
226 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
227 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
228 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
229 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
230 MGMT_STATUS_BUSY, /* Repeated Attempts */
231 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
232 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
233 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
234 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
235 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
236 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
237 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
238 MGMT_STATUS_FAILED, /* Unspecified Error */
239 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
240 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
241 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
242 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
243 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
244 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
245 MGMT_STATUS_FAILED, /* Unit Link Key Used */
246 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
247 MGMT_STATUS_TIMEOUT, /* Instant Passed */
248 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
249 MGMT_STATUS_FAILED, /* Transaction Collision */
250 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
251 MGMT_STATUS_REJECTED, /* QoS Rejected */
252 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
253 MGMT_STATUS_REJECTED, /* Insufficient Security */
254 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
255 MGMT_STATUS_BUSY, /* Role Switch Pending */
256 MGMT_STATUS_FAILED, /* Slot Violation */
257 MGMT_STATUS_FAILED, /* Role Switch Failed */
258 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
259 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
260 MGMT_STATUS_BUSY, /* Host Busy Pairing */
261 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
262 MGMT_STATUS_BUSY, /* Controller Busy */
263 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
264 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
265 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
266 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
267 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
270 static u8 mgmt_status(u8 hci_status)
272 if (hci_status < ARRAY_SIZE(mgmt_status_table))
273 return mgmt_status_table[hci_status];
275 return MGMT_STATUS_FAILED;
/* Emit a per-controller index event on the HCI control channel.
 * NOTE(review): the trailing parameter(s) of the signature and the final
 * arguments of the mgmt_send_event() call are not visible in this
 * extraction -- verify against the full source.
 */
278 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
281 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Emit an event on the control channel, restricted to sockets that have
 * the given @flag set, skipping @skip_sk.
 * NOTE(review): the flag argument(s) to mgmt_send_event() are not visible
 * in this extraction -- verify against the full source.
 */
285 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
286 u16 len, int flag, struct sock *skip_sk)
288 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
292 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
293 struct sock *skip_sk)
295 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
296 HCI_SOCK_TRUSTED, skip_sk);
299 static u8 le_addr_type(u8 mgmt_addr_type)
301 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
302 return ADDR_LE_DEV_PUBLIC;
304 return ADDR_LE_DEV_RANDOM;
307 void mgmt_fill_version_info(void *ver)
309 struct mgmt_rp_read_version *rp = ver;
311 rp->version = MGMT_VERSION;
312 rp->revision = cpu_to_le16(MGMT_REVISION);
/* MGMT_OP_READ_VERSION handler: return interface version/revision.
 * NOTE(review): the signature continuation (length parameter) is not
 * visible in this extraction -- verify against the full source.
 */
315 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
318 struct mgmt_rp_read_version rp;
320 bt_dev_dbg(hdev, "sock %p", sk);
322 mgmt_fill_version_info(&rp);
/* No controller index: version info is global, hence MGMT_INDEX_NONE. */
324 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: report the supported commands/events,
 * choosing the full or the untrusted subset based on the socket's
 * HCI_SOCK_TRUSTED flag.
 * NOTE(review): several lines (local declarations, the kmalloc NULL check,
 * the else keyword, kfree and return) are not visible in this extraction
 * -- verify against the full source.
 */
328 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
331 struct mgmt_rp_read_commands *rp;
332 u16 num_commands, num_events;
336 bt_dev_dbg(hdev, "sock %p", sk);
338 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
339 num_commands = ARRAY_SIZE(mgmt_commands);
340 num_events = ARRAY_SIZE(mgmt_events);
342 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
343 num_events = ARRAY_SIZE(mgmt_untrusted_events);
/* Reply is the fixed header plus one u16 opcode slot per entry. */
346 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
348 rp = kmalloc(rp_size, GFP_KERNEL);
352 rp->num_commands = cpu_to_le16(num_commands);
353 rp->num_events = cpu_to_le16(num_events);
355 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
356 __le16 *opcode = rp->opcodes;
/* Commands first, then events, packed into the same opcode array. */
358 for (i = 0; i < num_commands; i++, opcode++)
359 put_unaligned_le16(mgmt_commands[i], opcode);
361 for (i = 0; i < num_events; i++, opcode++)
362 put_unaligned_le16(mgmt_events[i], opcode);
364 __le16 *opcode = rp->opcodes;
366 for (i = 0; i < num_commands; i++, opcode++)
367 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
369 for (i = 0; i < num_events; i++, opcode++)
370 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
373 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: enumerate configured primary
 * controllers.  Counts matching devices first, allocates the reply, then
 * fills it under hci_dev_list_lock.
 * NOTE(review): local declarations, the kmalloc NULL check and the
 * kfree/return tail are not visible in this extraction -- verify.
 */
380 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
383 struct mgmt_rp_read_index_list *rp;
389 bt_dev_dbg(hdev, "sock %p", sk);
391 read_lock(&hci_dev_list_lock);
/* First pass: count eligible controllers to size the reply. */
394 list_for_each_entry(d, &hci_dev_list, list) {
395 if (d->dev_type == HCI_PRIMARY &&
396 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
400 rp_len = sizeof(*rp) + (2 * count);
/* GFP_ATOMIC: allocation happens while holding the read lock. */
401 rp = kmalloc(rp_len, GFP_ATOMIC);
403 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in indexes, skipping devices still in setup/config
 * or bound to a user channel.
 */
408 list_for_each_entry(d, &hci_dev_list, list) {
409 if (hci_dev_test_flag(d, HCI_SETUP) ||
410 hci_dev_test_flag(d, HCI_CONFIG) ||
411 hci_dev_test_flag(d, HCI_USER_CHANNEL))
414 /* Devices marked as raw-only are neither configured
415 * nor unconfigured controllers.
417 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
420 if (d->dev_type == HCI_PRIMARY &&
421 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
422 rp->index[count++] = cpu_to_le16(d->id);
423 bt_dev_dbg(hdev, "Added hci%u", d->id);
/* Recompute length: the second pass may have skipped devices. */
427 rp->num_controllers = cpu_to_le16(count);
428 rp_len = sizeof(*rp) + (2 * count);
430 read_unlock(&hci_dev_list_lock);
432 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reports only UNCONFIGURED primary controllers.
 * NOTE(review): local declarations, the kmalloc NULL check and the
 * kfree/return tail are not visible in this extraction -- verify.
 */
440 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
441 void *data, u16 data_len)
443 struct mgmt_rp_read_unconf_index_list *rp;
449 bt_dev_dbg(hdev, "sock %p", sk);
451 read_lock(&hci_dev_list_lock);
/* First pass: count unconfigured primary controllers. */
454 list_for_each_entry(d, &hci_dev_list, list) {
455 if (d->dev_type == HCI_PRIMARY &&
456 hci_dev_test_flag(d, HCI_UNCONFIGURED))
460 rp_len = sizeof(*rp) + (2 * count);
461 rp = kmalloc(rp_len, GFP_ATOMIC);
463 read_unlock(&hci_dev_list_lock);
/* Second pass: fill indexes, skipping setup/config/user-channel devs. */
468 list_for_each_entry(d, &hci_dev_list, list) {
469 if (hci_dev_test_flag(d, HCI_SETUP) ||
470 hci_dev_test_flag(d, HCI_CONFIG) ||
471 hci_dev_test_flag(d, HCI_USER_CHANNEL))
474 /* Devices marked as raw-only are neither configured
475 * nor unconfigured controllers.
477 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
480 if (d->dev_type == HCI_PRIMARY &&
481 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
482 rp->index[count++] = cpu_to_le16(d->id);
483 bt_dev_dbg(hdev, "Added hci%u", d->id);
487 rp->num_controllers = cpu_to_le16(count);
488 rp_len = sizeof(*rp) + (2 * count);
490 read_unlock(&hci_dev_list_lock);
492 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
493 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: list both primary and AMP
 * controllers with a per-entry type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus info.  Also switches the
 * calling socket over to extended index events.
 * NOTE(review): local declarations, NULL check and kfree/return tail are
 * not visible in this extraction -- verify against the full source.
 */
500 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
501 void *data, u16 data_len)
503 struct mgmt_rp_read_ext_index_list *rp;
508 bt_dev_dbg(hdev, "sock %p", sk);
510 read_lock(&hci_dev_list_lock);
/* First pass: count all primary and AMP controllers. */
513 list_for_each_entry(d, &hci_dev_list, list) {
514 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
/* struct_size() computes header + flexible-array entries safely. */
518 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
520 read_unlock(&hci_dev_list_lock);
525 list_for_each_entry(d, &hci_dev_list, list) {
526 if (hci_dev_test_flag(d, HCI_SETUP) ||
527 hci_dev_test_flag(d, HCI_CONFIG) ||
528 hci_dev_test_flag(d, HCI_USER_CHANNEL))
531 /* Devices marked as raw-only are neither configured
532 * nor unconfigured controllers.
534 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
537 if (d->dev_type == HCI_PRIMARY) {
538 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
539 rp->entry[count].type = 0x01;
541 rp->entry[count].type = 0x00;
542 } else if (d->dev_type == HCI_AMP) {
543 rp->entry[count].type = 0x02;
548 rp->entry[count].bus = d->bus;
549 rp->entry[count++].index = cpu_to_le16(d->id);
550 bt_dev_dbg(hdev, "Added hci%u", d->id);
553 rp->num_controllers = cpu_to_le16(count);
555 read_unlock(&hci_dev_list_lock);
557 /* If this command is called at least once, then all the
558 * default index and unconfigured index events are disabled
559 * and from now on only extended index events are used.
561 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
562 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
563 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
565 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
566 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
567 struct_size(rp, entry, count));
/* Whether the controller has completed all required external
 * configuration: external config done (if the quirk demands it) and a
 * public address set (if the quirk demands one).
 * NOTE(review): the return statements are not visible in this extraction;
 * presumably each matched condition returns false and the fall-through
 * returns true -- verify against the full source.
 */
574 static bool is_configured(struct hci_dev *hdev)
576 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
577 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
580 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
581 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
582 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Build the little-endian bitmask of configuration options that are still
 * missing before the controller can become configured (mirrors the checks
 * in is_configured()).
 * NOTE(review): the initialisation of the local options variable is not
 * visible in this extraction -- verify against the full source.
 */
588 static __le32 get_missing_options(struct hci_dev *hdev)
592 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
593 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
594 options |= MGMT_OPTION_EXTERNAL_CONFIG;
596 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
597 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
598 !bacmp(&hdev->public_addr, BDADDR_ANY))
599 options |= MGMT_OPTION_PUBLIC_ADDRESS;
601 return cpu_to_le32(options);
604 static int new_options(struct hci_dev *hdev, struct sock *skip)
606 __le32 options = get_missing_options(hdev);
608 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
609 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
/* Complete @opcode with the current missing-options mask as the reply.
 * NOTE(review): the final length argument of the call is not visible in
 * this extraction -- verify against the full source.
 */
612 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
614 __le32 options = get_missing_options(hdev);
616 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer, supported
 * configuration options and the currently missing ones.
 * NOTE(review): the hci_dev_lock() call and the options variable
 * initialisation are not visible in this extraction (the matching
 * hci_dev_unlock() is) -- verify against the full source.
 */
620 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
621 void *data, u16 data_len)
623 struct mgmt_rp_read_config_info rp;
626 bt_dev_dbg(hdev, "sock %p", sk);
630 memset(&rp, 0, sizeof(rp));
631 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
633 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
634 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* A public address is only settable if the driver provides a hook. */
636 if (hdev->set_bdaddr)
637 options |= MGMT_OPTION_PUBLIC_ADDRESS;
639 rp.supported_options = cpu_to_le32(options);
640 rp.missing_options = get_missing_options(hdev);
642 hci_dev_unlock(hdev);
644 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the bitmask of PHYs the controller hardware supports, derived
 * from the BR/EDR LMP feature bits and the LE feature page.
 */
648 static u32 get_supported_phys(struct hci_dev *hdev)
650 u32 supported_phys = 0;
652 if (lmp_bredr_capable(hdev)) {
/* Basic rate 1-slot is mandatory whenever BR/EDR is present. */
653 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
655 if (hdev->features[0][0] & LMP_3SLOT)
656 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
658 if (hdev->features[0][0] & LMP_5SLOT)
659 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
661 if (lmp_edr_2m_capable(hdev)) {
662 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
664 if (lmp_edr_3slot_capable(hdev))
665 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
667 if (lmp_edr_5slot_capable(hdev))
668 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
670 if (lmp_edr_3m_capable(hdev)) {
671 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
673 if (lmp_edr_3slot_capable(hdev))
674 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
676 if (lmp_edr_5slot_capable(hdev))
677 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
682 if (lmp_le_capable(hdev)) {
/* LE 1M TX/RX is mandatory for any LE controller. */
683 supported_phys |= MGMT_PHY_LE_1M_TX;
684 supported_phys |= MGMT_PHY_LE_1M_RX;
686 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
687 supported_phys |= MGMT_PHY_LE_2M_TX;
688 supported_phys |= MGMT_PHY_LE_2M_RX;
691 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
692 supported_phys |= MGMT_PHY_LE_CODED_TX;
693 supported_phys |= MGMT_PHY_LE_CODED_RX;
697 return supported_phys;
/* Build the bitmask of PHYs currently selected for use.  For BR/EDR the
 * EDR packet-type bits in hdev->pkt_type are "disabled-when-set", hence
 * the negated tests; for LE the default TX/RX PHY masks are consulted.
 */
700 static u32 get_selected_phys(struct hci_dev *hdev)
702 u32 selected_phys = 0;
704 if (lmp_bredr_capable(hdev)) {
705 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
707 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
708 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
710 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
711 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
713 if (lmp_edr_2m_capable(hdev)) {
/* EDR bits in pkt_type mean "do NOT use" -- selected when clear. */
714 if (!(hdev->pkt_type & HCI_2DH1))
715 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
717 if (lmp_edr_3slot_capable(hdev) &&
718 !(hdev->pkt_type & HCI_2DH3))
719 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
721 if (lmp_edr_5slot_capable(hdev) &&
722 !(hdev->pkt_type & HCI_2DH5))
723 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
725 if (lmp_edr_3m_capable(hdev)) {
726 if (!(hdev->pkt_type & HCI_3DH1))
727 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
729 if (lmp_edr_3slot_capable(hdev) &&
730 !(hdev->pkt_type & HCI_3DH3))
731 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
733 if (lmp_edr_5slot_capable(hdev) &&
734 !(hdev->pkt_type & HCI_3DH5))
735 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
740 if (lmp_le_capable(hdev)) {
741 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
742 selected_phys |= MGMT_PHY_LE_1M_TX;
744 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
745 selected_phys |= MGMT_PHY_LE_1M_RX;
747 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
748 selected_phys |= MGMT_PHY_LE_2M_TX;
750 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
751 selected_phys |= MGMT_PHY_LE_2M_RX;
753 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
754 selected_phys |= MGMT_PHY_LE_CODED_TX;
756 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
757 selected_phys |= MGMT_PHY_LE_CODED_RX;
760 return selected_phys;
763 static u32 get_configurable_phys(struct hci_dev *hdev)
765 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
766 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Compute the MGMT settings this controller can support, based on its
 * LMP/LE capabilities and quirks.
 * NOTE(review): the initialisation of the settings variable and the final
 * return are not visible in this extraction -- verify.
 */
769 static u32 get_supported_settings(struct hci_dev *hdev)
773 settings |= MGMT_SETTING_POWERED;
774 settings |= MGMT_SETTING_BONDABLE;
775 settings |= MGMT_SETTING_DEBUG_KEYS;
776 settings |= MGMT_SETTING_CONNECTABLE;
777 settings |= MGMT_SETTING_DISCOVERABLE;
779 if (lmp_bredr_capable(hdev)) {
/* Fast connectable requires interlaced scan (BT 1.2 or later). */
780 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
781 settings |= MGMT_SETTING_FAST_CONNECTABLE;
782 settings |= MGMT_SETTING_BREDR;
783 settings |= MGMT_SETTING_LINK_SECURITY;
785 if (lmp_ssp_capable(hdev)) {
786 settings |= MGMT_SETTING_SSP;
787 if (IS_ENABLED(CONFIG_BT_HS))
788 settings |= MGMT_SETTING_HS;
791 if (lmp_sc_capable(hdev))
792 settings |= MGMT_SETTING_SECURE_CONN;
794 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
796 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
799 if (lmp_le_capable(hdev)) {
800 settings |= MGMT_SETTING_LE;
801 settings |= MGMT_SETTING_SECURE_CONN;
802 settings |= MGMT_SETTING_PRIVACY;
803 settings |= MGMT_SETTING_STATIC_ADDRESS;
805 /* When the experimental feature for LL Privacy support is
806 * enabled, then advertising is no longer supported.
808 if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
809 settings |= MGMT_SETTING_ADVERTISING;
812 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
814 settings |= MGMT_SETTING_CONFIGURATION;
816 settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Compute the controller's currently-active MGMT settings from its dev
 * flags.
 * NOTE(review): the initialisation of the settings variable and the final
 * return are not visible in this extraction -- verify.
 */
821 static u32 get_current_settings(struct hci_dev *hdev)
825 if (hdev_is_powered(hdev))
826 settings |= MGMT_SETTING_POWERED;
828 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
829 settings |= MGMT_SETTING_CONNECTABLE;
831 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
832 settings |= MGMT_SETTING_FAST_CONNECTABLE;
834 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
835 settings |= MGMT_SETTING_DISCOVERABLE;
837 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
838 settings |= MGMT_SETTING_BONDABLE;
840 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
841 settings |= MGMT_SETTING_BREDR;
843 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
844 settings |= MGMT_SETTING_LE;
846 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
847 settings |= MGMT_SETTING_LINK_SECURITY;
849 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
850 settings |= MGMT_SETTING_SSP;
852 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
853 settings |= MGMT_SETTING_HS;
855 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
856 settings |= MGMT_SETTING_ADVERTISING;
858 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
859 settings |= MGMT_SETTING_SECURE_CONN;
861 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
862 settings |= MGMT_SETTING_DEBUG_KEYS;
864 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
865 settings |= MGMT_SETTING_PRIVACY;
867 /* The current setting for static address has two purposes. The
868 * first is to indicate if the static address will be used and
869 * the second is to indicate if it is actually set.
871 * This means if the static address is not configured, this flag
872 * will never be set. If the address is configured, then if the
873 * address is actually used decides if the flag is set or not.
875 * For single mode LE only controllers and dual-mode controllers
876 * with BR/EDR disabled, the existence of the static address will
879 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
880 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
881 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
882 if (bacmp(&hdev->static_addr, BDADDR_ANY))
883 settings |= MGMT_SETTING_STATIC_ADDRESS;
886 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
887 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
892 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
894 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
/* Look up a pending mgmt command on the control channel matching both
 * opcode and user data.
 * NOTE(review): the final parameter of the signature is not visible in
 * this extraction -- verify against the full source.
 */
897 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
898 struct hci_dev *hdev,
901 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
/* Return the LE advertising discoverability flags (LE_AD_GENERAL /
 * LE_AD_LIMITED) for this controller, honouring an in-flight
 * Set Discoverable command before the device flags themselves.
 * NOTE(review): the if (cmd) guard, the 0x01 comparison and the trailing
 * return 0 are not visible in this extraction -- verify.
 */
904 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
906 struct mgmt_pending_cmd *cmd;
908 /* If there's a pending mgmt command the flags will not yet have
909 * their final values, so check for this first.
911 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
913 struct mgmt_mode *cp = cmd->param;
915 return LE_AD_GENERAL;
916 else if (cp->val == 0x02)
917 return LE_AD_LIMITED;
919 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
920 return LE_AD_LIMITED;
921 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
922 return LE_AD_GENERAL;
/* Whether the controller should be treated as connectable, honouring a
 * pending Set Connectable command over the HCI_CONNECTABLE flag.
 * NOTE(review): the if (cmd) guard and the return of cp->val are not
 * visible in this extraction -- verify against the full source.
 */
928 bool mgmt_get_connectable(struct hci_dev *hdev)
930 struct mgmt_pending_cmd *cmd;
932 /* If there's a pending mgmt command the flag will not yet have
933 * it's final value, so check for this first.
935 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
937 struct mgmt_mode *cp = cmd->param;
942 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* Delayed work: expire the service cache and push a fresh EIR and class
 * of device to the controller.
 * NOTE(review): the container_of() member argument, the early return and
 * the hci_dev_lock() call are not visible in this extraction -- verify.
 */
945 static void service_cache_off(struct work_struct *work)
947 struct hci_dev *hdev = container_of(work, struct hci_dev,
949 struct hci_request req;
/* Only act if the cache flag was actually set (test-and-clear). */
951 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
954 hci_req_init(&req, hdev);
958 __hci_req_update_eir(&req);
959 __hci_req_update_class(&req);
961 hci_dev_unlock(hdev);
963 hci_req_run(&req, NULL);
/* Delayed work: mark the resolvable private address as expired and, if
 * advertising is active, restart it so a new RPA gets generated.
 * NOTE(review): the container_of() member argument, the early return and
 * the else keyword before __hci_req_enable_advertising() are not visible
 * in this extraction -- verify against the full source.
 */
966 static void rpa_expired(struct work_struct *work)
968 struct hci_dev *hdev = container_of(work, struct hci_dev,
970 struct hci_request req;
972 bt_dev_dbg(hdev, "");
974 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
976 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
979 /* The generation of a new RPA and programming it into the
980 * controller happens in the hci_req_enable_advertising()
983 hci_req_init(&req, hdev);
984 if (ext_adv_capable(hdev))
985 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
987 __hci_req_enable_advertising(&req);
988 hci_req_run(&req, NULL);
/* One-time per-controller mgmt initialisation; the test-and-set of
 * HCI_MGMT makes repeated calls a no-op.
 * NOTE(review): the early return after the test-and-set is not visible in
 * this extraction -- verify against the full source.
 */
991 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
993 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
996 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
997 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
999 /* Non-mgmt controlled devices get this bit set
1000 * implicitly so that pairing works for them, however
1001 * for mgmt we require user-space to explicitly enable
1004 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: basic controller information (address,
 * version, settings, class, names).
 * NOTE(review): the hci_dev_lock() call and the final sizeof(rp) argument
 * are not visible in this extraction -- verify against the full source.
 */
1007 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1008 void *data, u16 data_len)
1010 struct mgmt_rp_read_info rp;
1012 bt_dev_dbg(hdev, "sock %p", sk);
1016 memset(&rp, 0, sizeof(rp));
1018 bacpy(&rp.bdaddr, &hdev->bdaddr);
1020 rp.version = hdev->hci_ver;
1021 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1023 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1024 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1026 memcpy(rp.dev_class, hdev->dev_class, 3);
1028 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1029 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1031 hci_dev_unlock(hdev);
1033 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Serialise the controller's class of device, appearance and names into
 * an EIR buffer; returns the number of bytes written.
 * NOTE(review): the eir_len initialisation, the appearance value argument
 * and the final return are not visible in this extraction -- verify.
 */
1037 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1042 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1043 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1044 hdev->dev_class, 3);
1046 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1047 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1050 name_len = strlen(hdev->dev_name);
1051 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1052 hdev->dev_name, name_len);
1054 name_len = strlen(hdev->short_name);
1055 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1056 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: controller info plus EIR-encoded
 * class/appearance/names.  Also switches the calling socket to extended
 * info events only.
 * NOTE(review): the buf declaration with its size and the hci_dev_lock()
 * call are not visible in this extraction -- verify against the source.
 */
1061 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1062 void *data, u16 data_len)
1065 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1068 bt_dev_dbg(hdev, "sock %p", sk);
1070 memset(&buf, 0, sizeof(buf));
1074 bacpy(&rp->bdaddr, &hdev->bdaddr);
1076 rp->version = hdev->hci_ver;
1077 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1079 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1080 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1083 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1084 rp->eir_len = cpu_to_le16(eir_len);
1086 hci_dev_unlock(hdev);
1088 /* If this command is called at least once, then the events
1089 * for class of device and local name changes are disabled
1090 * and only the new extended controller information event
1093 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1094 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1095 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1097 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1098 sizeof(*rp) + eir_len);
/* Broadcast an Extended Info Changed event (with current EIR payload) to
 * sockets that opted into extended info events, skipping @skip.
 * NOTE(review): the buf declaration with its size is not visible in this
 * extraction -- verify against the full source.
 */
1101 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1104 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1107 memset(buf, 0, sizeof(buf));
1109 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1110 ev->eir_len = cpu_to_le16(eir_len);
1112 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1113 sizeof(*ev) + eir_len,
1114 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Complete @opcode with the current settings bitmask as the reply.
 * NOTE(review): the final length argument of the call is not visible in
 * this extraction -- verify against the full source.
 */
1117 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1119 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1121 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1125 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1127 bt_dev_dbg(hdev, "status 0x%02x", status);
1129 if (hci_conn_count(hdev) == 0) {
1130 cancel_delayed_work(&hdev->power_off);
1131 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1135 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1137 struct mgmt_ev_advertising_added ev;
1139 ev.instance = instance;
1141 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1144 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1147 struct mgmt_ev_advertising_removed ev;
1149 ev.instance = instance;
1151 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1154 static void cancel_adv_timeout(struct hci_dev *hdev)
1156 if (hdev->adv_instance_timeout) {
1157 hdev->adv_instance_timeout = 0;
1158 cancel_delayed_work(&hdev->adv_instance_expire);
/* Quiesce the controller before powering off: stop page/inquiry scan,
 * disable advertising, stop discovery and abort every connection.
 * NOTE(review): the scan variable declaration and the final return err
 * are not visible in this extraction -- verify against the full source.
 */
1162 static int clean_up_hci_state(struct hci_dev *hdev)
1164 struct hci_request req;
1165 struct hci_conn *conn;
1166 bool discov_stopped;
1169 hci_req_init(&req, hdev);
1171 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1172 test_bit(HCI_PSCAN, &hdev->flags)) {
1174 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1177 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1179 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1180 __hci_req_disable_advertising(&req);
1182 discov_stopped = hci_req_stop_discovery(&req);
1184 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1185 /* 0x15 == Terminated due to Power Off */
1186 __hci_abort_conn(&req, conn, 0x15);
1189 err = hci_req_run(&req, clean_up_hci_complete);
1190 if (!err && discov_stopped)
1191 hci_discovery_set_state(hdev, DISCOVERY_STOPPING)
/* MGMT_OP_SET_POWERED handler: queue power-on work or begin the clean
 * shutdown path depending on cp->val.
 * NOTE(review): the hci_dev_lock() call, the busy-status argument, the
 * mgmt_pending_add() NULL check, the if (cp->val) branch keyword and the
 * goto labels are not visible in this extraction -- verify.
 */
1196 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1199 struct mgmt_mode *cp = data;
1200 struct mgmt_pending_cmd *cmd;
1203 bt_dev_dbg(hdev, "sock %p", sk);
/* Only 0x00 (off) and 0x01 (on) are valid mode values. */
1205 if (cp->val != 0x00 && cp->val != 0x01)
1206 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1207 MGMT_STATUS_INVALID_PARAMS);
1211 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1212 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Requested state already active: just echo the settings back. */
1217 if (!!cp->val == hdev_is_powered(hdev)) {
1218 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1222 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1229 queue_work(hdev->req_workqueue, &hdev->power_on);
1232 /* Disconnect connections, stop scans, etc */
1233 err = clean_up_hci_state(hdev);
1235 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1236 HCI_POWER_OFF_TIMEOUT);
1238 /* ENODATA means there were no HCI commands queued */
1239 if (err == -ENODATA) {
1240 cancel_delayed_work(&hdev->power_off);
1241 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1247 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_NEW_SETTINGS (current settings as LE32) to every
 * socket registered for setting events, except @skip.
 */
1251 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1253 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1255 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1256 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Public wrapper: broadcast the settings change to all sockets. */
1259 int mgmt_new_settings(struct hci_dev *hdev)
1261 return new_settings(hdev, NULL);
1266 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer a pending command with the
 * current settings, unlink and free it, and remember the first socket
 * seen in the cmd_lookup match (with a reference held) so the caller
 * can skip it when broadcasting new_settings.
 */
1270 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1272 struct cmd_lookup *match = data;
1274 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1276 list_del(&cmd->list);
1278 if (match->sk == NULL) {
1279 match->sk = cmd->sk;
1280 sock_hold(match->sk);
1283 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail a pending command with the status
 * pointed to by @data and remove it from the pending list.
 */
1286 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1290 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1291 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the command's own
 * cmd_complete handler; otherwise fall back to a plain status reply.
 */
1294 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1296 if (cmd->cmd_complete) {
1299 cmd->cmd_complete(cmd, *status);
1300 mgmt_pending_remove(cmd);
1305 cmd_status_rsp(cmd, data);
/* Generic completion: echo back the command's full stored parameters. */
1308 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1310 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1311 cmd->param, cmd->param_len);
/* Address-style completion: reply with only the leading mgmt_addr_info
 * portion of the stored parameters (assumes params start with one).
 */
1314 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1316 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1317 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, else OK.
 */
1320 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1322 if (!lmp_bredr_capable(hdev))
1323 return MGMT_STATUS_NOT_SUPPORTED;
1324 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1325 return MGMT_STATUS_REJECTED;
1327 return MGMT_STATUS_SUCCESS;
/* Map LE availability to a mgmt status, mirroring mgmt_bredr_support. */
1330 static u8 mgmt_le_support(struct hci_dev *hdev)
1332 if (!lmp_le_capable(hdev))
1333 return MGMT_STATUS_NOT_SUPPORTED;
1334 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1335 return MGMT_STATUS_REJECTED;
1337 return MGMT_STATUS_SUCCESS;
/* Completion hook for SET_DISCOVERABLE: on failure, report the error
 * and clear the limited-discoverable flag; on success, (re)arm the
 * discoverable timeout and notify the requester plus all other sockets.
 */
1340 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1342 struct mgmt_pending_cmd *cmd;
1344 bt_dev_dbg(hdev, "status 0x%02x", status);
1348 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1353 u8 mgmt_err = mgmt_status(status);
1354 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1355 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Arm the discoverable-off timer only when a timeout was requested. */
1359 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1360 hdev->discov_timeout > 0) {
1361 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1362 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1365 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1366 new_settings(hdev, cmd->sk);
1369 mgmt_pending_remove(cmd);
1372 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.
 * cp->val: 0x00 off, 0x01 general discoverable, 0x02 limited
 * discoverable (limited requires a non-zero timeout; off requires a
 * zero timeout). Handles the powered-off case by flipping flags only,
 * the "same mode, new timeout" case without HCI traffic, and otherwise
 * queues the discoverable_update work with a pending command recorded.
 */
1375 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1378 struct mgmt_cp_set_discoverable *cp = data;
1379 struct mgmt_pending_cmd *cmd;
1383 bt_dev_dbg(hdev, "sock %p", sk);
/* Discoverable requires at least one of LE/BR-EDR to be enabled. */
1385 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1386 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1387 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1388 MGMT_STATUS_REJECTED);
1390 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1391 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1392 MGMT_STATUS_INVALID_PARAMS);
1394 timeout = __le16_to_cpu(cp->timeout);
1396 /* Disabling discoverable requires that no timeout is set,
1397 * and enabling limited discoverable requires a timeout.
1399 if ((cp->val == 0x00 && timeout > 0) ||
1400 (cp->val == 0x02 && timeout == 0))
1401 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1402 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be armed while powered off. */
1406 if (!hdev_is_powered(hdev) && timeout > 0) {
1407 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1408 MGMT_STATUS_NOT_POWERED);
1412 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1413 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1414 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable, so reject otherwise. */
1419 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1420 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1421 MGMT_STATUS_REJECTED);
1425 if (hdev->advertising_paused) {
1426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1431 if (!hdev_is_powered(hdev)) {
1432 bool changed = false;
1434 /* Setting limited discoverable when powered off is
1435 * not a valid operation since it requires a timeout
1436 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1438 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1439 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1443 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1448 err = new_settings(hdev, sk);
1453 /* If the current mode is the same, then just update the timeout
1454 * value with the new value. And if only the timeout gets updated,
1455 * then no need for any HCI transactions.
1457 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1458 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1459 HCI_LIMITED_DISCOVERABLE)) {
1460 cancel_delayed_work(&hdev->discov_off);
1461 hdev->discov_timeout = timeout;
1463 if (cp->val && hdev->discov_timeout > 0) {
1464 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1465 queue_delayed_work(hdev->req_workqueue,
1466 &hdev->discov_off, to);
1469 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1473 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1479 /* Cancel any potential discoverable timeout that might be
1480 * still active and store new timeout value. The arming of
1481 * the timeout happens in the complete handler.
1483 cancel_delayed_work(&hdev->discov_off);
1484 hdev->discov_timeout = timeout;
1487 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1489 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1491 /* Limited discoverable mode */
1492 if (cp->val == 0x02)
1493 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1495 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1497 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1501 hci_dev_unlock(hdev);
/* Completion hook for SET_CONNECTABLE: report an error status, or send
 * the settings response to the requester and broadcast new settings to
 * everyone else; then drop the pending command.
 */
1505 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1507 struct mgmt_pending_cmd *cmd;
1509 bt_dev_dbg(hdev, "status 0x%02x", status);
1513 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1518 u8 mgmt_err = mgmt_status(status);
1519 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1523 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1524 new_settings(hdev, cmd->sk);
1527 mgmt_pending_remove(cmd);
1530 hci_dev_unlock(hdev);
/* Flag-only connectable update (used when no HCI traffic is needed,
 * e.g. while powered off). Disabling connectable also clears
 * discoverable. On a real change, refresh scan state and background
 * scanning and broadcast new settings.
 */
1533 static int set_connectable_update_settings(struct hci_dev *hdev,
1534 struct sock *sk, u8 val)
1536 bool changed = false;
1539 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1543 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1545 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1546 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1549 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1554 hci_req_update_scan(hdev);
1555 hci_update_background_scan(hdev);
1556 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler. Requires LE or BR/EDR enabled and a
 * boolean cp->val. Powered-off devices take the flag-only path; powered
 * devices record a pending command, adjust flags (disabling connectable
 * also clears discoverable state and its timeout) and queue the
 * connectable_update work.
 */
1562 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1565 struct mgmt_mode *cp = data;
1566 struct mgmt_pending_cmd *cmd;
1569 bt_dev_dbg(hdev, "sock %p", sk);
1571 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1572 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1573 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1574 MGMT_STATUS_REJECTED);
1576 if (cp->val != 0x00 && cp->val != 0x01)
1577 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1578 MGMT_STATUS_INVALID_PARAMS);
1582 if (!hdev_is_powered(hdev)) {
1583 err = set_connectable_update_settings(hdev, sk, cp->val);
1587 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1588 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1589 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1594 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1601 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off also tears down discoverable mode. */
1603 if (hdev->discov_timeout > 0)
1604 cancel_delayed_work(&hdev->discov_off);
1606 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1607 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1608 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1611 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1615 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag. Pure
 * flag change — no HCI traffic — except that in limited privacy mode
 * with active, discoverable advertising a discoverable_update is queued
 * because the advertising address may change.
 */
1619 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1622 struct mgmt_mode *cp = data;
1626 bt_dev_dbg(hdev, "sock %p", sk);
1628 if (cp->val != 0x00 && cp->val != 0x01)
1629 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1630 MGMT_STATUS_INVALID_PARAMS);
1635 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1637 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1639 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1644 /* In limited privacy mode the change of bondable mode
1645 * may affect the local advertising address.
1647 if (hdev_is_powered(hdev) &&
1648 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1649 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1650 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1651 queue_work(hdev->req_workqueue,
1652 &hdev->discoverable_update);
1654 err = new_settings(hdev, sk);
1658 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: enable/disable BR/EDR
 * authentication. Powered-off devices only flip HCI_LINK_SECURITY;
 * powered devices send HCI Write Authentication Enable and track the
 * request as a pending command (skipped when HCI_AUTH already matches).
 */
1662 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1665 struct mgmt_mode *cp = data;
1666 struct mgmt_pending_cmd *cmd;
1670 bt_dev_dbg(hdev, "sock %p", sk);
1672 status = mgmt_bredr_support(hdev);
1674 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1677 if (cp->val != 0x00 && cp->val != 0x01)
1678 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1679 MGMT_STATUS_INVALID_PARAMS);
1683 if (!hdev_is_powered(hdev)) {
1684 bool changed = false;
1686 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1687 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1691 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1696 err = new_settings(hdev, sk);
1701 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1702 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller state already matches the request: no HCI command. */
1709 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1710 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1714 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1720 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1722 mgmt_pending_remove(cmd);
1727 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing.
 * Requires BR/EDR and SSP capability. Powered-off devices flip flags
 * only (disabling SSP also clears HS); powered devices send HCI Write
 * SSP Mode, first turning off SSP debug mode when disabling SSP while
 * debug keys are in use.
 */
1731 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1733 struct mgmt_mode *cp = data;
1734 struct mgmt_pending_cmd *cmd;
1738 bt_dev_dbg(hdev, "sock %p", sk);
1740 status = mgmt_bredr_support(hdev);
1742 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1744 if (!lmp_ssp_capable(hdev))
1745 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1746 MGMT_STATUS_NOT_SUPPORTED);
1748 if (cp->val != 0x00 && cp->val != 0x01)
1749 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1750 MGMT_STATUS_INVALID_PARAMS);
1754 if (!hdev_is_powered(hdev)) {
/* Flag names elided in this chunk; presumably HCI_SSP_ENABLED —
 * TODO confirm against the full source. */
1758 changed = !hci_dev_test_and_set_flag(hdev,
1761 changed = hci_dev_test_and_clear_flag(hdev,
1764 changed = hci_dev_test_and_clear_flag(hdev,
1767 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1770 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1775 err = new_settings(hdev, sk);
1780 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1781 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1786 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1787 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1791 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1797 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1798 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1799 sizeof(cp->val), &cp->val);
1801 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1803 mgmt_pending_remove(cmd);
1808 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: toggle the High Speed (AMP) flag. Requires
 * CONFIG_BT_HS, BR/EDR, SSP capability and SSP enabled. This is a pure
 * flag change; note that disabling HS is rejected while powered.
 */
1812 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1814 struct mgmt_mode *cp = data;
1819 bt_dev_dbg(hdev, "sock %p", sk);
1821 if (!IS_ENABLED(CONFIG_BT_HS))
1822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1823 MGMT_STATUS_NOT_SUPPORTED);
1825 status = mgmt_bredr_support(hdev);
1827 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1829 if (!lmp_ssp_capable(hdev))
1830 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1831 MGMT_STATUS_NOT_SUPPORTED);
1833 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1834 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1835 MGMT_STATUS_REJECTED);
1837 if (cp->val != 0x00 && cp->val != 0x01)
1838 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1839 MGMT_STATUS_INVALID_PARAMS);
/* An SSP change in flight could race with the HS flag; report busy. */
1843 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1844 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1850 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1852 if (hdev_is_powered(hdev)) {
1853 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1854 MGMT_STATUS_REJECTED);
1858 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1861 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1866 err = new_settings(hdev, sk);
1869 hci_dev_unlock(hdev);
/* HCI completion callback for the SET_LE request. On error, fail all
 * pending SET_LE commands; on success, answer them with the settings
 * and broadcast new settings. If LE ended up enabled, also refresh the
 * default advertising/scan-response data and background scanning.
 */
1873 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1875 struct cmd_lookup match = { NULL, hdev };
1880 u8 mgmt_err = mgmt_status(status);
1882 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1887 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1889 new_settings(hdev, match.sk);
1894 /* Make sure the controller has a good default for
1895 * advertising data. Restrict the update to when LE
1896 * has actually been enabled. During power on, the
1897 * update in powered_update_hci will take care of it.
1899 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1900 struct hci_request req;
1901 hci_req_init(&req, hdev);
1902 if (ext_adv_capable(hdev)) {
1905 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1907 __hci_req_update_scan_rsp_data(&req, 0x00);
/* Legacy advertising path (non-extended). */
1909 __hci_req_update_adv_data(&req, 0x00);
1910 __hci_req_update_scan_rsp_data(&req, 0x00);
1912 hci_req_run(&req, NULL);
1913 hci_update_background_scan(hdev);
1917 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: enable/disable LE host support. LE-only
 * controllers cannot switch LE off (enable is acked gracefully, disable
 * rejected). When no HCI traffic is needed, only flags change; otherwise
 * a Write LE Host Supported request is built — disabling LE also stops
 * advertising and clears extended advertising sets first.
 */
1920 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1922 struct mgmt_mode *cp = data;
1923 struct hci_cp_write_le_host_supported hci_cp;
1924 struct mgmt_pending_cmd *cmd;
1925 struct hci_request req;
1929 bt_dev_dbg(hdev, "sock %p", sk);
1931 if (!lmp_le_capable(hdev))
1932 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1933 MGMT_STATUS_NOT_SUPPORTED);
1935 if (cp->val != 0x00 && cp->val != 0x01)
1936 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1937 MGMT_STATUS_INVALID_PARAMS);
1939 /* Bluetooth single mode LE only controllers or dual-mode
1940 * controllers configured as LE only devices, do not allow
1941 * switching LE off. These have either LE enabled explicitly
1942 * or BR/EDR has been previously switched off.
1944 * When trying to enable an already enabled LE, then gracefully
1945 * send a positive response. Trying to disable it however will
1946 * result into rejection.
1948 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1949 if (cp->val == 0x01)
1950 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1952 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1953 MGMT_STATUS_REJECTED);
1959 enabled = lmp_host_le_capable(hdev);
/* Presumably only on disable: drop all advertising instances —
 * the guarding condition is elided in this chunk. */
1962 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1964 if (!hdev_is_powered(hdev) || val == enabled) {
1965 bool changed = false;
1967 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1968 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Disabling LE also turns off the advertising setting. */
1972 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1973 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1977 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1982 err = new_settings(hdev, sk);
1987 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1988 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1989 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1994 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2000 hci_req_init(&req, hdev);
2002 memset(&hci_cp, 0, sizeof(hci_cp));
2006 hci_cp.simul = 0x00;
2008 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2009 __hci_req_disable_advertising(&req);
2011 if (ext_adv_capable(hdev))
2012 __hci_req_clear_ext_adv_sets(&req);
2015 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2018 err = hci_req_run(&req, le_enable_complete);
2020 mgmt_pending_remove(cmd);
2023 hci_dev_unlock(hdev);
2027 /* This is a helper function to test for pending mgmt commands that can
2028 * cause CoD or EIR HCI commands. We can only allow one such pending
2029 * mgmt command at a time since otherwise we cannot easily track what
2030 * the current values are, will be, and based on that calculate if a new
2031 * HCI command needs to be sent and if yes with what value.
2033 static bool pending_eir_or_class(struct hci_dev *hdev)
2035 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any opcode that may touch CoD/EIR. */
2037 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2038 switch (cmd->opcode) {
2039 case MGMT_OP_ADD_UUID:
2040 case MGMT_OP_REMOVE_UUID:
2041 case MGMT_OP_SET_DEV_CLASS:
2042 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; used to detect 16/32-bit shortened UUIDs.
 */
2050 static const u8 bluetooth_base_uuid[] = {
2051 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2052 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID: if its low 12 bytes differ
 * from the Bluetooth Base UUID it is a full 128-bit UUID; otherwise
 * the 32-bit value at offset 12 decides 16 vs 32 bit (the return
 * statements are elided in this chunk).
 */
2055 static u8 get_uuid_size(const u8 *uuid)
2059 if (memcmp(uuid, bluetooth_base_uuid, 12))
2062 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for class-of-device related ops (add/remove UUID,
 * set dev class): answer the pending @mgmt_op with the translated
 * status and the current 3-byte device class, then drop the command.
 */
2069 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2071 struct mgmt_pending_cmd *cmd;
2075 cmd = pending_find(mgmt_op, hdev);
2079 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2080 mgmt_status(status), hdev->dev_class, 3);
2082 mgmt_pending_remove(cmd);
2085 hci_dev_unlock(hdev);
/* HCI request callback for add_uuid: delegate to the shared class
 * completion for MGMT_OP_ADD_UUID.
 */
2088 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2090 bt_dev_dbg(hdev, "status 0x%02x", status);
2092 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: append a service UUID to hdev->uuids and
 * refresh class-of-device and EIR via one HCI request. Busy while
 * another CoD/EIR-affecting command is pending; -ENODATA from
 * hci_req_run means no HCI traffic was needed and the reply is sent
 * immediately.
 */
2095 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2097 struct mgmt_cp_add_uuid *cp = data;
2098 struct mgmt_pending_cmd *cmd;
2099 struct hci_request req;
2100 struct bt_uuid *uuid;
2103 bt_dev_dbg(hdev, "sock %p", sk);
2107 if (pending_eir_or_class(hdev)) {
2108 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2113 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2119 memcpy(uuid->uuid, cp->uuid, 16);
2120 uuid->svc_hint = cp->svc_hint;
2121 uuid->size = get_uuid_size(cp->uuid);
2123 list_add_tail(&uuid->list, &hdev->uuids);
2125 hci_req_init(&req, hdev);
2127 __hci_req_update_class(&req);
2128 __hci_req_update_eir(&req);
2130 err = hci_req_run(&req, add_uuid_complete);
2132 if (err != -ENODATA)
2135 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2136 hdev->dev_class, 3);
/* Request queued: record a pending cmd answered by add_uuid_complete. */
2140 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2149 hci_dev_unlock(hdev);
/* Arm the service-cache mechanism when powered: set HCI_SERVICE_CACHE
 * and schedule the service_cache delayed work if it was not already set
 * (return statements elided in this chunk; presumably returns whether
 * the cache is active).
 */
2153 static bool enable_service_cache(struct hci_dev *hdev)
2155 if (!hdev_is_powered(hdev))
2158 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2159 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for remove_uuid: delegate to the shared class
 * completion for MGMT_OP_REMOVE_UUID.
 */
2167 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2169 bt_dev_dbg(hdev, "status 0x%02x", status);
2171 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler. An all-zero UUID clears the whole list
 * (possibly deferring the EIR update via the service cache); otherwise
 * the matching entries are removed, with INVALID_PARAMS if none match.
 * Ends with the same CoD/EIR refresh pattern as add_uuid.
 */
2174 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2177 struct mgmt_cp_remove_uuid *cp = data;
2178 struct mgmt_pending_cmd *cmd;
2179 struct bt_uuid *match, *tmp;
2180 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2181 struct hci_request req;
2184 bt_dev_dbg(hdev, "sock %p", sk);
2188 if (pending_eir_or_class(hdev)) {
2189 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard UUID: drop every registered UUID. */
2194 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2195 hci_uuids_clear(hdev);
2197 if (enable_service_cache(hdev)) {
2198 err = mgmt_cmd_complete(sk, hdev->id,
2199 MGMT_OP_REMOVE_UUID,
2200 0, hdev->dev_class, 3);
/* Remove only entries matching the given UUID. */
2209 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2210 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2213 list_del(&match->list);
/* No entry matched (the 'found' bookkeeping is elided here). */
2219 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2220 MGMT_STATUS_INVALID_PARAMS);
2225 hci_req_init(&req, hdev);
2227 __hci_req_update_class(&req);
2228 __hci_req_update_eir(&req);
2230 err = hci_req_run(&req, remove_uuid_complete);
2232 if (err != -ENODATA)
2235 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2236 hdev->dev_class, 3);
2240 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2249 hci_dev_unlock(hdev);
/* HCI request callback for set_dev_class: delegate to the shared class
 * completion for MGMT_OP_SET_DEV_CLASS.
 */
2253 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2255 bt_dev_dbg(hdev, "status 0x%02x", status);
2257 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: store the major/minor class (with
 * reserved-bit validation), flush a pending service cache if active,
 * and push the new class (plus EIR if the cache was flushed) to the
 * controller; powered-off devices just record the values.
 */
2260 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2263 struct mgmt_cp_set_dev_class *cp = data;
2264 struct mgmt_pending_cmd *cmd;
2265 struct hci_request req;
2268 bt_dev_dbg(hdev, "sock %p", sk);
2270 if (!lmp_bredr_capable(hdev))
2271 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2272 MGMT_STATUS_NOT_SUPPORTED);
2276 if (pending_eir_or_class(hdev)) {
2277 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reject values using reserved bits (minor low 2, major high 3). */
2282 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2283 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2284 MGMT_STATUS_INVALID_PARAMS);
2288 hdev->major_class = cp->major;
2289 hdev->minor_class = cp->minor;
2291 if (!hdev_is_powered(hdev)) {
2292 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2293 hdev->dev_class, 3);
2297 hci_req_init(&req, hdev);
/* Flush the service cache synchronously (lock dropped for the
 * cancel_delayed_work_sync) before updating EIR. */
2299 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2300 hci_dev_unlock(hdev);
2301 cancel_delayed_work_sync(&hdev->service_cache);
2303 __hci_req_update_eir(&req);
2306 __hci_req_update_class(&req);
2308 err = hci_req_run(&req, set_class_complete);
2310 if (err != -ENODATA)
2313 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2314 hdev->dev_class, 3);
2318 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2327 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list. Validates key_count against both the u16
 * payload bound and the exact expected length, validates each entry
 * (BR/EDR address type, key type <= 0x08), updates the keep-debug-keys
 * flag, then stores keys — skipping blocked keys and debug-combination
 * keys.
 */
2331 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2334 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound so struct_size() below cannot overflow u16 math. */
2335 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2336 sizeof(struct mgmt_link_key_info));
2337 u16 key_count, expected_len;
2341 bt_dev_dbg(hdev, "sock %p", sk);
2343 if (!lmp_bredr_capable(hdev))
2344 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2345 MGMT_STATUS_NOT_SUPPORTED);
2347 key_count = __le16_to_cpu(cp->key_count);
2348 if (key_count > max_key_count) {
2349 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2351 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2352 MGMT_STATUS_INVALID_PARAMS);
2355 expected_len = struct_size(cp, keys, key_count);
2356 if (expected_len != len) {
2357 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2359 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2360 MGMT_STATUS_INVALID_PARAMS);
2363 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2364 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2365 MGMT_STATUS_INVALID_PARAMS);
2367 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: validate every entry before touching stored keys. */
2370 for (i = 0; i < key_count; i++) {
2371 struct mgmt_link_key_info *key = &cp->keys[i];
2373 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2374 return mgmt_cmd_status(sk, hdev->id,
2375 MGMT_OP_LOAD_LINK_KEYS,
2376 MGMT_STATUS_INVALID_PARAMS);
2381 hci_link_keys_clear(hdev);
2384 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2386 changed = hci_dev_test_and_clear_flag(hdev,
2387 HCI_KEEP_DEBUG_KEYS);
2390 new_settings(hdev, NULL);
/* Second pass: store the accepted keys. */
2392 for (i = 0; i < key_count; i++) {
2393 struct mgmt_link_key_info *key = &cp->keys[i];
2395 if (hci_is_blocked_key(hdev,
2396 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2398 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2403 /* Always ignore debug keys and require a new pairing if
2404 * the user wants to use them.
2406 if (key->type == HCI_LK_DEBUG_COMBINATION)
2409 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2410 key->type, key->pin_len, NULL);
2413 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2415 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for @bdaddr/@addr_type to all sockets
 * except @skip_sk.
 */
2420 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2421 u8 addr_type, struct sock *skip_sk)
2423 struct mgmt_ev_device_unpaired ev;
2425 bacpy(&ev.addr.bdaddr, bdaddr);
2426 ev.addr.type = addr_type;
2428 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: remove pairing material for a device
 * (BR/EDR link key, or SMP LTK/IRK for LE) and optionally terminate an
 * existing connection when cp->disconnect is set. When a disconnect is
 * issued, the reply is deferred to the pending command's completion.
 */
2432 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2435 struct mgmt_cp_unpair_device *cp = data;
2436 struct mgmt_rp_unpair_device rp;
2437 struct hci_conn_params *params;
2438 struct mgmt_pending_cmd *cmd;
2439 struct hci_conn *conn;
2443 memset(&rp, 0, sizeof(rp));
2444 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2445 rp.addr.type = cp->addr.type;
2447 if (!bdaddr_type_is_valid(cp->addr.type))
2448 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2449 MGMT_STATUS_INVALID_PARAMS,
2452 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2453 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2454 MGMT_STATUS_INVALID_PARAMS,
2459 if (!hdev_is_powered(hdev)) {
2460 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2461 MGMT_STATUS_NOT_POWERED, &rp,
2466 if (cp->addr.type == BDADDR_BREDR) {
2467 /* If disconnection is requested, then look up the
2468 * connection. If the remote device is connected, it
2469 * will be later used to terminate the link.
2471 * Setting it to NULL explicitly will cause no
2472 * termination of the link.
2475 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2480 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2482 err = mgmt_cmd_complete(sk, hdev->id,
2483 MGMT_OP_UNPAIR_DEVICE,
2484 MGMT_STATUS_NOT_PAIRED, &rp,
2492 /* LE address type */
2493 addr_type = le_addr_type(cp->addr.type);
2495 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2496 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2498 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2499 MGMT_STATUS_NOT_PAIRED, &rp,
2504 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2506 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2511 /* Defer clearing up the connection parameters until closing to
2512 * give a chance of keeping them if a repairing happens.
2514 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2516 /* Disable auto-connection parameters if present */
2517 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2519 if (params->explicit_connect)
2520 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2522 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2525 /* If disconnection is not requested, then clear the connection
2526 * variable so that the link is not terminated.
2528 if (!cp->disconnect)
2532 /* If the connection variable is set, then termination of the
2533 * link is requested.
2536 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2538 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2542 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2549 cmd->cmd_complete = addr_cmd_complete;
2551 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2553 mgmt_pending_remove(cmd);
2556 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE connection to
 * the given address. Fails with NOT_POWERED / BUSY / NOT_CONNECTED as
 * appropriate; otherwise records a pending command (answered via
 * generic_cmd_complete when the disconnect completes) and issues
 * hci_disconnect with Remote User Terminated Connection.
 */
2560 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2563 struct mgmt_cp_disconnect *cp = data;
2564 struct mgmt_rp_disconnect rp;
2565 struct mgmt_pending_cmd *cmd;
2566 struct hci_conn *conn;
2569 bt_dev_dbg(hdev, "sock %p", sk);
2571 memset(&rp, 0, sizeof(rp));
2572 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2573 rp.addr.type = cp->addr.type;
2575 if (!bdaddr_type_is_valid(cp->addr.type))
2576 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2577 MGMT_STATUS_INVALID_PARAMS,
2582 if (!test_bit(HCI_UP, &hdev->flags)) {
2583 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2584 MGMT_STATUS_NOT_POWERED, &rp,
2589 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2590 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2591 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2595 if (cp->addr.type == BDADDR_BREDR)
2596 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2599 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2600 le_addr_type(cp->addr.type));
2602 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2603 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2604 MGMT_STATUS_NOT_CONNECTED, &rp,
2609 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2615 cmd->cmd_complete = generic_cmd_complete;
2617 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2619 mgmt_pending_remove(cmd);
2622 hci_dev_unlock(hdev);
/* Translate an HCI (link_type, addr_type) pair into a mgmt BDADDR_*
 * address type. LE public maps to BDADDR_LE_PUBLIC, other LE address
 * types fall back to BDADDR_LE_RANDOM, everything else to BDADDR_BREDR
 * (the LE_LINK case label is elided in this chunk).
 */
2626 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2628 switch (link_type) {
2630 switch (addr_type) {
2631 case ADDR_LE_DEV_PUBLIC:
2632 return BDADDR_LE_PUBLIC;
2635 /* Fallback to LE Random address type */
2636 return BDADDR_LE_RANDOM;
2640 /* Fallback to BR/EDR type */
2641 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections. Counts eligible entries first to size the
 * reply, then fills it in a second pass, skipping SCO/eSCO links, and
 * recomputes the reply length from the number actually written.
 */
2645 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2648 struct mgmt_rp_get_connections *rp;
2653 bt_dev_dbg(hdev, "sock %p", sk);
2657 if (!hdev_is_powered(hdev)) {
2658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2659 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections marked MGMT-connected. */
2664 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2665 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2669 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in the address list. */
2676 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2677 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2679 bacpy(&rp->addr[i].bdaddr, &c->dst);
2680 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2681 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2686 rp->conn_count = cpu_to_le16(i);
2688 /* Recalculate length in case of filtered SCO connections, etc */
2689 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2690 struct_size(rp, addr, i));
2695 hci_dev_unlock(hdev);
/* Issue HCI PIN Code Negative Reply for the address in @cp, tracking
 * it as a pending MGMT_OP_PIN_CODE_NEG_REPLY command that is answered
 * via addr_cmd_complete; the pending entry is dropped on send failure.
 */
2699 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2700 struct mgmt_cp_pin_code_neg_reply *cp)
2702 struct mgmt_pending_cmd *cmd;
2705 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2710 cmd->cmd_complete = addr_cmd_complete;
2712 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2713 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2715 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN to the
 * controller for the connected BR/EDR device. A high-security pairing
 * that did not get a full 16-byte PIN is converted into a negative
 * reply and the caller gets INVALID_PARAMS.
 */
2720 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2723 struct hci_conn *conn;
2724 struct mgmt_cp_pin_code_reply *cp = data;
2725 struct hci_cp_pin_code_reply reply;
2726 struct mgmt_pending_cmd *cmd;
2729 bt_dev_dbg(hdev, "sock %p", sk);
2733 if (!hdev_is_powered(hdev)) {
2734 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2735 MGMT_STATUS_NOT_POWERED);
2739 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2741 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2742 MGMT_STATUS_NOT_CONNECTED);
/* Security level HIGH demands a full 16-digit PIN. */
2746 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2747 struct mgmt_cp_pin_code_neg_reply ncp;
2749 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2751 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2753 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2755 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2756 MGMT_STATUS_INVALID_PARAMS);
2761 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2767 cmd->cmd_complete = addr_cmd_complete;
2769 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2770 reply.pin_len = cp->pin_len;
2771 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2773 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2775 mgmt_pending_remove(cmd);
2778 hci_dev_unlock(hdev);
2782 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2785 struct mgmt_cp_set_io_capability *cp = data;
2787 bt_dev_dbg(hdev, "sock %p", sk);
2789 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2790 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2791 MGMT_STATUS_INVALID_PARAMS);
2795 hdev->io_capability = cp->io_capability;
2797 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2799 hci_dev_unlock(hdev);
2801 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2805 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2807 struct hci_dev *hdev = conn->hdev;
2808 struct mgmt_pending_cmd *cmd;
2810 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2811 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2814 if (cmd->user_data != conn)
2823 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2825 struct mgmt_rp_pair_device rp;
2826 struct hci_conn *conn = cmd->user_data;
2829 bacpy(&rp.addr.bdaddr, &conn->dst);
2830 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2832 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2833 status, &rp, sizeof(rp));
2835 /* So we don't get further callbacks for this connection */
2836 conn->connect_cfm_cb = NULL;
2837 conn->security_cfm_cb = NULL;
2838 conn->disconn_cfm_cb = NULL;
2840 hci_conn_drop(conn);
2842 /* The device is paired so there is no need to remove
2843 * its connection parameters anymore.
2845 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2852 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2854 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2855 struct mgmt_pending_cmd *cmd;
2857 cmd = find_pairing(conn);
2859 cmd->cmd_complete(cmd, status);
2860 mgmt_pending_remove(cmd);
2864 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2866 struct mgmt_pending_cmd *cmd;
2868 BT_DBG("status %u", status);
2870 cmd = find_pairing(conn);
2872 BT_DBG("Unable to find a pending command");
2876 cmd->cmd_complete(cmd, mgmt_status(status));
2877 mgmt_pending_remove(cmd);
2880 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2882 struct mgmt_pending_cmd *cmd;
2884 BT_DBG("status %u", status);
2889 cmd = find_pairing(conn);
2891 BT_DBG("Unable to find a pending command");
2895 cmd->cmd_complete(cmd, mgmt_status(status));
2896 mgmt_pending_remove(cmd);
2899 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2902 struct mgmt_cp_pair_device *cp = data;
2903 struct mgmt_rp_pair_device rp;
2904 struct mgmt_pending_cmd *cmd;
2905 u8 sec_level, auth_type;
2906 struct hci_conn *conn;
2909 bt_dev_dbg(hdev, "sock %p", sk);
2911 memset(&rp, 0, sizeof(rp));
2912 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2913 rp.addr.type = cp->addr.type;
2915 if (!bdaddr_type_is_valid(cp->addr.type))
2916 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2917 MGMT_STATUS_INVALID_PARAMS,
2920 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2921 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2922 MGMT_STATUS_INVALID_PARAMS,
2927 if (!hdev_is_powered(hdev)) {
2928 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2929 MGMT_STATUS_NOT_POWERED, &rp,
2934 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2935 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2936 MGMT_STATUS_ALREADY_PAIRED, &rp,
2941 sec_level = BT_SECURITY_MEDIUM;
2942 auth_type = HCI_AT_DEDICATED_BONDING;
2944 if (cp->addr.type == BDADDR_BREDR) {
2945 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2946 auth_type, CONN_REASON_PAIR_DEVICE);
2948 u8 addr_type = le_addr_type(cp->addr.type);
2949 struct hci_conn_params *p;
2951 /* When pairing a new device, it is expected to remember
2952 * this device for future connections. Adding the connection
2953 * parameter information ahead of time allows tracking
2954 * of the slave preferred values and will speed up any
2955 * further connection establishment.
2957 * If connection parameters already exist, then they
2958 * will be kept and this function does nothing.
2960 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2962 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2963 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2965 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2966 sec_level, HCI_LE_CONN_TIMEOUT,
2967 CONN_REASON_PAIR_DEVICE);
2973 if (PTR_ERR(conn) == -EBUSY)
2974 status = MGMT_STATUS_BUSY;
2975 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2976 status = MGMT_STATUS_NOT_SUPPORTED;
2977 else if (PTR_ERR(conn) == -ECONNREFUSED)
2978 status = MGMT_STATUS_REJECTED;
2980 status = MGMT_STATUS_CONNECT_FAILED;
2982 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2983 status, &rp, sizeof(rp));
2987 if (conn->connect_cfm_cb) {
2988 hci_conn_drop(conn);
2989 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2990 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2994 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2997 hci_conn_drop(conn);
3001 cmd->cmd_complete = pairing_complete;
3003 /* For LE, just connecting isn't a proof that the pairing finished */
3004 if (cp->addr.type == BDADDR_BREDR) {
3005 conn->connect_cfm_cb = pairing_complete_cb;
3006 conn->security_cfm_cb = pairing_complete_cb;
3007 conn->disconn_cfm_cb = pairing_complete_cb;
3009 conn->connect_cfm_cb = le_pairing_complete_cb;
3010 conn->security_cfm_cb = le_pairing_complete_cb;
3011 conn->disconn_cfm_cb = le_pairing_complete_cb;
3014 conn->io_capability = cp->io_cap;
3015 cmd->user_data = hci_conn_get(conn);
3017 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3018 hci_conn_security(conn, sec_level, auth_type, true)) {
3019 cmd->cmd_complete(cmd, 0);
3020 mgmt_pending_remove(cmd);
3026 hci_dev_unlock(hdev);
3030 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3033 struct mgmt_addr_info *addr = data;
3034 struct mgmt_pending_cmd *cmd;
3035 struct hci_conn *conn;
3038 bt_dev_dbg(hdev, "sock %p", sk);
3042 if (!hdev_is_powered(hdev)) {
3043 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3044 MGMT_STATUS_NOT_POWERED);
3048 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3050 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3051 MGMT_STATUS_INVALID_PARAMS);
3055 conn = cmd->user_data;
3057 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3058 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3059 MGMT_STATUS_INVALID_PARAMS);
3063 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3064 mgmt_pending_remove(cmd);
3066 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3067 addr, sizeof(*addr));
3069 /* Since user doesn't want to proceed with the connection, abort any
3070 * ongoing pairing and then terminate the link if it was created
3071 * because of the pair device action.
3073 if (addr->type == BDADDR_BREDR)
3074 hci_remove_link_key(hdev, &addr->bdaddr);
3076 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3077 le_addr_type(addr->type));
3079 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3080 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3083 hci_dev_unlock(hdev);
3087 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3088 struct mgmt_addr_info *addr, u16 mgmt_op,
3089 u16 hci_op, __le32 passkey)
3091 struct mgmt_pending_cmd *cmd;
3092 struct hci_conn *conn;
3097 if (!hdev_is_powered(hdev)) {
3098 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3099 MGMT_STATUS_NOT_POWERED, addr,
3104 if (addr->type == BDADDR_BREDR)
3105 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3107 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3108 le_addr_type(addr->type));
3111 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3112 MGMT_STATUS_NOT_CONNECTED, addr,
3117 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3118 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3120 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3121 MGMT_STATUS_SUCCESS, addr,
3124 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3125 MGMT_STATUS_FAILED, addr,
3131 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3137 cmd->cmd_complete = addr_cmd_complete;
3139 /* Continue with pairing via HCI */
3140 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3141 struct hci_cp_user_passkey_reply cp;
3143 bacpy(&cp.bdaddr, &addr->bdaddr);
3144 cp.passkey = passkey;
3145 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3147 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3151 mgmt_pending_remove(cmd);
3154 hci_dev_unlock(hdev);
3158 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3159 void *data, u16 len)
3161 struct mgmt_cp_pin_code_neg_reply *cp = data;
3163 bt_dev_dbg(hdev, "sock %p", sk);
3165 return user_pairing_resp(sk, hdev, &cp->addr,
3166 MGMT_OP_PIN_CODE_NEG_REPLY,
3167 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3170 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3173 struct mgmt_cp_user_confirm_reply *cp = data;
3175 bt_dev_dbg(hdev, "sock %p", sk);
3177 if (len != sizeof(*cp))
3178 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3179 MGMT_STATUS_INVALID_PARAMS);
3181 return user_pairing_resp(sk, hdev, &cp->addr,
3182 MGMT_OP_USER_CONFIRM_REPLY,
3183 HCI_OP_USER_CONFIRM_REPLY, 0);
3186 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3187 void *data, u16 len)
3189 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3191 bt_dev_dbg(hdev, "sock %p", sk);
3193 return user_pairing_resp(sk, hdev, &cp->addr,
3194 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3195 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3198 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3201 struct mgmt_cp_user_passkey_reply *cp = data;
3203 bt_dev_dbg(hdev, "sock %p", sk);
3205 return user_pairing_resp(sk, hdev, &cp->addr,
3206 MGMT_OP_USER_PASSKEY_REPLY,
3207 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3210 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3211 void *data, u16 len)
3213 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3215 bt_dev_dbg(hdev, "sock %p", sk);
3217 return user_pairing_resp(sk, hdev, &cp->addr,
3218 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3219 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3222 static void adv_expire(struct hci_dev *hdev, u32 flags)
3224 struct adv_info *adv_instance;
3225 struct hci_request req;
3228 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3232 /* stop if current instance doesn't need to be changed */
3233 if (!(adv_instance->flags & flags))
3236 cancel_adv_timeout(hdev);
3238 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3242 hci_req_init(&req, hdev);
3243 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3248 hci_req_run(&req, NULL);
3251 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3253 struct mgmt_cp_set_local_name *cp;
3254 struct mgmt_pending_cmd *cmd;
3256 bt_dev_dbg(hdev, "status 0x%02x", status);
3260 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3267 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3268 mgmt_status(status));
3270 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3273 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3274 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3277 mgmt_pending_remove(cmd);
3280 hci_dev_unlock(hdev);
3283 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3286 struct mgmt_cp_set_local_name *cp = data;
3287 struct mgmt_pending_cmd *cmd;
3288 struct hci_request req;
3291 bt_dev_dbg(hdev, "sock %p", sk);
3295 /* If the old values are the same as the new ones just return a
3296 * direct command complete event.
3298 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3299 !memcmp(hdev->short_name, cp->short_name,
3300 sizeof(hdev->short_name))) {
3301 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3306 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3308 if (!hdev_is_powered(hdev)) {
3309 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3311 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3316 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3317 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3318 ext_info_changed(hdev, sk);
3323 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3329 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3331 hci_req_init(&req, hdev);
3333 if (lmp_bredr_capable(hdev)) {
3334 __hci_req_update_name(&req);
3335 __hci_req_update_eir(&req);
3338 /* The name is stored in the scan response data and so
3339 * no need to udpate the advertising data here.
3341 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3342 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3344 err = hci_req_run(&req, set_name_complete);
3346 mgmt_pending_remove(cmd);
3349 hci_dev_unlock(hdev);
3353 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3356 struct mgmt_cp_set_appearance *cp = data;
3360 bt_dev_dbg(hdev, "sock %p", sk);
3362 if (!lmp_le_capable(hdev))
3363 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3364 MGMT_STATUS_NOT_SUPPORTED);
3366 appearance = le16_to_cpu(cp->appearance);
3370 if (hdev->appearance != appearance) {
3371 hdev->appearance = appearance;
3373 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3374 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3376 ext_info_changed(hdev, sk);
3379 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3382 hci_dev_unlock(hdev);
3387 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3388 void *data, u16 len)
3390 struct mgmt_rp_get_phy_confguration rp;
3392 bt_dev_dbg(hdev, "sock %p", sk);
3396 memset(&rp, 0, sizeof(rp));
3398 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3399 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3400 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3402 hci_dev_unlock(hdev);
3404 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3408 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3410 struct mgmt_ev_phy_configuration_changed ev;
3412 memset(&ev, 0, sizeof(ev));
3414 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3416 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3420 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3421 u16 opcode, struct sk_buff *skb)
3423 struct mgmt_pending_cmd *cmd;
3425 bt_dev_dbg(hdev, "status 0x%02x", status);
3429 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3434 mgmt_cmd_status(cmd->sk, hdev->id,
3435 MGMT_OP_SET_PHY_CONFIGURATION,
3436 mgmt_status(status));
3438 mgmt_cmd_complete(cmd->sk, hdev->id,
3439 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3442 mgmt_phy_configuration_changed(hdev, cmd->sk);
3445 mgmt_pending_remove(cmd);
3448 hci_dev_unlock(hdev);
3451 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3452 void *data, u16 len)
3454 struct mgmt_cp_set_phy_confguration *cp = data;
3455 struct hci_cp_le_set_default_phy cp_phy;
3456 struct mgmt_pending_cmd *cmd;
3457 struct hci_request req;
3458 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3459 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3460 bool changed = false;
3463 bt_dev_dbg(hdev, "sock %p", sk);
3465 configurable_phys = get_configurable_phys(hdev);
3466 supported_phys = get_supported_phys(hdev);
3467 selected_phys = __le32_to_cpu(cp->selected_phys);
3469 if (selected_phys & ~supported_phys)
3470 return mgmt_cmd_status(sk, hdev->id,
3471 MGMT_OP_SET_PHY_CONFIGURATION,
3472 MGMT_STATUS_INVALID_PARAMS);
3474 unconfigure_phys = supported_phys & ~configurable_phys;
3476 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3477 return mgmt_cmd_status(sk, hdev->id,
3478 MGMT_OP_SET_PHY_CONFIGURATION,
3479 MGMT_STATUS_INVALID_PARAMS);
3481 if (selected_phys == get_selected_phys(hdev))
3482 return mgmt_cmd_complete(sk, hdev->id,
3483 MGMT_OP_SET_PHY_CONFIGURATION,
3488 if (!hdev_is_powered(hdev)) {
3489 err = mgmt_cmd_status(sk, hdev->id,
3490 MGMT_OP_SET_PHY_CONFIGURATION,
3491 MGMT_STATUS_REJECTED);
3495 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3496 err = mgmt_cmd_status(sk, hdev->id,
3497 MGMT_OP_SET_PHY_CONFIGURATION,
3502 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3503 pkt_type |= (HCI_DH3 | HCI_DM3);
3505 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3507 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3508 pkt_type |= (HCI_DH5 | HCI_DM5);
3510 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3512 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3513 pkt_type &= ~HCI_2DH1;
3515 pkt_type |= HCI_2DH1;
3517 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3518 pkt_type &= ~HCI_2DH3;
3520 pkt_type |= HCI_2DH3;
3522 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3523 pkt_type &= ~HCI_2DH5;
3525 pkt_type |= HCI_2DH5;
3527 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3528 pkt_type &= ~HCI_3DH1;
3530 pkt_type |= HCI_3DH1;
3532 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3533 pkt_type &= ~HCI_3DH3;
3535 pkt_type |= HCI_3DH3;
3537 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3538 pkt_type &= ~HCI_3DH5;
3540 pkt_type |= HCI_3DH5;
3542 if (pkt_type != hdev->pkt_type) {
3543 hdev->pkt_type = pkt_type;
3547 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3548 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3550 mgmt_phy_configuration_changed(hdev, sk);
3552 err = mgmt_cmd_complete(sk, hdev->id,
3553 MGMT_OP_SET_PHY_CONFIGURATION,
3559 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3566 hci_req_init(&req, hdev);
3568 memset(&cp_phy, 0, sizeof(cp_phy));
3570 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3571 cp_phy.all_phys |= 0x01;
3573 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3574 cp_phy.all_phys |= 0x02;
3576 if (selected_phys & MGMT_PHY_LE_1M_TX)
3577 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3579 if (selected_phys & MGMT_PHY_LE_2M_TX)
3580 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3582 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3583 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3585 if (selected_phys & MGMT_PHY_LE_1M_RX)
3586 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3588 if (selected_phys & MGMT_PHY_LE_2M_RX)
3589 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3591 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3592 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3594 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3596 err = hci_req_run_skb(&req, set_default_phy_complete);
3598 mgmt_pending_remove(cmd);
3601 hci_dev_unlock(hdev);
3606 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3609 int err = MGMT_STATUS_SUCCESS;
3610 struct mgmt_cp_set_blocked_keys *keys = data;
3611 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3612 sizeof(struct mgmt_blocked_key_info));
3613 u16 key_count, expected_len;
3616 bt_dev_dbg(hdev, "sock %p", sk);
3618 key_count = __le16_to_cpu(keys->key_count);
3619 if (key_count > max_key_count) {
3620 bt_dev_err(hdev, "too big key_count value %u", key_count);
3621 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3622 MGMT_STATUS_INVALID_PARAMS);
3625 expected_len = struct_size(keys, keys, key_count);
3626 if (expected_len != len) {
3627 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3629 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3630 MGMT_STATUS_INVALID_PARAMS);
3635 hci_blocked_keys_clear(hdev);
3637 for (i = 0; i < keys->key_count; ++i) {
3638 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3641 err = MGMT_STATUS_NO_RESOURCES;
3645 b->type = keys->keys[i].type;
3646 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3647 list_add_rcu(&b->list, &hdev->blocked_keys);
3649 hci_dev_unlock(hdev);
3651 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3655 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3656 void *data, u16 len)
3658 struct mgmt_mode *cp = data;
3660 bool changed = false;
3662 bt_dev_dbg(hdev, "sock %p", sk);
3664 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3665 return mgmt_cmd_status(sk, hdev->id,
3666 MGMT_OP_SET_WIDEBAND_SPEECH,
3667 MGMT_STATUS_NOT_SUPPORTED);
3669 if (cp->val != 0x00 && cp->val != 0x01)
3670 return mgmt_cmd_status(sk, hdev->id,
3671 MGMT_OP_SET_WIDEBAND_SPEECH,
3672 MGMT_STATUS_INVALID_PARAMS);
3676 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3677 err = mgmt_cmd_status(sk, hdev->id,
3678 MGMT_OP_SET_WIDEBAND_SPEECH,
3683 if (hdev_is_powered(hdev) &&
3684 !!cp->val != hci_dev_test_flag(hdev,
3685 HCI_WIDEBAND_SPEECH_ENABLED)) {
3686 err = mgmt_cmd_status(sk, hdev->id,
3687 MGMT_OP_SET_WIDEBAND_SPEECH,
3688 MGMT_STATUS_REJECTED);
3693 changed = !hci_dev_test_and_set_flag(hdev,
3694 HCI_WIDEBAND_SPEECH_ENABLED);
3696 changed = hci_dev_test_and_clear_flag(hdev,
3697 HCI_WIDEBAND_SPEECH_ENABLED);
3699 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3704 err = new_settings(hdev, sk);
3707 hci_dev_unlock(hdev);
3711 static int read_security_info(struct sock *sk, struct hci_dev *hdev,
3712 void *data, u16 data_len)
3715 struct mgmt_rp_read_security_info *rp = (void *)buf;
3719 bt_dev_dbg(hdev, "sock %p", sk);
3721 memset(&buf, 0, sizeof(buf));
3725 /* When the Read Simple Pairing Options command is supported, then
3726 * the remote public key validation is supported.
3728 if (hdev->commands[41] & 0x08)
3729 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3731 flags |= 0x02; /* Remote public key validation (LE) */
3733 /* When the Read Encryption Key Size command is supported, then the
3734 * encryption key size is enforced.
3736 if (hdev->commands[20] & 0x10)
3737 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3739 flags |= 0x08; /* Encryption key size enforcement (LE) */
3741 sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);
3743 /* When the Read Simple Pairing Options command is supported, then
3744 * also max encryption key size information is provided.
3746 if (hdev->commands[41] & 0x08)
3747 sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
3748 hdev->max_enc_key_size);
3750 sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);
3752 rp->sec_len = cpu_to_le16(sec_len);
3754 hci_dev_unlock(hdev);
3756 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
3757 rp, sizeof(*rp) + sec_len);
3760 #ifdef CONFIG_BT_FEATURE_DEBUG
3761 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3762 static const u8 debug_uuid[16] = {
3763 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3764 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3768 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3769 static const u8 simult_central_periph_uuid[16] = {
3770 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3771 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3774 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3775 static const u8 rpa_resolution_uuid[16] = {
3776 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3777 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
3780 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3781 void *data, u16 data_len)
3783 char buf[62]; /* Enough space for 3 features */
3784 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3788 bt_dev_dbg(hdev, "sock %p", sk);
3790 memset(&buf, 0, sizeof(buf));
3792 #ifdef CONFIG_BT_FEATURE_DEBUG
3794 flags = bt_dbg_get() ? BIT(0) : 0;
3796 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3797 rp->features[idx].flags = cpu_to_le32(flags);
3803 if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
3804 (hdev->le_states[4] & 0x08) && /* Central */
3805 (hdev->le_states[4] & 0x40) && /* Peripheral */
3806 (hdev->le_states[3] & 0x10)) /* Simultaneous */
3811 memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
3812 rp->features[idx].flags = cpu_to_le32(flags);
3816 if (hdev && use_ll_privacy(hdev)) {
3817 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3818 flags = BIT(0) | BIT(1);
3822 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3823 rp->features[idx].flags = cpu_to_le32(flags);
3827 rp->feature_count = cpu_to_le16(idx);
3829 /* After reading the experimental features information, enable
3830 * the events to update client on any future change.
3832 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3834 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3835 MGMT_OP_READ_EXP_FEATURES_INFO,
3836 0, rp, sizeof(*rp) + (20 * idx));
3839 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3842 struct mgmt_ev_exp_feature_changed ev;
3844 memset(&ev, 0, sizeof(ev));
3845 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3846 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3848 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3850 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Emit an Experimental Feature Changed event for the debug feature on the
 * non-controller index (NULL hdev).
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
3869 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3870 void *data, u16 data_len)
3872 struct mgmt_cp_set_exp_feature *cp = data;
3873 struct mgmt_rp_set_exp_feature rp;
3875 bt_dev_dbg(hdev, "sock %p", sk);
3877 if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3878 memset(rp.uuid, 0, 16);
3879 rp.flags = cpu_to_le32(0);
3881 #ifdef CONFIG_BT_FEATURE_DEBUG
3883 bool changed = bt_dbg_get();
3888 exp_debug_feature_changed(false, sk);
3892 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
3893 bool changed = hci_dev_test_flag(hdev,
3894 HCI_ENABLE_LL_PRIVACY);
3896 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3899 exp_ll_privacy_feature_changed(false, hdev, sk);
3902 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3904 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3905 MGMT_OP_SET_EXP_FEATURE, 0,
3909 #ifdef CONFIG_BT_FEATURE_DEBUG
3910 if (!memcmp(cp->uuid, debug_uuid, 16)) {
3914 /* Command requires to use the non-controller index */
3916 return mgmt_cmd_status(sk, hdev->id,
3917 MGMT_OP_SET_EXP_FEATURE,
3918 MGMT_STATUS_INVALID_INDEX);
3920 /* Parameters are limited to a single octet */
3921 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3922 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3923 MGMT_OP_SET_EXP_FEATURE,
3924 MGMT_STATUS_INVALID_PARAMS);
3926 /* Only boolean on/off is supported */
3927 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3928 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3929 MGMT_OP_SET_EXP_FEATURE,
3930 MGMT_STATUS_INVALID_PARAMS);
3932 val = !!cp->param[0];
3933 changed = val ? !bt_dbg_get() : bt_dbg_get();
3936 memcpy(rp.uuid, debug_uuid, 16);
3937 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
3939 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3941 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
3942 MGMT_OP_SET_EXP_FEATURE, 0,
3946 exp_debug_feature_changed(val, sk);
3952 if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
3957 /* Command requires to use the controller index */
3959 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
3960 MGMT_OP_SET_EXP_FEATURE,
3961 MGMT_STATUS_INVALID_INDEX);
3963 /* Changes can only be made when controller is powered down */
3964 if (hdev_is_powered(hdev))
3965 return mgmt_cmd_status(sk, hdev->id,
3966 MGMT_OP_SET_EXP_FEATURE,
3967 MGMT_STATUS_NOT_POWERED);
3969 /* Parameters are limited to a single octet */
3970 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
3971 return mgmt_cmd_status(sk, hdev->id,
3972 MGMT_OP_SET_EXP_FEATURE,
3973 MGMT_STATUS_INVALID_PARAMS);
3975 /* Only boolean on/off is supported */
3976 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
3977 return mgmt_cmd_status(sk, hdev->id,
3978 MGMT_OP_SET_EXP_FEATURE,
3979 MGMT_STATUS_INVALID_PARAMS);
3981 val = !!cp->param[0];
3984 changed = !hci_dev_test_flag(hdev,
3985 HCI_ENABLE_LL_PRIVACY);
3986 hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3987 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
3989 /* Enable LL privacy + supported settings changed */
3990 flags = BIT(0) | BIT(1);
3992 changed = hci_dev_test_flag(hdev,
3993 HCI_ENABLE_LL_PRIVACY);
3994 hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
3996 /* Disable LL privacy + supported settings changed */
4000 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4001 rp.flags = cpu_to_le32(flags);
4003 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4005 err = mgmt_cmd_complete(sk, hdev->id,
4006 MGMT_OP_SET_EXP_FEATURE, 0,
4010 exp_ll_privacy_feature_changed(val, hdev, sk);
4015 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4016 MGMT_OP_SET_EXP_FEATURE,
4017 MGMT_STATUS_NOT_SUPPORTED);
4020 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4022 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4025 struct mgmt_cp_get_device_flags *cp = data;
4026 struct mgmt_rp_get_device_flags rp;
4027 struct bdaddr_list_with_flags *br_params;
4028 struct hci_conn_params *params;
4029 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4030 u32 current_flags = 0;
4031 u8 status = MGMT_STATUS_INVALID_PARAMS;
4033 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4034 &cp->addr.bdaddr, cp->addr.type);
4038 if (cp->addr.type == BDADDR_BREDR) {
4039 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4045 current_flags = br_params->current_flags;
4047 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4048 le_addr_type(cp->addr.type));
4053 current_flags = params->current_flags;
4056 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4057 rp.addr.type = cp->addr.type;
4058 rp.supported_flags = cpu_to_le32(supported_flags);
4059 rp.current_flags = cpu_to_le32(current_flags);
4061 status = MGMT_STATUS_SUCCESS;
4064 hci_dev_unlock(hdev);
4066 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4070 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4071 bdaddr_t *bdaddr, u8 bdaddr_type,
4072 u32 supported_flags, u32 current_flags)
4074 struct mgmt_ev_device_flags_changed ev;
4076 bacpy(&ev.addr.bdaddr, bdaddr);
4077 ev.addr.type = bdaddr_type;
4078 ev.supported_flags = cpu_to_le32(supported_flags);
4079 ev.current_flags = cpu_to_le32(current_flags);
4081 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: update the per-device flag word for a
 * BR/EDR whitelist entry or an LE connection-parameter entry, emit a
 * flags-changed event on success, and complete the command with the
 * address echoed back. Defaults to MGMT_STATUS_INVALID_PARAMS unless a
 * matching device entry is found.
 * NOTE(review): several lines (the "len" parameter, goto/unlock on the
 * bad-flag path, else-branches, closing braces) are elided in this
 * extract; code left byte-identical.
 */
4084 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4087 struct mgmt_cp_set_device_flags *cp = data;
4088 struct bdaddr_list_with_flags *br_params;
4089 struct hci_conn_params *params;
4090 u8 status = MGMT_STATUS_INVALID_PARAMS;
4091 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4092 u32 current_flags = __le32_to_cpu(cp->current_flags);
/* NOTE(review): current_flags was already converted to host order above,
 * so the extra __le32_to_cpu() below double-converts in the debug print
 * (harmless on little-endian, misleading on big-endian) — verify.
 */
4094 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4095 &cp->addr.bdaddr, cp->addr.type,
4096 __le32_to_cpu(current_flags));
/* Reject any flag bit outside the supported mask. */
4098 if ((supported_flags | current_flags) != supported_flags) {
4099 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4100 current_flags, supported_flags);
/* BR/EDR devices live in the whitelist; LE devices in conn_params. */
4106 if (cp->addr.type == BDADDR_BREDR) {
4107 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4112 br_params->current_flags = current_flags;
4113 status = MGMT_STATUS_SUCCESS;
4115 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4116 &cp->addr.bdaddr, cp->addr.type);
4119 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4120 le_addr_type(cp->addr.type));
4122 params->current_flags = current_flags;
4123 status = MGMT_STATUS_SUCCESS;
4125 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4127 le_addr_type(cp->addr.type));
4132 hci_dev_unlock(hdev);
/* Only notify listeners if a device entry was actually updated. */
4134 if (status == MGMT_STATUS_SUCCESS)
4135 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4136 supported_flags, current_flags);
4138 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4139 &cp->addr, sizeof(cp->addr));
/* Broadcast MGMT_EV_ADV_MONITOR_ADDED with the new monitor's handle
 * (little-endian on the wire). @sk is forwarded to mgmt_event().
 * NOTE(review): the "u16 handle" parameter line and braces are elided
 * in this extract; code left byte-identical.
 */
4142 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4145 struct mgmt_ev_adv_monitor_added ev;
4147 ev.monitor_handle = cpu_to_le16(handle);
4149 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4152 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
4155 struct mgmt_ev_adv_monitor_added ev;
4157 ev.monitor_handle = cpu_to_le16(handle);
4159 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report supported/enabled
 * advertisement-monitor features, the handle/pattern limits, and the list
 * of currently registered monitor handles.
 * NOTE(review): declarations of handle/err/rp_size, hci_dev_lock(), the
 * num_handles bound check inside the idr walk, the !rp allocation check,
 * kfree(rp) and the final return are elided in this extract; code left
 * byte-identical.
 */
4162 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4163 void *data, u16 len)
4165 struct adv_monitor *monitor = NULL;
4166 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4169 __u32 supported = 0;
4170 __u16 num_handles = 0;
4171 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4173 BT_DBG("request for %s", hdev->name);
/* Advertise OR-pattern support only when the MSFT extension provides it. */
4177 if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
4178 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* Collect handles of all registered monitors; the cap check on
 * num_handles is not visible in this extract — confirm it exists.
 */
4180 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
4181 handles[num_handles++] = monitor->handle;
4184 hci_dev_unlock(hdev);
/* Reply is the fixed header plus one u16 per registered handle. */
4186 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4187 rp = kmalloc(rp_size, GFP_KERNEL);
4191 /* Once controller-based monitoring is in place, the enabled_features
4192 * should reflect the use.
4194 rp->supported_features = cpu_to_le32(supported);
4195 rp->enabled_features = 0;
4196 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4197 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4198 rp->num_handles = cpu_to_le16(num_handles);
4200 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4202 err = mgmt_cmd_complete(sk, hdev->id,
4203 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4204 MGMT_STATUS_SUCCESS, rp, rp_size);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler: validate the pattern list in
 * the command payload, build an adv_monitor with a copy of each pattern,
 * register it via hci_add_adv_monitor(), emit ADV_MONITOR_ADDED when the
 * monitor count grew, and reply with the assigned handle. Error paths
 * free the partially built monitor.
 * NOTE(review): goto targets, !m / !p allocation checks, the loop index
 * declaration, hci_dev_lock() and closing braces are elided in this
 * extract; code left byte-identical.
 */
4211 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4212 void *data, u16 len)
4214 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4215 struct mgmt_rp_add_adv_patterns_monitor rp;
4216 struct adv_monitor *m = NULL;
4217 struct adv_pattern *p = NULL;
4218 unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
4219 __u8 cp_ofst = 0, cp_len = 0;
4222 BT_DBG("request for %s", hdev->name);
/* Payload must contain at least one pattern beyond the fixed header. */
4224 if (len <= sizeof(*cp) || cp->pattern_count == 0) {
4225 err = mgmt_cmd_status(sk, hdev->id,
4226 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4227 MGMT_STATUS_INVALID_PARAMS);
4231 m = kmalloc(sizeof(*m), GFP_KERNEL);
4237 INIT_LIST_HEAD(&m->patterns);
4240 for (i = 0; i < cp->pattern_count; i++) {
/* Cap the number of patterns per monitor. */
4241 if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
4242 err = mgmt_cmd_status(sk, hdev->id,
4243 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4244 MGMT_STATUS_INVALID_PARAMS);
/* Each pattern's offset+length must stay inside the AD data bounds. */
4248 cp_ofst = cp->patterns[i].offset;
4249 cp_len = cp->patterns[i].length;
4250 if (cp_ofst >= HCI_MAX_AD_LENGTH ||
4251 cp_len > HCI_MAX_AD_LENGTH ||
4252 (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
4253 err = mgmt_cmd_status(sk, hdev->id,
4254 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4255 MGMT_STATUS_INVALID_PARAMS);
4259 p = kmalloc(sizeof(*p), GFP_KERNEL);
/* Copy the pattern into kernel-owned storage and queue it. */
4265 p->ad_type = cp->patterns[i].ad_type;
4266 p->offset = cp->patterns[i].offset;
4267 p->length = cp->patterns[i].length;
4268 memcpy(p->value, cp->patterns[i].value, p->length);
4270 INIT_LIST_HEAD(&p->list);
4271 list_add(&p->list, &m->patterns);
/* Loop-exit count must match the declared pattern_count, otherwise the
 * payload was malformed (e.g. truncated).
 */
4274 if (mp_cnt != cp->pattern_count) {
4275 err = mgmt_cmd_status(sk, hdev->id,
4276 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4277 MGMT_STATUS_INVALID_PARAMS);
4283 prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4285 err = hci_add_adv_monitor(hdev, m);
4287 if (err == -ENOSPC) {
4288 mgmt_cmd_status(sk, hdev->id,
4289 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4290 MGMT_STATUS_NO_RESOURCES);
/* Only announce the monitor if registration actually grew the count. */
4295 if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
4296 mgmt_adv_monitor_added(sk, hdev, m->handle);
4298 hci_dev_unlock(hdev);
4300 rp.monitor_handle = cpu_to_le16(m->handle);
4302 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4303 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Shared error path: drop the lock and free the half-built monitor. */
4306 hci_dev_unlock(hdev);
4309 hci_free_adv_monitor(m);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: remove the monitor identified by
 * the handle in the payload, emit ADV_MONITOR_REMOVED when the monitor
 * count shrank, and echo the (still little-endian) handle back in the
 * reply. A missing handle yields MGMT_STATUS_INVALID_INDEX.
 * NOTE(review): handle/err declarations, hci_dev_lock(), the goto labels
 * and closing braces are elided in this extract; code left byte-identical.
 */
4313 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4314 void *data, u16 len)
4316 struct mgmt_cp_remove_adv_monitor *cp = data;
4317 struct mgmt_rp_remove_adv_monitor rp;
4318 unsigned int prev_adv_monitors_cnt;
4322 BT_DBG("request for %s", hdev->name);
4326 handle = __le16_to_cpu(cp->monitor_handle);
4327 prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4329 err = hci_remove_adv_monitor(hdev, handle);
4330 if (err == -ENOENT) {
4331 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4332 MGMT_STATUS_INVALID_INDEX);
/* Only announce removal if the count actually dropped. */
4336 if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
4337 mgmt_adv_monitor_removed(sk, hdev, handle);
4339 hci_dev_unlock(hdev);
/* Echo the caller's handle verbatim (already __le16). */
4341 rp.monitor_handle = cp->monitor_handle;
4343 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4344 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4347 hci_dev_unlock(hdev);
/* HCI completion callback for Read Local OOB Data: translate the
 * controller's reply (legacy P-192-only or extended P-192+P-256 form,
 * selected by the HCI opcode) into a mgmt_rp_read_local_oob_data reply
 * for the pending MGMT_OP_READ_LOCAL_OOB_DATA command, then drop the
 * pending command. For the legacy form the P-256 fields are trimmed off
 * the reply by shrinking rp_size.
 * NOTE(review): the !cmd early return, goto targets and braces are
 * elided in this extract; code left byte-identical.
 */
4351 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
4352 u16 opcode, struct sk_buff *skb)
4354 struct mgmt_rp_read_local_oob_data mgmt_rp;
4355 size_t rp_size = sizeof(mgmt_rp);
4356 struct mgmt_pending_cmd *cmd;
4358 bt_dev_dbg(hdev, "status %u", status);
4360 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
/* HCI-level failure or missing reply buffer: fail the mgmt command. */
4364 if (status || !skb) {
4365 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4366 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
4370 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
4372 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
4373 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a truncated controller reply. */
4375 if (skb->len < sizeof(*rp)) {
4376 mgmt_cmd_status(cmd->sk, hdev->id,
4377 MGMT_OP_READ_LOCAL_OOB_DATA,
4378 MGMT_STATUS_FAILED);
4382 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
4383 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy reply has no P-256 values: shrink the mgmt reply accordingly. */
4385 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
4387 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
4389 if (skb->len < sizeof(*rp)) {
4390 mgmt_cmd_status(cmd->sk, hdev->id,
4391 MGMT_OP_READ_LOCAL_OOB_DATA,
4392 MGMT_STATUS_FAILED);
4396 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
4397 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
4399 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
4400 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
4403 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4404 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
4407 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: after power/SSP/duplicate checks,
 * queue the appropriate HCI read (extended variant when BR/EDR Secure
 * Connections is enabled) and let read_local_oob_data_complete() build
 * the reply. The command stays pending until the HCI request completes.
 * NOTE(review): hci_dev_lock(), the MGMT_STATUS_BUSY constant on the
 * duplicate path, the !cmd check, goto targets and braces are elided in
 * this extract; code left byte-identical.
 */
4410 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
4411 void *data, u16 data_len)
4413 struct mgmt_pending_cmd *cmd;
4414 struct hci_request req;
4417 bt_dev_dbg(hdev, "sock %p", sk);
4421 if (!hdev_is_powered(hdev)) {
4422 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4423 MGMT_STATUS_NOT_POWERED);
4427 if (!lmp_ssp_capable(hdev)) {
4428 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4429 MGMT_STATUS_NOT_SUPPORTED);
/* Only one Read Local OOB Data may be in flight at a time. */
4433 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
4434 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4439 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
4445 hci_req_init(&req, hdev);
/* Extended variant also returns the P-256 hash/randomizer. */
4447 if (bredr_sc_enabled(hdev))
4448 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
4450 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
4452 err = hci_req_run_skb(&req, read_local_oob_data_complete);
4454 mgmt_pending_remove(cmd);
4457 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler. Two payload sizes are accepted:
 * the legacy form (P-192 hash/rand, BR/EDR only) and the extended form
 * (P-192 + P-256). For the extended form, zeroed key halves disable the
 * corresponding OOB data set, and LE addresses must carry zero P-192
 * values while legacy SMP OOB is unimplemented. Anything else is an
 * invalid-params error.
 * NOTE(review): err/status declarations, hci_dev_lock(), else keywords,
 * NULL assignments on the zero-key paths, goto targets and braces are
 * elided in this extract; code left byte-identical.
 */
4461 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4462 void *data, u16 len)
4464 struct mgmt_addr_info *addr = data;
4467 bt_dev_dbg(hdev, "sock %p", sk);
4469 if (!bdaddr_type_is_valid(addr->type))
4470 return mgmt_cmd_complete(sk, hdev->id,
4471 MGMT_OP_ADD_REMOTE_OOB_DATA,
4472 MGMT_STATUS_INVALID_PARAMS,
4473 addr, sizeof(*addr));
/* Legacy-sized payload: P-192 only, restricted to BR/EDR addresses. */
4477 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
4478 struct mgmt_cp_add_remote_oob_data *cp = data;
4481 if (cp->addr.type != BDADDR_BREDR) {
4482 err = mgmt_cmd_complete(sk, hdev->id,
4483 MGMT_OP_ADD_REMOTE_OOB_DATA,
4484 MGMT_STATUS_INVALID_PARAMS,
4485 &cp->addr, sizeof(cp->addr));
4489 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4490 cp->addr.type, cp->hash,
4491 cp->rand, NULL, NULL);
4493 status = MGMT_STATUS_FAILED;
4495 status = MGMT_STATUS_SUCCESS;
4497 err = mgmt_cmd_complete(sk, hdev->id,
4498 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
4499 &cp->addr, sizeof(cp->addr));
4500 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
4501 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
4502 u8 *rand192, *hash192, *rand256, *hash256;
4505 if (bdaddr_type_is_le(cp->addr.type)) {
4506 /* Enforce zero-valued 192-bit parameters as
4507 * long as legacy SMP OOB isn't implemented.
4509 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
4510 memcmp(cp->hash192, ZERO_KEY, 16)) {
4511 err = mgmt_cmd_complete(sk, hdev->id,
4512 MGMT_OP_ADD_REMOTE_OOB_DATA,
4513 MGMT_STATUS_INVALID_PARAMS,
4514 addr, sizeof(*addr));
4521 /* In case one of the P-192 values is set to zero,
4522 * then just disable OOB data for P-192.
4524 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
4525 !memcmp(cp->hash192, ZERO_KEY, 16)) {
4529 rand192 = cp->rand192;
4530 hash192 = cp->hash192;
4534 /* In case one of the P-256 values is set to zero, then just
4535 * disable OOB data for P-256.
4537 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
4538 !memcmp(cp->hash256, ZERO_KEY, 16)) {
4542 rand256 = cp->rand256;
4543 hash256 = cp->hash256;
4546 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
4547 cp->addr.type, hash192, rand192,
4550 status = MGMT_STATUS_FAILED;
4552 status = MGMT_STATUS_SUCCESS;
4554 err = mgmt_cmd_complete(sk, hdev->id,
4555 MGMT_OP_ADD_REMOTE_OOB_DATA,
4556 status, &cp->addr, sizeof(cp->addr));
/* Neither recognized size: reject and log the bad length. */
4558 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
4560 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
4561 MGMT_STATUS_INVALID_PARAMS);
4565 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored OOB data for one
 * BR/EDR address, or clear all stored OOB data when BDADDR_ANY is given.
 * NOTE(review): err/status declarations, hci_dev_lock(), the goto after
 * the clear-all branch and braces are elided in this extract; code left
 * byte-identical.
 */
4569 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4570 void *data, u16 len)
4572 struct mgmt_cp_remove_remote_oob_data *cp = data;
4576 bt_dev_dbg(hdev, "sock %p", sk);
4578 if (cp->addr.type != BDADDR_BREDR)
4579 return mgmt_cmd_complete(sk, hdev->id,
4580 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4581 MGMT_STATUS_INVALID_PARAMS,
4582 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard: wipe every stored entry. */
4586 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4587 hci_remote_oob_data_clear(hdev);
4588 status = MGMT_STATUS_SUCCESS;
4592 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4594 status = MGMT_STATUS_INVALID_PARAMS;
4596 status = MGMT_STATUS_SUCCESS;
4599 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4600 status, &cp->addr, sizeof(cp->addr));
4602 hci_dev_unlock(hdev);
/* Completion hook for discovery start: find whichever start-discovery
 * variant is pending (normal, service, or limited), complete it with the
 * translated HCI status, and wake any suspend waiter that was waiting
 * for discovery to be unpaused.
 * NOTE(review): hci_dev_lock(), fall-through !cmd checks between the
 * pending_find() calls and braces are elided in this extract; code left
 * byte-identical.
 */
4606 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4608 struct mgmt_pending_cmd *cmd;
4610 bt_dev_dbg(hdev, "status %d", status);
/* Any of the three start-discovery opcodes may be the pending one. */
4614 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4616 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4619 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4622 cmd->cmd_complete(cmd, mgmt_status(status));
4623 mgmt_pending_remove(cmd);
4626 hci_dev_unlock(hdev);
4628 /* Handle suspend notifier */
4629 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4630 hdev->suspend_tasks)) {
4631 bt_dev_dbg(hdev, "Unpaused discovery");
4632 wake_up(&hdev->suspend_wait_q);
/* Validate a mgmt discovery type against the controller's capabilities.
 * Writes the mgmt status to report into *mgmt_status and (per the
 * visible structure) returns whether discovery of that type may start.
 * NOTE(review): the switch keyword, break/return statements and the
 * *mgmt_status checks after each support query are elided in this
 * extract; code left byte-identical.
 */
4636 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4637 uint8_t *mgmt_status)
4640 case DISCOV_TYPE_LE:
4641 *mgmt_status = mgmt_le_support(hdev);
/* Interleaved discovery needs LE support (BR/EDR check not visible
 * here — confirm against the full source). */
4645 case DISCOV_TYPE_INTERLEAVED:
4646 *mgmt_status = mgmt_le_support(hdev);
4650 case DISCOV_TYPE_BREDR:
4651 *mgmt_status = mgmt_bredr_support(hdev);
4656 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which). Validates power
 * state, idle discovery state, discovery type and the suspend pause
 * flag, then records the discovery parameters, queues the pending
 * command, and kicks the discov_update work to actually start scanning.
 * NOTE(review): err/status declarations, hci_dev_lock(), the !cmd error
 * branch, goto targets and braces are elided in this extract; code left
 * byte-identical.
 */
4663 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
4664 u16 op, void *data, u16 len)
4666 struct mgmt_cp_start_discovery *cp = data;
4667 struct mgmt_pending_cmd *cmd;
4671 bt_dev_dbg(hdev, "sock %p", sk);
4675 if (!hdev_is_powered(hdev)) {
4676 err = mgmt_cmd_complete(sk, hdev->id, op,
4677 MGMT_STATUS_NOT_POWERED,
4678 &cp->type, sizeof(cp->type));
/* Busy if discovery is already running or periodic inquiry is active. */
4682 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4683 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4684 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4685 &cp->type, sizeof(cp->type));
4689 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4690 err = mgmt_cmd_complete(sk, hdev->id, op, status,
4691 &cp->type, sizeof(cp->type));
4695 /* Can't start discovery when it is paused */
4696 if (hdev->discovery_paused) {
4697 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
4698 &cp->type, sizeof(cp->type));
4702 /* Clear the discovery filter first to free any previously
4703 * allocated memory for the UUID list.
4705 hci_discovery_filter_clear(hdev);
4707 hdev->discovery.type = cp->type;
4708 hdev->discovery.report_invalid_rssi = false;
/* Limited discovery restricts results to limited-discoverable devices. */
4709 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
4710 hdev->discovery.limited = true;
4712 hdev->discovery.limited = false;
4714 cmd = mgmt_pending_add(sk, op, hdev, data, len);
4720 cmd->cmd_complete = generic_cmd_complete;
/* The actual scan is started asynchronously by the discov_update work. */
4722 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4723 queue_work(hdev->req_workqueue, &hdev->discov_update);
4727 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY entry point: thin wrapper delegating to
 * start_discovery_internal() with the plain discovery opcode.
 * NOTE(review): the trailing data/len argument line and braces are
 * elided in this extract; code left byte-identical.
 */
4731 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4732 void *data, u16 len)
4734 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY entry point: thin wrapper delegating
 * to start_discovery_internal() with the limited-discovery opcode.
 * NOTE(review): the trailing data/len argument line and braces are
 * elided in this extract; code left byte-identical.
 */
4738 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
4739 void *data, u16 len)
4741 return start_discovery_internal(sk, hdev,
4742 MGMT_OP_START_LIMITED_DISCOVERY,
/* cmd_complete callback for Start Service Discovery: reply with the
 * stored command parameters (argument line elided in this extract —
 * presumably cmd->param/type, confirm against the full source).
 */
4746 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4749 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * result filtering by RSSI threshold and an optional UUID list. The
 * UUID count is bounded so sizeof(*cp) + uuid_count * 16 cannot overflow
 * u16, and the total payload length must match exactly. The UUID list is
 * copied into hdev->discovery before the discov_update work is queued.
 * NOTE(review): err/status declarations, hci_dev_lock(), some
 * sizeof(cp->type) continuation lines, the !cmd branch, GFP flags,
 * MGMT_STATUS_NO_RESOURCES, goto targets and braces are elided in this
 * extract; code left byte-identical.
 */
4753 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4754 void *data, u16 len)
4756 struct mgmt_cp_start_service_discovery *cp = data;
4757 struct mgmt_pending_cmd *cmd;
/* Upper bound that keeps the expected_len computation within u16. */
4758 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4759 u16 uuid_count, expected_len;
4763 bt_dev_dbg(hdev, "sock %p", sk);
4767 if (!hdev_is_powered(hdev)) {
4768 err = mgmt_cmd_complete(sk, hdev->id,
4769 MGMT_OP_START_SERVICE_DISCOVERY,
4770 MGMT_STATUS_NOT_POWERED,
4771 &cp->type, sizeof(cp->type));
4775 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4776 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4777 err = mgmt_cmd_complete(sk, hdev->id,
4778 MGMT_OP_START_SERVICE_DISCOVERY,
4779 MGMT_STATUS_BUSY, &cp->type,
4784 uuid_count = __le16_to_cpu(cp->uuid_count);
4785 if (uuid_count > max_uuid_count) {
4786 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
4788 err = mgmt_cmd_complete(sk, hdev->id,
4789 MGMT_OP_START_SERVICE_DISCOVERY,
4790 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Payload must be exactly header + 16 bytes per UUID. */
4795 expected_len = sizeof(*cp) + uuid_count * 16;
4796 if (expected_len != len) {
4797 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
4799 err = mgmt_cmd_complete(sk, hdev->id,
4800 MGMT_OP_START_SERVICE_DISCOVERY,
4801 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4806 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4807 err = mgmt_cmd_complete(sk, hdev->id,
4808 MGMT_OP_START_SERVICE_DISCOVERY,
4809 status, &cp->type, sizeof(cp->type));
4813 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4820 cmd->cmd_complete = service_discovery_cmd_complete;
4822 /* Clear the discovery filter first to free any previously
4823 * allocated memory for the UUID list.
4825 hci_discovery_filter_clear(hdev);
4827 hdev->discovery.result_filtering = true;
4828 hdev->discovery.type = cp->type;
4829 hdev->discovery.rssi = cp->rssi;
4830 hdev->discovery.uuid_count = uuid_count;
4832 if (uuid_count > 0) {
/* Copy the caller's UUID list; freed later by
 * hci_discovery_filter_clear(). */
4833 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4835 if (!hdev->discovery.uuids) {
4836 err = mgmt_cmd_complete(sk, hdev->id,
4837 MGMT_OP_START_SERVICE_DISCOVERY,
4839 &cp->type, sizeof(cp->type));
4840 mgmt_pending_remove(cmd);
4845 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4846 queue_work(hdev->req_workqueue, &hdev->discov_update);
4850 hci_dev_unlock(hdev);
/* Completion hook for discovery stop: complete the pending
 * MGMT_OP_STOP_DISCOVERY command with the translated HCI status and wake
 * any suspend waiter that was waiting for discovery to pause.
 * NOTE(review): hci_dev_lock(), the !cmd check and braces are elided in
 * this extract; code left byte-identical.
 */
4854 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4856 struct mgmt_pending_cmd *cmd;
4858 bt_dev_dbg(hdev, "status %d", status);
4862 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4864 cmd->cmd_complete(cmd, mgmt_status(status));
4865 mgmt_pending_remove(cmd);
4868 hci_dev_unlock(hdev);
4870 /* Handle suspend notifier */
4871 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
4872 bt_dev_dbg(hdev, "Paused discovery");
4873 wake_up(&hdev->suspend_wait_q);
/* MGMT_OP_STOP_DISCOVERY handler: reject if discovery isn't active or
 * the requested type doesn't match the running one, otherwise queue the
 * pending command, mark the state STOPPING and let the discov_update
 * work perform the actual stop.
 * NOTE(review): the "u16 len" parameter, err declaration,
 * hci_dev_lock(), the !cmd branch, goto targets and braces are elided
 * in this extract; code left byte-identical.
 */
4877 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4880 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4881 struct mgmt_pending_cmd *cmd;
4884 bt_dev_dbg(hdev, "sock %p", sk);
4888 if (!hci_discovery_active(hdev)) {
4889 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4890 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4891 sizeof(mgmt_cp->type));
/* The type given must match the type of the running discovery. */
4895 if (hdev->discovery.type != mgmt_cp->type) {
4896 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4897 MGMT_STATUS_INVALID_PARAMS,
4898 &mgmt_cp->type, sizeof(mgmt_cp->type));
4902 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4908 cmd->cmd_complete = generic_cmd_complete;
4910 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4911 queue_work(hdev->req_workqueue, &hdev->discov_update);
4915 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, mark an inquiry-cache
 * entry's name as known (no remote name request needed) or as needed
 * (schedule name resolution via hci_inquiry_cache_update_resolve).
 * NOTE(review): the "u16 len" parameter, err declaration,
 * hci_dev_lock(), sizeof(cp->addr) continuations, the !e check, else
 * keyword, goto targets and braces are elided in this extract; code
 * left byte-identical.
 */
4919 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4922 struct mgmt_cp_confirm_name *cp = data;
4923 struct inquiry_entry *e;
4926 bt_dev_dbg(hdev, "sock %p", sk);
/* Name confirmation only makes sense while discovery is running. */
4930 if (!hci_discovery_active(hdev)) {
4931 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4932 MGMT_STATUS_FAILED, &cp->addr,
4937 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4939 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4940 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4945 if (cp->name_known) {
4946 e->name_state = NAME_KNOWN;
4949 e->name_state = NAME_NEEDED;
4950 hci_inquiry_cache_update_resolve(hdev, e);
4953 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4954 &cp->addr, sizeof(cp->addr));
4957 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to hdev->blacklist and
 * broadcast MGMT_EV_DEVICE_BLOCKED on success.
 * NOTE(review): the "u16 len" parameter, err/status declarations,
 * hci_dev_lock(), the error-check/goto between list-add and the event,
 * and braces are elided in this extract; code left byte-identical.
 */
4961 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4964 struct mgmt_cp_block_device *cp = data;
4968 bt_dev_dbg(hdev, "sock %p", sk);
4970 if (!bdaddr_type_is_valid(cp->addr.type))
4971 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4972 MGMT_STATUS_INVALID_PARAMS,
4973 &cp->addr, sizeof(cp->addr));
4977 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4980 status = MGMT_STATUS_FAILED;
/* Notify other mgmt sockets that the device is now blocked. */
4984 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4986 status = MGMT_STATUS_SUCCESS;
4989 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4990 &cp->addr, sizeof(cp->addr));
4992 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from
 * hdev->blacklist and broadcast MGMT_EV_DEVICE_UNBLOCKED on success; an
 * address that wasn't blocked yields MGMT_STATUS_INVALID_PARAMS.
 * NOTE(review): the "u16 len" parameter, err/status declarations,
 * hci_dev_lock(), the error-check/goto between list-del and the event,
 * and braces are elided in this extract; code left byte-identical.
 */
5000 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5000 struct mgmt_cp_unblock_device *cp = data;
5004 bt_dev_dbg(hdev, "sock %p", sk);
5006 if (!bdaddr_type_is_valid(cp->addr.type))
5007 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5008 MGMT_STATUS_INVALID_PARAMS,
5009 &cp->addr, sizeof(cp->addr));
5013 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
5016 status = MGMT_STATUS_INVALID_PARAMS;
5020 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5022 status = MGMT_STATUS_SUCCESS;
5025 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5026 &cp->addr, sizeof(cp->addr));
5028 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID (source, vendor,
 * product, version) on the hdev, reply immediately, then refresh the EIR
 * data so the DID record becomes visible to remote devices. Source must
 * be 0x0000-0x0002 (disabled / Bluetooth SIG / USB-IF).
 * NOTE(review): the "u16 len" parameter, err/source declarations,
 * hci_dev_lock(), the reply's NULL payload arguments and braces are
 * elided in this extract; code left byte-identical.
 */
5033 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5036 struct mgmt_cp_set_device_id *cp = data;
5037 struct hci_request req;
5041 bt_dev_dbg(hdev, "sock %p", sk);
5043 source = __le16_to_cpu(cp->source);
5045 if (source > 0x0002)
5046 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5047 MGMT_STATUS_INVALID_PARAMS);
5051 hdev->devid_source = source;
5052 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5053 hdev->devid_product = __le16_to_cpu(cp->product);
5054 hdev->devid_version = __le16_to_cpu(cp->version);
5056 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Push the new Device ID into the EIR data asynchronously. */
5059 hci_req_init(&req, hdev);
5060 __hci_req_update_eir(&req);
5061 hci_req_run(&req, NULL);
5063 hci_dev_unlock(hdev);
/* Request-completion stub used when re-enabling instance advertising:
 * only logs the status (the "u16 opcode" parameter line and braces are
 * elided in this extract).
 */
5068 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
5071 bt_dev_dbg(hdev, "status %d", status);
/* Completion handler for MGMT_OP_SET_ADVERTISING: on error, fail all
 * pending Set Advertising commands; on success, sync HCI_ADVERTISING
 * with the controller's HCI_LE_ADV state, answer the pending commands,
 * emit new settings, wake suspend waiters, and — if Set Advertising was
 * just turned off while advertising instances exist — re-schedule
 * instance advertising.
 * NOTE(review): hci_dev_lock(), goto labels ("unlock"), the err/instance
 * declarations' lines, null-check branches and braces are elided in this
 * extract; code left byte-identical.
 */
5074 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
5077 struct cmd_lookup match = { NULL, hdev };
5078 struct hci_request req;
5080 struct adv_info *adv_instance;
5086 u8 mgmt_err = mgmt_status(status);
5088 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5089 cmd_status_rsp, &mgmt_err);
/* Mirror the controller's actual LE advertising state into the flag. */
5093 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5094 hci_dev_set_flag(hdev, HCI_ADVERTISING)
5096 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5098 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5101 new_settings(hdev, match.sk);
5106 /* Handle suspend notifier */
5107 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
5108 hdev->suspend_tasks)) {
5109 bt_dev_dbg(hdev, "Paused advertising");
5110 wake_up(&hdev->suspend_wait_q);
5111 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
5112 hdev->suspend_tasks)) {
5113 bt_dev_dbg(hdev, "Unpaused advertising");
5114 wake_up(&hdev->suspend_wait_q);
5117 /* If "Set Advertising" was just disabled and instance advertising was
5118 * set up earlier, then re-enable multi-instance advertising.
5120 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5121 list_empty(&hdev->adv_instances))
/* Fall back to the first configured instance when none is current. */
5124 instance = hdev->cur_adv_instance;
5126 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5127 struct adv_info, list);
5131 instance = adv_instance->instance;
5134 hci_req_init(&req, hdev);
5136 err = __hci_req_schedule_adv_instance(&req, instance, true);
5139 err = hci_req_run(&req, enable_advertising_instance);
5142 bt_dev_err(hdev, "failed to re-configure advertising");
5145 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler. val 0x00 disables, 0x01 enables,
 * 0x02 enables connectable advertising. When no HCI traffic is needed
 * (powered off, no state change, LE connections exist, or active LE
 * scanning) the flags are toggled directly and a settings response is
 * sent; otherwise a pending command is queued and an HCI request is run,
 * completed by set_advertising_complete(). Rejected while the
 * experimental LL privacy mode is enabled or advertising is paused.
 * NOTE(review): val/err/status declarations, hci_dev_lock(), the
 * MGMT_STATUS_BUSY constants, else keywords, goto targets and braces
 * are elided in this extract; code left byte-identical.
 */
5148 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5151 struct mgmt_mode *cp = data;
5152 struct mgmt_pending_cmd *cmd;
5153 struct hci_request req;
5157 bt_dev_dbg(hdev, "sock %p", sk);
5159 status = mgmt_le_support(hdev);
5161 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5164 /* Enabling the experimental LL Privay support disables support for
5167 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
5168 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5169 MGMT_STATUS_NOT_SUPPORTED);
5171 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5172 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5173 MGMT_STATUS_INVALID_PARAMS);
5175 if (hdev->advertising_paused)
5176 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5183 /* The following conditions are ones which mean that we should
5184 * not do any HCI communication but directly send a mgmt
5185 * response to user space (after toggling the flag if
5188 if (!hdev_is_powered(hdev) ||
5189 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5190 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5191 hci_conn_num(hdev, LE_LINK) > 0 ||
5192 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5193 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
/* Direct flag toggle, no HCI traffic. */
5197 hdev->cur_adv_instance = 0x00;
5198 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5199 if (cp->val == 0x02)
5200 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5202 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5204 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5205 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5208 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5213 err = new_settings(hdev, sk);
/* Serialize with any in-flight Set Advertising / Set LE command. */
5218 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5219 pending_find(MGMT_OP_SET_LE, hdev)) {
5220 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5225 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5231 hci_req_init(&req, hdev);
5233 if (cp->val == 0x02)
5234 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5236 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5238 cancel_adv_timeout(hdev);
5241 /* Switch to instance "0" for the Set Advertising setting.
5242 * We cannot use update_[adv|scan_rsp]_data() here as the
5243 * HCI_ADVERTISING flag is not yet set.
5245 hdev->cur_adv_instance = 0x00;
5247 if (ext_adv_capable(hdev)) {
5248 __hci_req_start_ext_adv(&req, 0x00);
5250 __hci_req_update_adv_data(&req, 0x00);
5251 __hci_req_update_scan_rsp_data(&req, 0x00);
5252 __hci_req_enable_advertising(&req);
5255 __hci_req_disable_advertising(&req);
5258 err = hci_req_run(&req, set_advertising_complete);
5260 mgmt_pending_remove(cmd);
5263 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: record the LE static random
 * address. Only allowed while powered off; the address must either be
 * BDADDR_ANY (disable) or a valid static random address (not
 * BDADDR_NONE, two most significant bits set per the Core spec).
 * NOTE(review): err declaration, hci_dev_lock() and braces are elided
 * in this extract; code left byte-identical.
 */
5267 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5268 void *data, u16 len)
5270 struct mgmt_cp_set_static_address *cp = data;
5273 bt_dev_dbg(hdev, "sock %p", sk);
5275 if (!lmp_le_capable(hdev))
5276 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5277 MGMT_STATUS_NOT_SUPPORTED);
5279 if (hdev_is_powered(hdev))
5280 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5281 MGMT_STATUS_REJECTED);
5283 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5284 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5285 return mgmt_cmd_status(sk, hdev->id,
5286 MGMT_OP_SET_STATIC_ADDRESS,
5287 MGMT_STATUS_INVALID_PARAMS);
5289 /* Two most significant bits shall be set */
5290 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5291 return mgmt_cmd_status(sk, hdev->id,
5292 MGMT_OP_SET_STATIC_ADDRESS,
5293 MGMT_STATUS_INVALID_PARAMS);
5298 bacpy(&hdev->static_addr, &cp->bdaddr);
5300 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5304 err = new_settings(hdev, sk);
5307 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validate and store the LE scan
 * interval and window (each 0x0004-0x4000, window <= interval, per the
 * HCI LE Set Scan Parameters ranges), then restart background passive
 * scanning so the new values take effect if it is currently running.
 * NOTE(review): err declaration, hci_dev_lock(), the reply's NULL
 * payload arguments and braces are elided in this extract; code left
 * byte-identical.
 */
5311 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5312 void *data, u16 len)
5314 struct mgmt_cp_set_scan_params *cp = data;
5315 __u16 interval, window;
5318 bt_dev_dbg(hdev, "sock %p", sk);
5320 if (!lmp_le_capable(hdev))
5321 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5322 MGMT_STATUS_NOT_SUPPORTED);
5324 interval = __le16_to_cpu(cp->interval);
5326 if (interval < 0x0004 || interval > 0x4000)
5327 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5328 MGMT_STATUS_INVALID_PARAMS);
5330 window = __le16_to_cpu(cp->window);
5332 if (window < 0x0004 || window > 0x4000)
5333 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5334 MGMT_STATUS_INVALID_PARAMS);
/* The scan window may not exceed the scan interval. */
5336 if (window > interval)
5337 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5338 MGMT_STATUS_INVALID_PARAMS);
5342 hdev->le_scan_interval = interval;
5343 hdev->le_scan_window = window;
5345 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5348 /* If background scan is running, restart it so new parameters are
5351 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5352 hdev->discovery.state == DISCOVERY_STOPPED) {
5353 struct hci_request req;
5355 hci_req_init(&req, hdev);
5357 hci_req_add_le_scan_disable(&req, false);
5358 hci_req_add_le_passive_scan(&req);
5360 hci_req_run(&req, NULL);
5363 hci_dev_unlock(hdev);
/* Completion handler for MGMT_OP_SET_FAST_CONNECTABLE: on HCI error,
 * report the translated status; on success, flip HCI_FAST_CONNECTABLE
 * per the stored command parameter, answer the command and broadcast
 * the new settings.
 * NOTE(review): hci_dev_lock(), the !cmd check, if(status)/else split,
 * goto targets and braces are elided in this extract; code left
 * byte-identical.
 */
5368 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
5371 struct mgmt_pending_cmd *cmd;
5373 bt_dev_dbg(hdev, "status 0x%02x", status);
5377 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5382 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5383 mgmt_status(status));
5385 struct mgmt_mode *cp = cmd->param;
5388 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5390 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5392 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5393 new_settings(hdev, cmd->sk);
5396 mgmt_pending_remove(cmd);
5399 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR enabled and
 * HCI >= 1.2. No-op (settings response only) when the flag already
 * matches or the controller is powered off; otherwise write the fast-
 * connectable page scan parameters via an HCI request completed by
 * fast_connectable_complete().
 * NOTE(review): err declaration, hci_dev_lock(), MGMT_STATUS_BUSY on
 * the duplicate path, the !cmd branch, goto targets and braces are
 * elided in this extract; code left byte-identical.
 */
5402 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5403 void *data, u16 len)
5405 struct mgmt_mode *cp = data;
5406 struct mgmt_pending_cmd *cmd;
5407 struct hci_request req;
5410 bt_dev_dbg(hdev, "sock %p", sk);
5412 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
5413 hdev->hci_ver < BLUETOOTH_VER_1_2)
5414 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5415 MGMT_STATUS_NOT_SUPPORTED);
5417 if (cp->val != 0x00 && cp->val != 0x01)
5418 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5419 MGMT_STATUS_INVALID_PARAMS);
5423 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5424 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Already in the requested state: just echo the settings. */
5429 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
5430 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5435 if (!hdev_is_powered(hdev)) {
5436 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
5437 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5439 new_settings(hdev, sk);
5443 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5450 hci_req_init(&req, hdev);
5452 __hci_req_write_fast_connectable(&req, cp->val);
5454 err = hci_req_run(&req, fast_connectable_complete);
5456 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5457 MGMT_STATUS_FAILED);
5458 mgmt_pending_remove(cmd);
5462 hci_dev_unlock(hdev);
/* Completion handler for MGMT_OP_SET_BREDR: on HCI failure, roll back
 * the optimistically-set HCI_BREDR_ENABLED flag and report the error;
 * on success, answer the command and broadcast the new settings.
 * NOTE(review): hci_dev_lock(), the !cmd check, if(status)/else split,
 * goto targets and braces are elided in this extract; code left
 * byte-identical.
 */
5467 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5469 struct mgmt_pending_cmd *cmd;
5471 bt_dev_dbg(hdev, "status 0x%02x", status);
5475 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5480 u8 mgmt_err = mgmt_status(status);
5482 /* We need to restore the flag if related HCI commands
5485 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5487 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5489 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5490 new_settings(hdev, cmd->sk);
5493 mgmt_pending_remove(cmd);
5496 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable or disable BR/EDR on a dual-mode
 * controller (LE must stay enabled). Powered-off disable also clears
 * the BR/EDR-dependent flags. While powered, disabling is rejected, and
 * re-enabling is rejected when a static address or Secure Connections
 * is in use (see the inline rationale). Enabling while powered flips
 * HCI_BREDR_ENABLED up front so the advertising-data flags are generated
 * correctly, then runs an HCI request completed by set_bredr_complete().
 * NOTE(review): err declaration, hci_dev_lock(), the cp->val branch
 * around the powered-off disable path, MGMT_STATUS_BUSY on the
 * duplicate path, the !cmd branch, goto targets and braces are elided
 * in this extract; code left byte-identical.
 */
5499 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5501 struct mgmt_mode *cp = data;
5502 struct mgmt_pending_cmd *cmd;
5503 struct hci_request req;
5506 bt_dev_dbg(hdev, "sock %p", sk);
5508 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5509 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5510 MGMT_STATUS_NOT_SUPPORTED);
5512 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5513 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5514 MGMT_STATUS_REJECTED);
5516 if (cp->val != 0x00 && cp->val != 0x01)
5517 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5518 MGMT_STATUS_INVALID_PARAMS);
5522 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5523 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5527 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR while off also clears dependent settings. */
5529 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5530 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5531 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5532 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5533 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5536 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5538 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5542 err = new_settings(hdev, sk);
5546 /* Reject disabling when powered on */
5548 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5549 MGMT_STATUS_REJECTED);
5552 /* When configuring a dual-mode controller to operate
5553 * with LE only and using a static address, then switching
5554 * BR/EDR back on is not allowed.
5556 * Dual-mode controllers shall operate with the public
5557 * address as its identity address for BR/EDR and LE. So
5558 * reject the attempt to create an invalid configuration.
5560 * The same restrictions applies when secure connections
5561 * has been enabled. For BR/EDR this is a controller feature
5562 * while for LE it is a host stack feature. This means that
5563 * switching BR/EDR back on when secure connections has been
5564 * enabled is not a supported transaction.
5566 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5567 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5568 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5569 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5570 MGMT_STATUS_REJECTED);
5575 if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5576 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5581 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5587 /* We need to flip the bit already here so that
5588 * hci_req_update_adv_data generates the correct flags.
5590 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5592 hci_req_init(&req, hdev);
5594 __hci_req_write_fast_connectable(&req, false);
5595 __hci_req_update_scan(&req);
5597 /* Since only the advertising data flags will change, there
5598 * is no need to update the scan response data.
5600 __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5602 err = hci_req_run(&req, set_bredr_complete);
5604 mgmt_pending_remove(cmd);
5607 hci_dev_unlock(hdev);
/* HCI request completion callback for Write Secure Connections Host Support.
 *
 * Resolves the pending MGMT_OP_SET_SECURE_CONN command: on HCI failure the
 * command is answered with an error status; on success the HCI_SC_ENABLED /
 * HCI_SC_ONLY flags are updated and a settings response plus New Settings
 * event are emitted.
 *
 * NOTE(review): the switch statement and branch structure were dropped by
 * the extraction; the three flag pairs below presumably correspond to the
 * requested values 0x00 (off), 0x01 (SC enabled) and 0x02 (SC-only mode) —
 * confirm against upstream mgmt.c.
 */
5611 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5613 struct mgmt_pending_cmd *cmd;
5614 struct mgmt_mode *cp;
5616 bt_dev_dbg(hdev, "status %u", status);
5620 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
/* HCI command failed: report the translated status to the caller */
5625 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5626 mgmt_status(status));
/* Requested value 0x00: secure connections fully disabled */
5634 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5635 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Requested value 0x01: SC enabled, but legacy pairing still allowed */
5638 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5639 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* Requested value 0x02: SC-only mode */
5642 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5643 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5647 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5648 new_settings(hdev, cmd->sk);
5651 mgmt_pending_remove(cmd);
5653 hci_dev_unlock(hdev);
/* MGMT Set Secure Connections command handler.
 *
 * Accepts values 0x00 (off), 0x01 (on) and 0x02 (SC-only). When the device
 * is powered down, lacks controller SC support, or BR/EDR is disabled, the
 * flags are changed directly; otherwise the change is sent to the controller
 * via HCI Write SC Support and finalized in sc_enable_complete.
 *
 * NOTE(review): extraction dropped lines here (`int err`, `u8 val`,
 * hci_dev_lock, goto labels, closing braces) — verify structure against
 * upstream mgmt.c.
 */
5656 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5657 void *data, u16 len)
5659 struct mgmt_mode *cp = data;
5660 struct mgmt_pending_cmd *cmd;
5661 struct hci_request req;
5665 bt_dev_dbg(hdev, "sock %p", sk);
/* SC needs controller support or at least an enabled LE host stack */
5667 if (!lmp_sc_capable(hdev) &&
5668 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5669 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5670 MGMT_STATUS_NOT_SUPPORTED);
/* For BR/EDR, SC is layered on top of SSP: reject if SSP is off */
5672 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5673 lmp_sc_capable(hdev) &&
5674 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5675 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5676 MGMT_STATUS_REJECTED);
5678 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5679 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5680 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: nothing to tell the controller in these states */
5684 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5685 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5689 changed = !hci_dev_test_and_set_flag(hdev,
5691 if (cp->val == 0x02)
5692 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5694 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5696 changed = hci_dev_test_and_clear_flag(hdev,
5698 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5701 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5706 err = new_settings(hdev, sk);
/* Only one Set Secure Connections command may be pending */
5711 if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5712 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state (including SC-only sub-state) */
5719 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
5720 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5721 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5725 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5731 hci_req_init(&req, hdev);
5732 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5733 err = hci_req_run(&req, sc_enable_complete);
5735 mgmt_pending_remove(cmd);
5740 hci_dev_unlock(hdev);
/* MGMT Set Debug Keys command handler.
 *
 * Value 0x00 disables, 0x01 keeps debug keys (HCI_KEEP_DEBUG_KEYS), and
 * 0x02 additionally makes the controller use its SSP debug key
 * (HCI_USE_DEBUG_KEYS). When the usage flag actually changed on a powered
 * device with SSP enabled, the new debug mode is pushed to the controller.
 */
5744 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5745 void *data, u16 len)
5747 struct mgmt_mode *cp = data;
5748 bool changed, use_changed;
5751 bt_dev_dbg(hdev, "sock %p", sk);
5753 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5755 MGMT_STATUS_INVALID_PARAMS);
/* Any non-zero value means debug keys are to be retained */
5760 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5762 changed = hci_dev_test_and_clear_flag(hdev,
5763 HCI_KEEP_DEBUG_KEYS);
/* Only 0x02 turns on actual use of the SSP debug key */
5765 if (cp->val == 0x02)
5766 use_changed = !hci_dev_test_and_set_flag(hdev,
5767 HCI_USE_DEBUG_KEYS);
5769 use_changed = hci_dev_test_and_clear_flag(hdev,
5770 HCI_USE_DEBUG_KEYS);
/* Inform the controller only when the usage state really changed */
5772 if (hdev_is_powered(hdev) && use_changed &&
5773 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5774 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5775 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5776 sizeof(mode), &mode);
5779 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5784 err = new_settings(hdev, sk);
5787 hci_dev_unlock(hdev);
/* MGMT Set Privacy command handler.
 *
 * Enables/disables LE privacy (use of resolvable private addresses).
 * Values: 0x00 off, 0x01 privacy on, 0x02 limited privacy. Only allowed
 * while the device is powered off; the IRK from the command parameters is
 * installed (or wiped) accordingly.
 */
5791 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5794 struct mgmt_cp_set_privacy *cp = cp_data;
5798 bt_dev_dbg(hdev, "sock %p", sk);
5800 if (!lmp_le_capable(hdev))
5801 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5802 MGMT_STATUS_NOT_SUPPORTED);
5804 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
5805 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5806 MGMT_STATUS_INVALID_PARAMS);
/* Privacy can only be reconfigured while powered down */
5808 if (hdev_is_powered(hdev))
5809 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5810 MGMT_STATUS_REJECTED);
5814 /* If user space supports this command it is also expected to
5815 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5817 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enabling: install the supplied IRK and force RPA regeneration */
5820 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5821 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5822 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5823 hci_adv_instances_set_rpa_expired(hdev, true);
5824 if (cp->privacy == 0x02)
5825 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
5827 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disabling: wipe the IRK and all privacy-related flags */
5829 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5830 memset(hdev->irk, 0, sizeof(hdev->irk));
5831 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5832 hci_adv_instances_set_rpa_expired(hdev, false);
5833 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
5836 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5841 err = new_settings(hdev, sk);
5844 hci_dev_unlock(hdev);
5848 static bool irk_is_valid(struct mgmt_irk_info *irk)
5850 switch (irk->addr.type) {
5851 case BDADDR_LE_PUBLIC:
5854 case BDADDR_LE_RANDOM:
5855 /* Two most significant bits shall be set */
5856 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load IRKs command handler.
 *
 * Replaces the kernel's stored Identity Resolving Keys with the list
 * supplied by userspace. The count and total payload length are validated
 * first (max_irk_count bounds the count so struct_size cannot overflow a
 * u16), every entry's address is checked, then the existing IRK store is
 * cleared and repopulated, skipping any blocked keys.
 */
5864 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5867 struct mgmt_cp_load_irks *cp = cp_data;
5868 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5869 sizeof(struct mgmt_irk_info));
5870 u16 irk_count, expected_len;
5873 bt_dev_dbg(hdev, "sock %p", sk);
5875 if (!lmp_le_capable(hdev))
5876 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5877 MGMT_STATUS_NOT_SUPPORTED);
5879 irk_count = __le16_to_cpu(cp->irk_count);
5880 if (irk_count > max_irk_count) {
5881 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
5883 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5884 MGMT_STATUS_INVALID_PARAMS);
/* The buffer must be exactly header + irk_count entries */
5887 expected_len = struct_size(cp, irks, irk_count);
5888 if (expected_len != len) {
5889 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
5891 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5892 MGMT_STATUS_INVALID_PARAMS);
5895 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate every entry before mutating any state */
5897 for (i = 0; i < irk_count; i++) {
5898 struct mgmt_irk_info *key = &cp->irks[i];
5900 if (!irk_is_valid(key))
5901 return mgmt_cmd_status(sk, hdev->id,
5903 MGMT_STATUS_INVALID_PARAMS);
5908 hci_smp_irks_clear(hdev);
5910 for (i = 0; i < irk_count; i++) {
5911 struct mgmt_irk_info *irk = &cp->irks[i];
/* Administratively blocked keys are skipped, not treated as errors */
5913 if (hci_is_blocked_key(hdev,
5914 HCI_BLOCKED_KEY_TYPE_IRK,
5916 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
5921 hci_add_irk(hdev, &irk->addr.bdaddr,
5922 le_addr_type(irk->addr.type), irk->val,
/* Userspace loading IRKs implies it can resolve RPAs */
5926 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5928 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5930 hci_dev_unlock(hdev);
5935 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5937 if (key->master != 0x00 && key->master != 0x01)
5940 switch (key->addr.type) {
5941 case BDADDR_LE_PUBLIC:
5944 case BDADDR_LE_RANDOM:
5945 /* Two most significant bits shall be set */
5946 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT Load Long Term Keys command handler.
 *
 * Replaces the stored SMP LTKs with the userspace-provided list. Count and
 * payload length are validated, each entry is checked with ltk_is_valid(),
 * then the store is cleared and repopulated. The MGMT key type is mapped
 * to the SMP key type / authenticated flag pair; blocked keys are skipped.
 *
 * NOTE(review): the extraction dropped `break;` lines between the switch
 * cases and an apparent `continue`-on-unknown-type path — verify against
 * upstream mgmt.c.
 */
5954 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5955 void *cp_data, u16 len)
5957 struct mgmt_cp_load_long_term_keys *cp = cp_data;
5958 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5959 sizeof(struct mgmt_ltk_info));
5960 u16 key_count, expected_len;
5963 bt_dev_dbg(hdev, "sock %p", sk);
5965 if (!lmp_le_capable(hdev))
5966 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5967 MGMT_STATUS_NOT_SUPPORTED);
5969 key_count = __le16_to_cpu(cp->key_count);
5970 if (key_count > max_key_count) {
5971 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
5973 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5974 MGMT_STATUS_INVALID_PARAMS);
/* The buffer must be exactly header + key_count entries */
5977 expected_len = struct_size(cp, keys, key_count);
5978 if (expected_len != len) {
5979 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
5981 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5982 MGMT_STATUS_INVALID_PARAMS);
5985 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Validate everything before clearing the existing key store */
5987 for (i = 0; i < key_count; i++) {
5988 struct mgmt_ltk_info *key = &cp->keys[i];
5990 if (!ltk_is_valid(key))
5991 return mgmt_cmd_status(sk, hdev->id,
5992 MGMT_OP_LOAD_LONG_TERM_KEYS,
5993 MGMT_STATUS_INVALID_PARAMS);
5998 hci_smp_ltks_clear(hdev);
6000 for (i = 0; i < key_count; i++) {
6001 struct mgmt_ltk_info *key = &cp->keys[i];
6002 u8 type, authenticated;
6004 if (hci_is_blocked_key(hdev,
6005 HCI_BLOCKED_KEY_TYPE_LTK,
6007 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map MGMT key type -> (authenticated, SMP key type) */
6012 switch (key->type) {
6013 case MGMT_LTK_UNAUTHENTICATED:
6014 authenticated = 0x00;
6015 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6017 case MGMT_LTK_AUTHENTICATED:
6018 authenticated = 0x01;
6019 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6021 case MGMT_LTK_P256_UNAUTH:
6022 authenticated = 0x00;
6023 type = SMP_LTK_P256;
6025 case MGMT_LTK_P256_AUTH:
6026 authenticated = 0x01;
6027 type = SMP_LTK_P256;
6029 case MGMT_LTK_P256_DEBUG:
6030 authenticated = 0x00;
6031 type = SMP_LTK_P256_DEBUG;
6037 hci_add_ltk(hdev, &key->addr.bdaddr,
6038 le_addr_type(key->addr.type), type, authenticated,
6039 key->val, key->enc_size, key->ediv, key->rand);
6042 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6045 hci_dev_unlock(hdev);
/* Completion helper for a pending Get Connection Information command.
 *
 * Builds the mgmt_rp_get_conn_info response from the hci_conn cached
 * values on success, or with invalid sentinels on failure, sends it to
 * the originating socket and releases the connection reference taken
 * when the command was queued.
 */
6050 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6052 struct hci_conn *conn = cmd->user_data;
6053 struct mgmt_rp_get_conn_info rp;
/* The address in the reply is echoed from the original command */
6056 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6058 if (status == MGMT_STATUS_SUCCESS) {
6059 rp.rssi = conn->rssi;
6060 rp.tx_power = conn->tx_power;
6061 rp.max_tx_power = conn->max_tx_power;
/* Failure: report explicit "invalid" sentinel values */
6063 rp.rssi = HCI_RSSI_INVALID;
6064 rp.tx_power = HCI_TX_POWER_INVALID;
6065 rp.max_tx_power = HCI_TX_POWER_INVALID;
6068 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6069 status, &rp, sizeof(rp));
/* Drop the hold taken by get_conn_info() when queuing the command */
6071 hci_conn_drop(conn);
/* HCI request callback for the RSSI / TX-power refresh issued by
 * get_conn_info().
 *
 * Recovers the connection handle from whichever command was last sent
 * (Read RSSI or Read Transmit Power Level — both start with the handle),
 * looks up the matching pending MGMT command and completes it.
 */
6077 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
6080 struct hci_cp_read_rssi *cp;
6081 struct mgmt_pending_cmd *cmd;
6082 struct hci_conn *conn;
6086 bt_dev_dbg(hdev, "status 0x%02x", hci_status);
6090 /* Commands sent in request are either Read RSSI or Read Transmit Power
6091 * Level so we check which one was last sent to retrieve connection
6092 * handle. Both commands have handle as first parameter so it's safe to
6093 * cast data on the same command struct.
6095 * First command sent is always Read RSSI and we fail only if it fails.
6096 * In other case we simply override error to indicate success as we
6097 * already remembered if TX power value is actually valid.
6099 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
6101 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
/* TX-power was last: its failure is masked, RSSI already succeeded */
6102 status = MGMT_STATUS_SUCCESS;
6104 status = mgmt_status(hci_status);
/* Neither command matched — should not happen */
6108 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
6112 handle = __le16_to_cpu(cp->handle);
6113 conn = hci_conn_hash_lookup_handle(hdev, handle);
6115 bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
6120 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
/* cmd_complete is conn_info_cmd_complete, set when the cmd was queued */
6124 cmd->cmd_complete(cmd, status);
6125 mgmt_pending_remove(cmd);
6128 hci_dev_unlock(hdev);
/* MGMT Get Connection Information command handler.
 *
 * Returns RSSI / TX power / max TX power for a connected device. Cached
 * values younger than a randomized age window are returned directly;
 * otherwise a refresh request (Read RSSI, plus Read TX Power where still
 * needed) is queued and the command completes asynchronously via
 * conn_info_refresh_complete.
 */
6131 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6134 struct mgmt_cp_get_conn_info *cp = data;
6135 struct mgmt_rp_get_conn_info rp;
6136 struct hci_conn *conn;
6137 unsigned long conn_info_age;
6140 bt_dev_dbg(hdev, "sock %p", sk);
6142 memset(&rp, 0, sizeof(rp));
6143 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6144 rp.addr.type = cp->addr.type;
6146 if (!bdaddr_type_is_valid(cp->addr.type))
6147 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6148 MGMT_STATUS_INVALID_PARAMS,
6153 if (!hdev_is_powered(hdev)) {
6154 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6155 MGMT_STATUS_NOT_POWERED, &rp,
/* Address type selects which connection hash (ACL vs LE) to search */
6160 if (cp->addr.type == BDADDR_BREDR)
6161 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6164 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6166 if (!conn || conn->state != BT_CONNECTED) {
6167 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6168 MGMT_STATUS_NOT_CONNECTED, &rp,
/* Only one Get Conn Info may be pending per connection */
6173 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
6174 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6175 MGMT_STATUS_BUSY, &rp, sizeof(rp));
6179 /* To avoid client trying to guess when to poll again for information we
6180 * calculate conn info age as random value between min/max set in hdev.
6182 conn_info_age = hdev->conn_info_min_age +
6183 prandom_u32_max(hdev->conn_info_max_age -
6184 hdev->conn_info_min_age);
6186 /* Query controller to refresh cached values if they are too old or were
6189 if (time_after(jiffies, conn->conn_info_timestamp +
6190 msecs_to_jiffies(conn_info_age)) ||
6191 !conn->conn_info_timestamp) {
6192 struct hci_request req;
6193 struct hci_cp_read_tx_power req_txp_cp;
6194 struct hci_cp_read_rssi req_rssi_cp;
6195 struct mgmt_pending_cmd *cmd;
6197 hci_req_init(&req, hdev);
6198 req_rssi_cp.handle = cpu_to_le16(conn->handle);
6199 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
6202 /* For LE links TX power does not change thus we don't need to
6203 * query for it once value is known.
6205 if (!bdaddr_type_is_le(cp->addr.type) ||
6206 conn->tx_power == HCI_TX_POWER_INVALID) {
6207 req_txp_cp.handle = cpu_to_le16(conn->handle);
6208 req_txp_cp.type = 0x00;
6209 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6210 sizeof(req_txp_cp), &req_txp_cp);
6213 /* Max TX power needs to be read only once per connection */
6214 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
6215 req_txp_cp.handle = cpu_to_le16(conn->handle);
6216 req_txp_cp.type = 0x01;
6217 hci_req_add(&req, HCI_OP_READ_TX_POWER,
6218 sizeof(req_txp_cp), &req_txp_cp);
6221 err = hci_req_run(&req, conn_info_refresh_complete);
6225 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Keep the connection alive until the async completion runs */
6232 hci_conn_hold(conn);
6233 cmd->user_data = hci_conn_get(conn);
6234 cmd->cmd_complete = conn_info_cmd_complete;
6236 conn->conn_info_timestamp = jiffies;
6238 /* Cache is valid, just reply with values cached in hci_conn */
6239 rp.rssi = conn->rssi;
6240 rp.tx_power = conn->tx_power;
6241 rp.max_tx_power = conn->max_tx_power;
6243 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6244 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6248 hci_dev_unlock(hdev);
/* Completion helper for a pending Get Clock Information command.
 *
 * Fills the response with the local clock (from hdev) and, when a
 * connection was involved, the piconet clock and accuracy, then replies
 * and drops the connection reference.
 *
 * NOTE(review): the matching hci_dev_put() and the conn NULL-check
 * branches were dropped by the extraction — verify against upstream.
 */
6252 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6254 struct hci_conn *conn = cmd->user_data;
6255 struct mgmt_rp_get_clock_info rp;
6256 struct hci_dev *hdev;
6259 memset(&rp, 0, sizeof(rp));
/* Echo the address from the original command parameters */
6260 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6265 hdev = hci_dev_get(cmd->index);
6267 rp.local_clock = cpu_to_le32(hdev->clock);
6272 rp.piconet_clock = cpu_to_le32(conn->clock);
6273 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6277 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6281 hci_conn_drop(conn);
/* HCI request callback for Read Clock issued by get_clock_info().
 *
 * Recovers the optional connection (which == piconet clock implies a
 * handle was supplied), finds the matching pending MGMT command and
 * completes it with the translated status.
 */
6288 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6290 struct hci_cp_read_clock *hci_cp;
6291 struct mgmt_pending_cmd *cmd;
6292 struct hci_conn *conn;
6294 bt_dev_dbg(hdev, "status %u", status);
6298 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means the piconet clock of a specific connection was read */
6302 if (hci_cp->which) {
6303 u16 handle = __le16_to_cpu(hci_cp->handle);
6304 conn = hci_conn_hash_lookup_handle(hdev, handle);
6309 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
/* cmd_complete is clock_info_cmd_complete, set in get_clock_info() */
6313 cmd->cmd_complete(cmd, mgmt_status(status));
6314 mgmt_pending_remove(cmd);
6317 hci_dev_unlock(hdev);
/* MGMT Get Clock Information command handler (BR/EDR only).
 *
 * Reads the local clock and, when a peer address is given and connected,
 * also the piconet clock for that connection. Completes asynchronously
 * through get_clock_info_complete / clock_info_cmd_complete.
 */
6320 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
6323 struct mgmt_cp_get_clock_info *cp = data;
6324 struct mgmt_rp_get_clock_info rp;
6325 struct hci_cp_read_clock hci_cp;
6326 struct mgmt_pending_cmd *cmd;
6327 struct hci_request req;
6328 struct hci_conn *conn;
6331 bt_dev_dbg(hdev, "sock %p", sk);
6333 memset(&rp, 0, sizeof(rp));
6334 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6335 rp.addr.type = cp->addr.type;
/* Clock information only exists for BR/EDR links */
6337 if (cp->addr.type != BDADDR_BREDR)
6338 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6339 MGMT_STATUS_INVALID_PARAMS,
6344 if (!hdev_is_powered(hdev)) {
6345 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6346 MGMT_STATUS_NOT_POWERED, &rp,
/* A non-ANY address requests the piconet clock of that connection */
6351 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6352 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6354 if (!conn || conn->state != BT_CONNECTED) {
6355 err = mgmt_cmd_complete(sk, hdev->id,
6356 MGMT_OP_GET_CLOCK_INFO,
6357 MGMT_STATUS_NOT_CONNECTED,
6365 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
6371 cmd->cmd_complete = clock_info_cmd_complete;
6373 hci_req_init(&req, hdev);
/* First read: local clock (which = 0 from the memset) */
6375 memset(&hci_cp, 0, sizeof(hci_cp));
6376 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Keep the connection alive until the async completion runs */
6379 hci_conn_hold(conn);
6380 cmd->user_data = hci_conn_get(conn);
6382 hci_cp.handle = cpu_to_le16(conn->handle);
6383 hci_cp.which = 0x01; /* Piconet clock */
6384 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
6387 err = hci_req_run(&req, get_clock_info_complete);
6389 mgmt_pending_remove(cmd);
6392 hci_dev_unlock(hdev);
6396 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6398 struct hci_conn *conn;
6400 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6404 if (conn->dst_type != type)
6407 if (conn->state != BT_CONNECTED)
6413 /* This function requires the caller holds hdev->lock */
/* Create (or look up) the connection parameters for addr/addr_type and
 * move them onto the action list that matches the requested auto-connect
 * policy (pend_le_conns for connection attempts, pend_le_reports for
 * advertising reports, neither when disabled).
 *
 * NOTE(review): the extraction dropped `break;` lines between switch
 * cases and the early-return/NULL-check lines — verify against upstream.
 */
6414 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
6415 u8 addr_type, u8 auto_connect)
6417 struct hci_conn_params *params;
6419 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do when the policy is unchanged */
6423 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the params were on before */
6426 list_del_init(&params->action);
6428 switch (auto_connect) {
6429 case HCI_AUTO_CONN_DISABLED:
6430 case HCI_AUTO_CONN_LINK_LOSS:
6431 /* If auto connect is being disabled when we're trying to
6432 * connect to device, keep connecting.
6434 if (params->explicit_connect)
6435 list_add(&params->action, &hdev->pend_le_conns)
6437 case HCI_AUTO_CONN_REPORT:
6438 if (params->explicit_connect)
6439 list_add(&params->action, &hdev->pend_le_conns);
6441 list_add(&params->action, &hdev->pend_le_reports);
6443 case HCI_AUTO_CONN_DIRECT:
6444 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a connection attempt if not already connected */
6445 if (!is_connected(hdev, addr, addr_type))
6446 list_add(&params->action, &hdev->pend_le_conns);
6450 params->auto_connect = auto_connect;
6452 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
6453 addr, addr_type, auto_connect);
6458 static void device_added(struct sock *sk, struct hci_dev *hdev,
6459 bdaddr_t *bdaddr, u8 type, u8 action)
6461 struct mgmt_ev_device_added ev;
6463 bacpy(&ev.addr.bdaddr, bdaddr);
6464 ev.addr.type = type;
6467 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT Add Device command handler.
 *
 * For BR/EDR addresses (action 0x01 only) the device is appended to the
 * whitelist and page scan is updated. For LE identity addresses the
 * auto-connect policy is derived from the action (0x02 always, 0x01
 * direct, else report) and stored via hci_conn_params_set(), after which
 * background scanning is refreshed. Emits Device Added and Device Flags
 * Changed events on success.
 */
6470 static int add_device(struct sock *sk, struct hci_dev *hdev,
6471 void *data, u16 len)
6473 struct mgmt_cp_add_device *cp = data;
6474 u8 auto_conn, addr_type;
6475 struct hci_conn_params *params;
6477 u32 current_flags = 0;
6479 bt_dev_dbg(hdev, "sock %p", sk);
6481 if (!bdaddr_type_is_valid(cp->addr.type) ||
6482 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
6483 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6484 MGMT_STATUS_INVALID_PARAMS,
6485 &cp->addr, sizeof(cp->addr));
6487 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
6488 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6489 MGMT_STATUS_INVALID_PARAMS,
6490 &cp->addr, sizeof(cp->addr));
6494 if (cp->addr.type == BDADDR_BREDR) {
6495 /* Only incoming connections action is supported for now */
6496 if (cp->action != 0x01) {
6497 err = mgmt_cmd_complete(sk, hdev->id,
6499 MGMT_STATUS_INVALID_PARAMS,
6500 &cp->addr, sizeof(cp->addr));
6504 err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
6510 hci_req_update_scan(hdev);
/* LE path: translate MGMT address type to the internal LE type */
6515 addr_type = le_addr_type(cp->addr.type);
6517 if (cp->action == 0x02)
6518 auto_conn = HCI_AUTO_CONN_ALWAYS;
6519 else if (cp->action == 0x01)
6520 auto_conn = HCI_AUTO_CONN_DIRECT;
6522 auto_conn = HCI_AUTO_CONN_REPORT;
6524 /* Kernel internally uses conn_params with resolvable private
6525 * address, but Add Device allows only identity addresses.
6526 * Make sure it is enforced before calling
6527 * hci_conn_params_lookup.
6529 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6530 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6531 MGMT_STATUS_INVALID_PARAMS,
6532 &cp->addr, sizeof(cp->addr));
6536 /* If the connection parameters don't exist for this device,
6537 * they will be created and configured with defaults.
6539 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
6541 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6542 MGMT_STATUS_FAILED, &cp->addr,
/* Re-read the params to report the flags they currently carry */
6546 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6549 current_flags = params->current_flags;
6552 hci_update_background_scan(hdev);
6555 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
6556 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
6557 SUPPORTED_DEVICE_FLAGS(), current_flags);
6559 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
6560 MGMT_STATUS_SUCCESS, &cp->addr,
6564 hci_dev_unlock(hdev);
6568 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6569 bdaddr_t *bdaddr, u8 type)
6571 struct mgmt_ev_device_removed ev;
6573 bacpy(&ev.addr.bdaddr, bdaddr);
6574 ev.addr.type = type;
6576 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT Remove Device command handler.
 *
 * With a concrete address: removes a BR/EDR whitelist entry, or deletes
 * the LE connection parameters (rejecting entries that userspace did not
 * add, i.e. DISABLED/EXPLICIT policies). With BDADDR_ANY (type must be 0):
 * clears the whole whitelist and all non-disabled LE conn params,
 * emitting a Device Removed event for each.
 */
6579 static int remove_device(struct sock *sk, struct hci_dev *hdev,
6580 void *data, u16 len)
6582 struct mgmt_cp_remove_device *cp = data;
6585 bt_dev_dbg(hdev, "sock %p", sk);
/* Non-ANY address: remove one specific device */
6589 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6590 struct hci_conn_params *params;
6593 if (!bdaddr_type_is_valid(cp->addr.type)) {
6594 err = mgmt_cmd_complete(sk, hdev->id,
6595 MGMT_OP_REMOVE_DEVICE,
6596 MGMT_STATUS_INVALID_PARAMS,
6597 &cp->addr, sizeof(cp->addr));
6601 if (cp->addr.type == BDADDR_BREDR) {
6602 err = hci_bdaddr_list_del(&hdev->whitelist,
/* Entry not found in the whitelist */
6606 err = mgmt_cmd_complete(sk, hdev->id,
6607 MGMT_OP_REMOVE_DEVICE,
6608 MGMT_STATUS_INVALID_PARAMS,
6614 hci_req_update_scan(hdev);
6616 device_removed(sk, hdev, &cp->addr.bdaddr,
6621 addr_type = le_addr_type(cp->addr.type);
6623 /* Kernel internally uses conn_params with resolvable private
6624 * address, but Remove Device allows only identity addresses.
6625 * Make sure it is enforced before calling
6626 * hci_conn_params_lookup.
6628 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
6629 err = mgmt_cmd_complete(sk, hdev->id,
6630 MGMT_OP_REMOVE_DEVICE,
6631 MGMT_STATUS_INVALID_PARAMS,
6632 &cp->addr, sizeof(cp->addr));
6636 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
6639 err = mgmt_cmd_complete(sk, hdev->id,
6640 MGMT_OP_REMOVE_DEVICE,
6641 MGMT_STATUS_INVALID_PARAMS,
6642 &cp->addr, sizeof(cp->addr));
/* DISABLED/EXPLICIT params were created by the kernel, not Add Device */
6646 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
6647 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
6648 err = mgmt_cmd_complete(sk, hdev->id,
6649 MGMT_OP_REMOVE_DEVICE,
6650 MGMT_STATUS_INVALID_PARAMS,
6651 &cp->addr, sizeof(cp->addr));
6655 list_del(&params->action);
6656 list_del(&params->list);
6658 hci_update_background_scan(hdev);
6660 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wipe everything userspace has added */
6662 struct hci_conn_params *p, *tmp;
6663 struct bdaddr_list *b, *btmp;
/* With the wildcard address only type 0x00 is valid */
6665 if (cp->addr.type) {
6666 err = mgmt_cmd_complete(sk, hdev->id,
6667 MGMT_OP_REMOVE_DEVICE,
6668 MGMT_STATUS_INVALID_PARAMS,
6669 &cp->addr, sizeof(cp->addr));
6673 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
6674 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
6679 hci_req_update_scan(hdev);
6681 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
6682 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
6684 device_removed(sk, hdev, &p->addr, p->addr_type);
/* In-flight explicit connects survive as EXPLICIT-policy params */
6685 if (p->explicit_connect) {
6686 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
6689 list_del(&p->action);
6694 bt_dev_dbg(hdev, "All LE connection parameters were removed");
6696 hci_update_background_scan(hdev);
6700 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
6701 MGMT_STATUS_SUCCESS, &cp->addr,
6704 hci_dev_unlock(hdev);
/* MGMT Load Connection Parameters command handler.
 *
 * Replaces the stored LE connection parameters with the supplied list.
 * Count/length are validated (max_param_count keeps struct_size within
 * u16), disabled params are cleared first, and invalid entries are
 * logged and skipped rather than failing the whole command.
 */
6708 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
6711 struct mgmt_cp_load_conn_param *cp = data;
6712 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
6713 sizeof(struct mgmt_conn_param));
6714 u16 param_count, expected_len;
6717 if (!lmp_le_capable(hdev))
6718 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6719 MGMT_STATUS_NOT_SUPPORTED);
6721 param_count = __le16_to_cpu(cp->param_count);
6722 if (param_count > max_param_count) {
6723 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
6725 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6726 MGMT_STATUS_INVALID_PARAMS);
/* The buffer must be exactly header + param_count entries */
6729 expected_len = struct_size(cp, params, param_count);
6730 if (expected_len != len) {
6731 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
6733 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
6734 MGMT_STATUS_INVALID_PARAMS);
6737 bt_dev_dbg(hdev, "param_count %u", param_count);
6741 hci_conn_params_clear_disabled(hdev);
6743 for (i = 0; i < param_count; i++) {
6744 struct mgmt_conn_param *param = &cp->params[i];
6745 struct hci_conn_params *hci_param;
6746 u16 min, max, latency, timeout;
6749 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
/* Only LE identity address types are meaningful here */
6752 if (param->addr.type == BDADDR_LE_PUBLIC) {
6753 addr_type = ADDR_LE_DEV_PUBLIC;
6754 } else if (param->addr.type == BDADDR_LE_RANDOM) {
6755 addr_type = ADDR_LE_DEV_RANDOM;
6757 bt_dev_err(hdev, "ignoring invalid connection parameters");
6761 min = le16_to_cpu(param->min_interval);
6762 max = le16_to_cpu(param->max_interval);
6763 latency = le16_to_cpu(param->latency);
6764 timeout = le16_to_cpu(param->timeout);
6766 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6767 min, max, latency, timeout);
/* Range-check the four values against the spec limits */
6769 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
6770 bt_dev_err(hdev, "ignoring invalid connection parameters");
6774 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
6777 bt_dev_err(hdev, "failed to add connection parameters");
6781 hci_param->conn_min_interval = min;
6782 hci_param->conn_max_interval = max;
6783 hci_param->conn_latency = latency;
6784 hci_param->supervision_timeout = timeout;
6787 hci_dev_unlock(hdev);
6789 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT Set External Configuration command handler.
 *
 * Only valid while powered off and only on controllers with the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. Toggles HCI_EXT_CONFIGURED and, when
 * the change transitions the controller between the configured and
 * unconfigured index lists, migrates it (removing/re-adding the mgmt
 * index and powering on via the request workqueue).
 */
6793 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6794 void *data, u16 len)
6796 struct mgmt_cp_set_external_config *cp = data;
6800 bt_dev_dbg(hdev, "sock %p", sk);
6802 if (hdev_is_powered(hdev))
6803 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6804 MGMT_STATUS_REJECTED);
6806 if (cp->config != 0x00 && cp->config != 0x01)
6807 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6808 MGMT_STATUS_INVALID_PARAMS);
6810 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6811 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6812 MGMT_STATUS_NOT_SUPPORTED);
6817 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6819 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6821 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6828 err = new_options(hdev, sk);
/* The flag flip may move the device between index lists */
6830 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6831 mgmt_index_removed(hdev);
6833 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6834 hci_dev_set_flag(hdev, HCI_CONFIG);
6835 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6837 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Became unconfigured: run raw until userspace configures it */
6839 set_bit(HCI_RAW, &hdev->flags);
6840 mgmt_index_added(hdev);
6845 hci_dev_unlock(hdev);
/* MGMT Set Public Address command handler.
 *
 * Stores a public address for controllers that need one programmed by the
 * driver (hdev->set_bdaddr). Only valid while powered off and with a
 * non-ANY address. If providing the address completes configuration, the
 * device is migrated to the configured index list and powered on.
 */
6849 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6850 void *data, u16 len)
6852 struct mgmt_cp_set_public_address *cp = data;
6856 bt_dev_dbg(hdev, "sock %p", sk);
6858 if (hdev_is_powered(hdev))
6859 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6860 MGMT_STATUS_REJECTED);
6862 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6863 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6864 MGMT_STATUS_INVALID_PARAMS);
/* Requires driver support for programming the address */
6866 if (!hdev->set_bdaddr)
6867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6868 MGMT_STATUS_NOT_SUPPORTED);
6872 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6873 bacpy(&hdev->public_addr, &cp->bdaddr);
6875 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6882 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6883 err = new_options(hdev, sk);
/* If this was the last missing piece, promote to configured state */
6885 if (is_configured(hdev)) {
6886 mgmt_index_removed(hdev);
6888 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6890 hci_dev_set_flag(hdev, HCI_CONFIG);
6891 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6893 queue_work(hdev->req_workqueue, &hdev->power_on);
6897 hci_dev_unlock(hdev);
/* Completion callback for the Read Local OOB (Extended) Data HCI request
 * issued by read_local_ssp_oob_req().
 *
 * Parses either the legacy or the extended controller reply, builds an
 * EIR blob with class-of-device plus the C192/R192 (and, unless SC-only,
 * C256/R256) values, answers the pending MGMT command, and broadcasts a
 * Local OOB Data Updated event to interested sockets.
 *
 * NOTE(review): the branch assigning h192/r192/h256/r256 from the parsed
 * replies was dropped by the extraction — verify against upstream.
 */
6901 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
6902 u16 opcode, struct sk_buff *skb)
6904 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
6905 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
6906 u8 *h192, *r192, *h256, *r256;
6907 struct mgmt_pending_cmd *cmd;
6911 bt_dev_dbg(hdev, "status %u", status);
6913 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
6917 mgmt_cp = cmd->param;
6920 status = mgmt_status(status);
/* Legacy reply: only the P-192 hash/randomizer pair is available */
6927 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
6928 struct hci_rp_read_local_oob_data *rp;
6930 if (skb->len != sizeof(*rp)) {
6931 status = MGMT_STATUS_FAILED;
6934 status = MGMT_STATUS_SUCCESS;
6935 rp = (void *)skb->data;
/* EIR: class-of-dev (5) + C192 (18) + R192 (18) */
6937 eir_len = 5 + 18 + 18;
/* Extended reply: both P-192 and P-256 pairs */
6944 struct hci_rp_read_local_oob_ext_data *rp;
6946 if (skb->len != sizeof(*rp)) {
6947 status = MGMT_STATUS_FAILED;
6950 status = MGMT_STATUS_SUCCESS;
6951 rp = (void *)skb->data;
/* In SC-only mode the P-192 values must not be exposed */
6953 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6954 eir_len = 5 + 18 + 18;
6958 eir_len = 5 + 18 + 18 + 18 + 18;
6968 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
6975 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
6976 hdev->dev_class, 3);
6979 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6980 EIR_SSP_HASH_C192, h192, 16);
6981 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6982 EIR_SSP_RAND_R192, r192, 16);
6986 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6987 EIR_SSP_HASH_C256, h256, 16);
6988 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6989 EIR_SSP_RAND_R256, r256, 16);
6993 mgmt_rp->type = mgmt_cp->type;
6994 mgmt_rp->eir_len = cpu_to_le16(eir_len);
6996 err = mgmt_cmd_complete(cmd->sk, hdev->id,
6997 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
6998 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
6999 if (err < 0 || status)
/* Requester implicitly subscribes to future OOB data updates */
7002 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7004 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7005 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7006 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7009 mgmt_pending_remove(cmd);
/* Queue the HCI command that reads the local SSP OOB data for BR/EDR.
 * Registers a pending MGMT_OP_READ_LOCAL_OOB_EXT_DATA command, then issues
 * the extended (P-192 + P-256) variant when BR/EDR Secure Connections is
 * enabled, otherwise the legacy P-192-only command. Completion is handled
 * in read_local_oob_ext_data_complete(). On request-submission failure the
 * pending command is removed again.
 * NOTE(review): excerpt — some lines (e.g. error checks) are elided.
 */
7012 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7013 struct mgmt_cp_read_local_oob_ext_data *cp)
7015 struct mgmt_pending_cmd *cmd;
7016 struct hci_request req;
7019 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7024 hci_req_init(&req, hdev);
7026 if (bredr_sc_enabled(hdev))
7027 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7029 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7031 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7033 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 * For BR/EDR with SSP enabled the data must come from the controller, so
 * the work is deferred to read_local_ssp_oob_req(). For LE the reply is
 * assembled synchronously: address + role EIR fields, and (when Secure
 * Connections is enabled) an SMP-generated confirm/random pair.
 * Privacy (RPA in use) causes MGMT_STATUS_REJECTED since the active RPA
 * cannot be reported reliably.
 * NOTE(review): excerpt — goto targets and several closing braces are
 * elided from this listing.
 */
7040 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7041 void *data, u16 data_len)
7043 struct mgmt_cp_read_local_oob_ext_data *cp = data;
7044 struct mgmt_rp_read_local_oob_ext_data *rp;
7047 u8 status, flags, role, addr[7], hash[16], rand[16];
7050 bt_dev_dbg(hdev, "sock %p", sk);
7052 if (hdev_is_powered(hdev)) {
7054 case BIT(BDADDR_BREDR):
7055 status = mgmt_bredr_support(hdev);
7061 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7062 status = mgmt_le_support(hdev);
/* LE EIR budget: addr(9) + role(3) + confirm(18) + random(18) + flags(3) */
7066 eir_len = 9 + 3 + 18 + 18 + 3;
7069 status = MGMT_STATUS_INVALID_PARAMS;
7074 status = MGMT_STATUS_NOT_POWERED;
7078 rp_len = sizeof(*rp) + eir_len;
7079 rp = kmalloc(rp_len, GFP_ATOMIC);
7090 case BIT(BDADDR_BREDR):
7091 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
/* SSP OOB data lives in the controller; go async via HCI. */
7092 err = read_local_ssp_oob_req(hdev, sk, cp);
7093 hci_dev_unlock(hdev);
7097 status = MGMT_STATUS_FAILED;
7100 eir_len = eir_append_data(rp->eir, eir_len,
7102 hdev->dev_class, 3);
7105 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7106 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7107 smp_generate_oob(hdev, hash, rand) < 0) {
7108 hci_dev_unlock(hdev);
7109 status = MGMT_STATUS_FAILED;
7113 /* This should return the active RPA, but since the RPA
7114 * is only programmed on demand, it is really hard to fill
7115 * this in at the moment. For now disallow retrieving
7116 * local out-of-band data when privacy is in use.
7118 * Returning the identity address will not help here since
7119 * pairing happens before the identity resolving key is
7120 * known and thus the connection establishment happens
7121 * based on the RPA and not the identity address.
7123 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7124 hci_dev_unlock(hdev);
7125 status = MGMT_STATUS_REJECTED;
/* Pick the static random address when forced, when no public address
 * exists, or when BR/EDR is disabled and a static address is set.
 */
7129 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7130 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7131 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7132 bacmp(&hdev->static_addr, BDADDR_ANY))) {
7133 memcpy(addr, &hdev->static_addr, 6);
7136 memcpy(addr, &hdev->bdaddr, 6);
7140 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7141 addr, sizeof(addr));
7143 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7148 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
7149 &role, sizeof(role));
7151 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
7152 eir_len = eir_append_data(rp->eir, eir_len,
7154 hash, sizeof(hash));
7156 eir_len = eir_append_data(rp->eir, eir_len,
7158 rand, sizeof(rand));
7161 flags = mgmt_get_adv_discov_flags(hdev);
7163 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
7164 flags |= LE_AD_NO_BREDR;
7166 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
7167 &flags, sizeof(flags));
7171 hci_dev_unlock(hdev);
/* Requester implicitly subscribes to future OOB data updates. */
7173 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
7175 status = MGMT_STATUS_SUCCESS;
7178 rp->type = cp->type;
7179 rp->eir_len = cpu_to_le16(eir_len);
7181 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
7182 status, rp, sizeof(*rp) + eir_len);
7183 if (err < 0 || status)
7186 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7187 rp, sizeof(*rp) + eir_len,
7188 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of MGMT_ADV_FLAG_* values this controller supports.
 * A base set is always advertised; TX power is added when the controller
 * reports a valid advertising TX power or supports extended advertising,
 * and the secondary-PHY / offload flags depend on extended-advertising
 * capability and the LE PHY feature bits.
 */
7196 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7200 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7201 flags |= MGMT_ADV_FLAG_DISCOV;
7202 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7203 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7204 flags |= MGMT_ADV_FLAG_APPEARANCE;
7205 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7207 /* In extended adv TX_POWER returned from Set Adv Param
7208 * will be always valid.
7210 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7211 ext_adv_capable(hdev))
7212 flags |= MGMT_ADV_FLAG_TX_POWER;
7214 if (ext_adv_capable(hdev)) {
7215 flags |= MGMT_ADV_FLAG_SEC_1M;
7216 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7217 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7219 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7220 flags |= MGMT_ADV_FLAG_SEC_2M;
7222 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7223 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* Handler for MGMT_OP_READ_ADV_FEATURES.
 * Reports the supported advertising flags, data-length limits, the
 * maximum number of advertising instances and the identifiers of all
 * currently configured instances. Rejected when the controller is not
 * LE capable, and unsupported while the experimental LL Privacy mode
 * is enabled.
 * NOTE(review): excerpt — lines such as the kmalloc failure return are
 * elided from this listing.
 */
7229 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7230 void *data, u16 data_len)
7232 struct mgmt_rp_read_adv_features *rp;
7235 struct adv_info *adv_instance;
7236 u32 supported_flags;
7239 bt_dev_dbg(hdev, "sock %p", sk);
7241 if (!lmp_le_capable(hdev))
7242 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7243 MGMT_STATUS_REJECTED);
7245 /* Enabling the experimental LL Privay support disables support for
7248 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7249 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7250 MGMT_STATUS_NOT_SUPPORTED);
/* One extra byte per configured instance for its instance id. */
7254 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7255 rp = kmalloc(rp_len, GFP_ATOMIC);
7257 hci_dev_unlock(hdev);
7261 supported_flags = get_supported_adv_flags(hdev);
7263 rp->supported_flags = cpu_to_le32(supported_flags);
7264 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7265 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7266 rp->max_instances = hdev->le_num_of_adv_sets;
7267 rp->num_instances = hdev->adv_instance_cnt;
7269 instance = rp->instance;
7270 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7271 *instance = adv_instance->instance;
7275 hci_dev_unlock(hdev);
7277 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7278 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Return the number of bytes the local name EIR field would occupy,
 * by rendering it into a scratch buffer (short name + type/len header).
 */
7285 static u8 calculate_name_len(struct hci_dev *hdev)
7287 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7289 return append_local_name(hdev, buf, 0);
/* Compute how many bytes of caller-supplied TLV data fit in an
 * advertising (or scan response) PDU once the kernel-managed fields
 * implied by @adv_flags — flags, TX power, local name, appearance —
 * have reserved their space.
 * NOTE(review): excerpt — the per-flag subtraction amounts are partly
 * elided from this listing.
 */
7292 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7295 u8 max_len = HCI_MAX_AD_LENGTH;
7298 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7299 MGMT_ADV_FLAG_LIMITED_DISCOV |
7300 MGMT_ADV_FLAG_MANAGED_FLAGS))
7303 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7306 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7307 max_len -= calculate_name_len(hdev);
7309 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True if the kernel manages the Flags EIR field for this instance
 * (any discoverable or managed-flags advertising flag set), in which
 * case user data must not contain its own EIR_FLAGS field.
 */
7316 static bool flags_managed(u32 adv_flags)
7318 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7319 MGMT_ADV_FLAG_LIMITED_DISCOV |
7320 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True if the kernel manages the TX power EIR field for this instance. */
7323 static bool tx_power_managed(u32 adv_flags)
7325 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True if the kernel manages the local name EIR field for this instance. */
7328 static bool name_managed(u32 adv_flags)
7330 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True if the kernel manages the appearance EIR field for this instance. */
7333 static bool appearance_managed(u32 adv_flags)
7335 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate caller-supplied advertising / scan-response TLV data:
 * it must fit within the space left by kernel-managed fields, must not
 * duplicate any field the kernel manages (flags, TX power, name,
 * appearance), and every length-prefixed field must stay within @len.
 * NOTE(review): excerpt — the overall length check and the cur_len
 * assignment are elided from this listing.
 */
7338 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7339 u8 len, bool is_adv_data)
7344 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7349 /* Make sure that the data is correctly formatted. */
7350 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7353 if (data[i + 1] == EIR_FLAGS &&
7354 (!is_adv_data || flags_managed(adv_flags)))
7357 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7360 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7363 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7366 if (data[i + 1] == EIR_APPEARANCE &&
7367 appearance_managed(adv_flags))
7370 /* If the current field length would exceed the total data
7371 * length, then it's invalid.
7373 if (i + cur_len >= len)
/* HCI completion handler for the Add Advertising request.
 * On failure, removes every still-pending advertising instance (emitting
 * Advertising Removed events and cancelling the instance timer if the
 * failed instance was the current one), then completes the pending mgmt
 * command with the mapped status.
 * NOTE(review): excerpt — some braces and the success path details are
 * elided from this listing.
 */
7380 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
7383 struct mgmt_pending_cmd *cmd;
7384 struct mgmt_cp_add_advertising *cp;
7385 struct mgmt_rp_add_advertising rp;
7386 struct adv_info *adv_instance, *n;
7389 bt_dev_dbg(hdev, "status %d", status);
7393 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
7395 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
7396 if (!adv_instance->pending)
7400 adv_instance->pending = false;
7404 instance = adv_instance->instance;
7406 if (hdev->cur_adv_instance == instance)
7407 cancel_adv_timeout(hdev);
7409 hci_remove_adv_instance(hdev, instance);
7410 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
7417 rp.instance = cp->instance;
7420 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7421 mgmt_status(status));
7423 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7424 mgmt_status(status), &rp, sizeof(rp));
7426 mgmt_pending_remove(cmd);
7429 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_ADVERTISING.
 * Validates instance number, payload length, flags (including mutual
 * exclusivity of the secondary-PHY flags) and the TLV data, then stores
 * the instance via hci_add_adv_instance(). If the new instance should go
 * on air immediately it schedules it through an HCI request completed by
 * add_advertising_complete(); otherwise the command completes right away.
 * NOTE(review): excerpt — goto labels, some unlock paths and closing
 * braces are elided from this listing.
 */
7432 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7433 void *data, u16 data_len)
7435 struct mgmt_cp_add_advertising *cp = data;
7436 struct mgmt_rp_add_advertising rp;
7438 u32 supported_flags, phy_flags;
7440 u16 timeout, duration;
7441 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7442 u8 schedule_instance = 0;
7443 struct adv_info *next_instance;
7445 struct mgmt_pending_cmd *cmd;
7446 struct hci_request req;
7448 bt_dev_dbg(hdev, "sock %p", sk);
7450 status = mgmt_le_support(hdev);
7452 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7455 /* Enabling the experimental LL Privay support disables support for
7458 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7459 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7460 MGMT_STATUS_NOT_SUPPORTED);
7462 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7463 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7464 MGMT_STATUS_INVALID_PARAMS);
7466 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7467 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7468 MGMT_STATUS_INVALID_PARAMS);
7470 flags = __le32_to_cpu(cp->flags);
7471 timeout = __le16_to_cpu(cp->timeout);
7472 duration = __le16_to_cpu(cp->duration);
7474 /* The current implementation only supports a subset of the specified
7475 * flags. Also need to check mutual exclusiveness of sec flags.
7477 supported_flags = get_supported_adv_flags(hdev);
7478 phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
/* phy_flags ^ (phy_flags & -phy_flags) is non-zero when more than one
 * secondary-PHY bit is set — at most one PHY may be selected.
 */
7479 if (flags & ~supported_flags ||
7480 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7481 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7482 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs the adv-instance timer, which requires power. */
7486 if (timeout && !hdev_is_powered(hdev)) {
7487 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7488 MGMT_STATUS_REJECTED);
7492 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7493 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7494 pending_find(MGMT_OP_SET_LE, hdev)) {
7495 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7500 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7501 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7502 cp->scan_rsp_len, false)) {
7503 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7504 MGMT_STATUS_INVALID_PARAMS);
7508 err = hci_add_adv_instance(hdev, cp->instance, flags,
7509 cp->adv_data_len, cp->data,
7511 cp->data + cp->adv_data_len,
7514 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7515 MGMT_STATUS_FAILED);
7519 /* Only trigger an advertising added event if a new instance was
7522 if (hdev->adv_instance_cnt > prev_instance_cnt)
7523 mgmt_advertising_added(sk, hdev, cp->instance);
7525 if (hdev->cur_adv_instance == cp->instance) {
7526 /* If the currently advertised instance is being changed then
7527 * cancel the current advertising and schedule the next
7528 * instance. If there is only one instance then the overridden
7529 * advertising data will be visible right away.
7531 cancel_adv_timeout(hdev);
7533 next_instance = hci_get_next_instance(hdev, cp->instance);
7535 schedule_instance = next_instance->instance;
7536 } else if (!hdev->adv_instance_timeout) {
7537 /* Immediately advertise the new instance if no other
7538 * instance is currently being advertised.
7540 schedule_instance = cp->instance;
7543 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
7544 * there is no instance to be advertised then we have no HCI
7545 * communication to make. Simply return.
7547 if (!hdev_is_powered(hdev) ||
7548 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7549 !schedule_instance) {
7550 rp.instance = cp->instance;
7551 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7552 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7556 /* We're good to go, update advertising data, parameters, and start
7559 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7566 hci_req_init(&req, hdev);
7568 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
7571 err = hci_req_run(&req, add_advertising_complete);
7574 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7575 MGMT_STATUS_FAILED);
7576 mgmt_pending_remove(cmd);
7580 hci_dev_unlock(hdev);
/* HCI completion handler for the Remove Advertising request.
 * The instance is already gone by the time this runs, so the mgmt command
 * is always completed with MGMT_STATUS_SUCCESS — a failure here only
 * means advertising could not be disabled.
 */
7585 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7588 struct mgmt_pending_cmd *cmd;
7589 struct mgmt_cp_remove_advertising *cp;
7590 struct mgmt_rp_remove_advertising rp;
7592 bt_dev_dbg(hdev, "status %d", status);
7596 /* A failure status here only means that we failed to disable
7597 * advertising. Otherwise, the advertising instance has been removed,
7598 * so report success.
7600 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
7605 rp.instance = cp->instance;
7607 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7609 mgmt_pending_remove(cmd);
7612 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_ADVERTISING.
 * Instance 0 removes all instances; a non-zero instance must exist.
 * Builds an HCI request to disable/remove the instance (extended or
 * legacy advertising as appropriate); when no HCI traffic is needed
 * (queue empty, powered off, or HCI_ADVERTISING set) the command
 * completes immediately, otherwise remove_advertising_complete() runs.
 * NOTE(review): excerpt — goto labels and some unlock paths are elided.
 */
7615 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
7616 void *data, u16 data_len)
7618 struct mgmt_cp_remove_advertising *cp = data;
7619 struct mgmt_rp_remove_advertising rp;
7620 struct mgmt_pending_cmd *cmd;
7621 struct hci_request req;
7624 bt_dev_dbg(hdev, "sock %p", sk);
7626 /* Enabling the experimental LL Privay support disables support for
7629 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7630 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7631 MGMT_STATUS_NOT_SUPPORTED);
7635 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
7636 err = mgmt_cmd_status(sk, hdev->id,
7637 MGMT_OP_REMOVE_ADVERTISING,
7638 MGMT_STATUS_INVALID_PARAMS);
7642 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7643 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7644 pending_find(MGMT_OP_SET_LE, hdev)) {
7645 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7650 if (list_empty(&hdev->adv_instances)) {
7651 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7652 MGMT_STATUS_INVALID_PARAMS);
7656 hci_req_init(&req, hdev);
7658 /* If we use extended advertising, instance is disabled and removed */
7659 if (ext_adv_capable(hdev)) {
7660 __hci_req_disable_ext_adv_instance(&req, cp->instance);
7661 __hci_req_remove_ext_adv_instance(&req, cp->instance);
7664 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
7666 if (list_empty(&hdev->adv_instances))
7667 __hci_req_disable_advertising(&req);
7669 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
7670 * flag is set or the device isn't powered then we have no HCI
7671 * communication to make. Simply return.
7673 if (skb_queue_empty(&req.cmd_q) ||
7674 !hdev_is_powered(hdev) ||
7675 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
7676 hci_req_purge(&req);
7677 rp.instance = cp->instance;
7678 err = mgmt_cmd_complete(sk, hdev->id,
7679 MGMT_OP_REMOVE_ADVERTISING,
7680 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7684 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
7691 err = hci_req_run(&req, remove_advertising_complete);
7693 mgmt_pending_remove(cmd);
7696 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO.
 * Purely informational: given a prospective instance and flag set,
 * report how much advertising data and scan-response data would remain
 * available after the kernel-managed fields are accounted for.
 */
7701 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
7702 void *data, u16 data_len)
7704 struct mgmt_cp_get_adv_size_info *cp = data;
7705 struct mgmt_rp_get_adv_size_info rp;
7706 u32 flags, supported_flags;
7709 bt_dev_dbg(hdev, "sock %p", sk);
7711 if (!lmp_le_capable(hdev))
7712 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7713 MGMT_STATUS_REJECTED);
7715 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7716 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7717 MGMT_STATUS_INVALID_PARAMS);
7719 flags = __le32_to_cpu(cp->flags);
7721 /* The current implementation only supports a subset of the specified
7724 supported_flags = get_supported_adv_flags(hdev);
7725 if (flags & ~supported_flags)
7726 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7727 MGMT_STATUS_INVALID_PARAMS);
7729 rp.instance = cp->instance;
7730 rp.flags = cp->flags;
7731 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7732 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7734 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7735 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for mgmt opcodes, indexed by opcode value (entry 0 is
 * the unused "no command" slot). Each entry gives the handler, the
 * expected fixed parameter size, and optional HCI_MGMT_* flags:
 * UNTRUSTED (allowed for untrusted sockets), UNCONFIGURED (allowed on
 * unconfigured controllers), HDEV_OPTIONAL (may target no controller).
 * NOTE(review): excerpt — some flag continuation lines are elided here.
 */
7740 static const struct hci_mgmt_handler mgmt_handlers[] = {
7741 { NULL }, /* 0x0000 (no command) */
7742 { read_version, MGMT_READ_VERSION_SIZE,
7744 HCI_MGMT_UNTRUSTED },
7745 { read_commands, MGMT_READ_COMMANDS_SIZE,
7747 HCI_MGMT_UNTRUSTED },
7748 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
7750 HCI_MGMT_UNTRUSTED },
7751 { read_controller_info, MGMT_READ_INFO_SIZE,
7752 HCI_MGMT_UNTRUSTED },
7753 { set_powered, MGMT_SETTING_SIZE },
7754 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
7755 { set_connectable, MGMT_SETTING_SIZE },
7756 { set_fast_connectable, MGMT_SETTING_SIZE },
7757 { set_bondable, MGMT_SETTING_SIZE },
7758 { set_link_security, MGMT_SETTING_SIZE },
7759 { set_ssp, MGMT_SETTING_SIZE },
7760 { set_hs, MGMT_SETTING_SIZE },
7761 { set_le, MGMT_SETTING_SIZE },
7762 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
7763 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
7764 { add_uuid, MGMT_ADD_UUID_SIZE },
7765 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
7766 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
7768 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
7770 { disconnect, MGMT_DISCONNECT_SIZE },
7771 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
7772 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
7773 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
7774 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
7775 { pair_device, MGMT_PAIR_DEVICE_SIZE },
7776 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
7777 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
7778 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
7779 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
7780 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
7781 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
7782 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
7783 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
7785 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
7786 { start_discovery, MGMT_START_DISCOVERY_SIZE },
7787 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
7788 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
7789 { block_device, MGMT_BLOCK_DEVICE_SIZE },
7790 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
7791 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
7792 { set_advertising, MGMT_SETTING_SIZE },
7793 { set_bredr, MGMT_SETTING_SIZE },
7794 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
7795 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
7796 { set_secure_conn, MGMT_SETTING_SIZE },
7797 { set_debug_keys, MGMT_SETTING_SIZE },
7798 { set_privacy, MGMT_SET_PRIVACY_SIZE },
7799 { load_irks, MGMT_LOAD_IRKS_SIZE,
7801 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
7802 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
7803 { add_device, MGMT_ADD_DEVICE_SIZE },
7804 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
7805 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
7807 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
7809 HCI_MGMT_UNTRUSTED },
7810 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
7811 HCI_MGMT_UNCONFIGURED |
7812 HCI_MGMT_UNTRUSTED },
7813 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
7814 HCI_MGMT_UNCONFIGURED },
7815 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
7816 HCI_MGMT_UNCONFIGURED },
7817 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
7819 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
7820 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
7822 HCI_MGMT_UNTRUSTED },
7823 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
7824 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
7826 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
7827 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
7828 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
7829 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
7830 HCI_MGMT_UNTRUSTED },
7831 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
7832 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
7833 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
7834 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
7836 { set_wideband_speech, MGMT_SETTING_SIZE },
7837 { read_security_info, MGMT_READ_SECURITY_INFO_SIZE,
7838 HCI_MGMT_UNTRUSTED },
7839 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
7840 HCI_MGMT_UNTRUSTED |
7841 HCI_MGMT_HDEV_OPTIONAL },
7842 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
7844 HCI_MGMT_HDEV_OPTIONAL },
7845 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
7846 HCI_MGMT_UNTRUSTED },
7847 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
7849 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
7850 HCI_MGMT_UNTRUSTED },
7851 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
7853 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
7854 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
7855 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
7856 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
7858 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
/* Announce a newly registered controller to mgmt listeners.
 * Raw-only (HCI_QUIRK_RAW_DEVICE) controllers are invisible to mgmt.
 * Depending on device type and configuration state this emits
 * UNCONF_INDEX_ADDED or INDEX_ADDED, plus an EXT_INDEX_ADDED event for
 * sockets subscribed to extended index events.
 * NOTE(review): excerpt — the ev.type/ev.bus assignments and some case
 * labels are elided from this listing.
 */
7861 void mgmt_index_added(struct hci_dev *hdev)
7863 struct mgmt_ev_ext_index ev;
7865 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7868 switch (hdev->dev_type) {
7870 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7871 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7872 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7875 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7876 HCI_MGMT_INDEX_EVENTS);
7889 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7890 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce removal of a controller to mgmt listeners.
 * All pending commands for the device are completed with
 * MGMT_STATUS_INVALID_INDEX first, then UNCONF_INDEX_REMOVED or
 * INDEX_REMOVED and the extended EXT_INDEX_REMOVED event are emitted.
 * NOTE(review): excerpt — the ev.type/ev.bus assignments and some case
 * labels are elided from this listing.
 */
7893 void mgmt_index_removed(struct hci_dev *hdev)
7895 struct mgmt_ev_ext_index ev;
7896 u8 status = MGMT_STATUS_INVALID_INDEX;
7898 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7901 switch (hdev->dev_type) {
/* Fail every outstanding mgmt command for this index. */
7903 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7905 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7906 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7907 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7910 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7911 HCI_MGMT_INDEX_EVENTS);
7924 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7925 HCI_MGMT_EXT_INDEX_EVENTS);
7928 /* This function requires the caller holds hdev->lock */
/* Re-populate the pending LE connection/report action lists from the
 * stored connection parameters after a power-on, since an AUTO_OFF
 * cycle may not have actually powered the controller down.
 */
7929 static void restart_le_actions(struct hci_dev *hdev)
7931 struct hci_conn_params *p;
7933 list_for_each_entry(p, &hdev->le_conn_params, list) {
7934 /* Needed for AUTO_OFF case where might not "really"
7935 * have been powered off.
7937 list_del_init(&p->action);
7939 switch (p->auto_connect) {
7940 case HCI_AUTO_CONN_DIRECT:
7941 case HCI_AUTO_CONN_ALWAYS:
7942 list_add(&p->action, &hdev->pend_le_conns);
7944 case HCI_AUTO_CONN_REPORT:
7945 list_add(&p->action, &hdev->pend_le_reports);
/* Called when the controller finishes powering on (err carries the
 * power-up result). Restores LE auto-connect actions, kicks background
 * scanning, answers any pending SET_POWERED commands and emits a
 * New Settings event to the remaining sockets.
 * NOTE(review): excerpt — the hci_dev_lock() call and err handling
 * lines are elided from this listing.
 */
7953 void mgmt_power_on(struct hci_dev *hdev, int err)
7955 struct cmd_lookup match = { NULL, hdev };
7957 bt_dev_dbg(hdev, "err %d", err);
7962 restart_le_actions(hdev);
7963 hci_update_background_scan(hdev);
7966 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7968 new_settings(hdev, match.sk);
7973 hci_dev_unlock(hdev);
/* Called when the controller has powered off. Answers pending
 * SET_POWERED commands, fails all other pending commands with
 * INVALID_INDEX (unregistration) or NOT_POWERED, announces a zeroed
 * class of device if one was set, and emits New Settings.
 * NOTE(review): excerpt — trailing lines (e.g. sock_put on match.sk)
 * are elided from this listing.
 */
7976 void __mgmt_power_off(struct hci_dev *hdev)
7978 struct cmd_lookup match = { NULL, hdev };
7979 u8 status, zero_cod[] = { 0, 0, 0 };
7981 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7983 /* If the power off is because of hdev unregistration let
7984 * use the appropriate INVALID_INDEX status. Otherwise use
7985 * NOT_POWERED. We cover both scenarios here since later in
7986 * mgmt_index_removed() any hci_conn callbacks will have already
7987 * been triggered, potentially causing misleading DISCONNECTED
7990 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7991 status = MGMT_STATUS_INVALID_INDEX;
7993 status = MGMT_STATUS_NOT_POWERED;
7995 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7997 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
7998 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7999 zero_cod, sizeof(zero_cod),
8000 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8001 ext_info_changed(hdev, NULL);
8004 new_settings(hdev, match.sk);
/* Complete a pending SET_POWERED command with an error status.
 * -ERFKILL maps to MGMT_STATUS_RFKILLED; any other error becomes
 * MGMT_STATUS_FAILED. No-op if no SET_POWERED command is pending.
 */
8010 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8012 struct mgmt_pending_cmd *cmd;
8015 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8019 if (err == -ERFKILL)
8020 status = MGMT_STATUS_RFKILLED;
8022 status = MGMT_STATUS_FAILED;
8024 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8026 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key.
 * @persistent becomes the store hint telling userspace whether the key
 * should be kept across reboots.
 */
8029 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8032 struct mgmt_ev_new_link_key ev;
8034 memset(&ev, 0, sizeof(ev));
8036 ev.store_hint = persistent;
8037 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8038 ev.key.addr.type = BDADDR_BREDR;
8039 ev.key.type = key->type;
8040 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8041 ev.key.pin_len = key->pin_len;
8043 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK's internal type/authentication state to the MGMT_LTK_*
 * value reported to userspace. Unknown types fall through to
 * MGMT_LTK_UNAUTHENTICATED.
 * NOTE(review): excerpt — the case labels for the first two groups are
 * elided from this listing.
 */
8046 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8048 switch (ltk->type) {
8051 if (ltk->authenticated)
8052 return MGMT_LTK_AUTHENTICATED;
8053 return MGMT_LTK_UNAUTHENTICATED;
8055 if (ltk->authenticated)
8056 return MGMT_LTK_P256_AUTH;
8057 return MGMT_LTK_P256_UNAUTH;
8058 case SMP_LTK_P256_DEBUG:
8059 return MGMT_LTK_P256_DEBUG;
8062 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new SMP long term key.
 * The store hint is forced to 0 for non-identity random addresses
 * (RPA / non-resolvable — top two bits not 11), since such keys cannot
 * be re-associated after an address change. Only the significant
 * enc_size bytes of the key value are copied; the rest is zeroed.
 */
8065 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8067 struct mgmt_ev_new_long_term_key ev;
8069 memset(&ev, 0, sizeof(ev));
8071 /* Devices using resolvable or non-resolvable random addresses
8072 * without providing an identity resolving key don't require
8073 * to store long term keys. Their addresses will change the
8076 * Only when a remote device provides an identity address
8077 * make sure the long term key is stored. If the remote
8078 * identity is known, the long term keys are internally
8079 * mapped to the identity address. So allow static random
8080 * and public addresses here.
8082 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8083 (key->bdaddr.b[5] & 0xc0) != 0xc0)
8084 ev.store_hint = 0x00;
8086 ev.store_hint = persistent;
8088 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8089 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8090 ev.key.type = mgmt_ltk_type(key);
8091 ev.key.enc_size = key->enc_size;
8092 ev.key.ediv = key->ediv;
8093 ev.key.rand = key->rand;
8095 if (key->type == SMP_LTK)
8098 /* Make sure we copy only the significant bytes based on the
8099 * encryption key size, and set the rest of the value to zeroes.
8101 memcpy(ev.key.val, key->val, key->enc_size);
8102 memset(ev.key.val + key->enc_size, 0,
8103 sizeof(ev.key.val) - key->enc_size);
8105 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a new identity resolving key, including the
 * RPA it was resolved from and the peer's identity address.
 */
8108 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8110 struct mgmt_ev_new_irk ev;
8112 memset(&ev, 0, sizeof(ev));
8114 ev.store_hint = persistent;
8116 bacpy(&ev.rpa, &irk->rpa);
8117 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8118 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8119 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8121 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new signature resolving key.
 * As with LTKs, the store hint is forced to 0 for non-identity random
 * addresses (top two bits not 11) since the peer address will change.
 */
8124 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
8127 struct mgmt_ev_new_csrk ev;
8129 memset(&ev, 0, sizeof(ev));
8131 /* Devices using resolvable or non-resolvable random addresses
8132 * without providing an identity resolving key don't require
8133 * to store signature resolving keys. Their addresses will change
8134 * the next time around.
8136 * Only when a remote device provides an identity address
8137 * make sure the signature resolving key is stored. So allow
8138 * static random and public addresses here.
8140 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8141 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
8142 ev.store_hint = 0x00;
8144 ev.store_hint = persistent;
8146 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
8147 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
8148 ev.key.type = csrk->type;
8149 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
8151 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM with connection parameters suggested by a
 * remote LE device, so userspace can decide whether to store them.
 * Silently ignored for non-identity addresses, which cannot be keyed on.
 */
8154 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8155 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8156 u16 max_interval, u16 latency, u16 timeout)
8158 struct mgmt_ev_new_conn_param ev;
8160 if (!hci_is_identity_address(bdaddr, bdaddr_type))
8163 memset(&ev, 0, sizeof(ev));
8164 bacpy(&ev.addr.bdaddr, bdaddr);
8165 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8166 ev.store_hint = store_hint;
8167 ev.min_interval = cpu_to_le16(min_interval);
8168 ev.max_interval = cpu_to_le16(max_interval);
8169 ev.latency = cpu_to_le16(latency);
8170 ev.timeout = cpu_to_le16(timeout);
8172 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED with the peer address, connection flags
 * and an EIR blob. For LE connections the peer's advertising data is
 * forwarded as-is; otherwise the remote name and class of device are
 * appended as EIR fields.
 * NOTE(review): excerpt — the buf declaration/size and name handling
 * lines are elided from this listing.
 */
8175 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
8176 u32 flags, u8 *name, u8 name_len)
8179 struct mgmt_ev_device_connected *ev = (void *) buf;
8182 bacpy(&ev->addr.bdaddr, &conn->dst);
8183 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
8185 ev->flags = __cpu_to_le32(flags);
8187 /* We must ensure that the EIR Data fields are ordered and
8188 * unique. Keep it simple for now and avoid the problem by not
8189 * adding any BR/EDR data to the LE adv.
8191 if (conn->le_adv_data_len > 0) {
8192 memcpy(&ev->eir[eir_len],
8193 conn->le_adv_data, conn->le_adv_data_len);
8194 eir_len = conn->le_adv_data_len;
8197 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
8200 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
8201 eir_len = eir_append_data(ev->eir, eir_len,
8203 conn->dev_class, 3);
8206 ev->eir_len = cpu_to_le16(eir_len);
8208 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
8209 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT command
 * with success and remember its socket (via @data, a struct sock **) so
 * the caller can credit the disconnect event to that socket.
 * NOTE(review): excerpt — the *sk assignment/hold lines are elided.
 */
8212 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8214 struct sock **sk = data;
8216 cmd->cmd_complete(cmd, 0);
8221 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: announce the device as unpaired and
 * complete the pending UNPAIR_DEVICE command with success.
 */
8224 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
8226 struct hci_dev *hdev = data;
8227 struct mgmt_cp_unpair_device *cp = cmd->param;
8229 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
8231 cmd->cmd_complete(cmd, 0);
8232 mgmt_pending_remove(cmd);
/* Return whether a power-off is in progress, i.e. a pending SET_POWERED
 * command exists whose requested mode is off.
 * NOTE(review): excerpt — the cp->val check and return statements are
 * elided from this listing.
 */
8235 bool mgmt_powering_down(struct hci_dev *hdev)
8237 struct mgmt_pending_cmd *cmd;
8238 struct mgmt_mode *cp;
8240 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED when a connection goes away.
 * If this is the last connection during a power-down, the deferred
 * power_off work is run immediately. Pending DISCONNECT commands are
 * completed first so the event can be attributed to the right socket,
 * and suspend-induced disconnects are reported with a dedicated reason.
 * Finally, pending UNPAIR_DEVICE commands are answered.
 */
8251 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
8252 u8 link_type, u8 addr_type, u8 reason,
8253 bool mgmt_connected)
8255 struct mgmt_ev_device_disconnected ev;
8256 struct sock *sk = NULL;
8258 /* The connection is still in hci_conn_hash so test for 1
8259 * instead of 0 to know if this is the last one.
8261 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8262 cancel_delayed_work(&hdev->power_off);
8263 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8266 if (!mgmt_connected)
8269 if (link_type != ACL_LINK && link_type != LE_LINK)
8272 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
8274 bacpy(&ev.addr.bdaddr, bdaddr);
8275 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8278 /* Report disconnects due to suspend */
8279 if (hdev->suspended)
8280 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
8282 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
8287 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Handle a failed HCI Disconnect: complete any pending UNPAIR_DEVICE
 * commands and, when a pending DISCONNECT command matches @bdaddr and the
 * translated address type, complete it with the mapped mgmt status.
 *
 * NOTE(review): the call continuation after unpair_device_rsp, the !cmd
 * guard, 'cp = cmd->param' and the early 'return's are elided in this
 * view.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (bacmp(bdaddr, &cp->addr.bdaddr))
	if (cp->addr.type != bdaddr_type)
	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
8317 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8318 u8 addr_type, u8 status)
8320 struct mgmt_ev_connect_failed ev;
8322 /* The connection is still in hci_conn_hash so test for 1
8323 * instead of 0 to know if this is the last one.
8325 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8326 cancel_delayed_work(&hdev->power_off);
8327 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8330 bacpy(&ev.addr.bdaddr, bdaddr);
8331 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8332 ev.status = mgmt_status(status);
8334 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit the PIN Code Request event for a BR/EDR peer.
 *
 * NOTE(review): the assignment consuming @secure (presumably
 * 'ev.secure = secure') and the braces are elided in this view.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
	struct mgmt_ev_pin_code_request ev;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending MGMT_OP_PIN_CODE_REPLY with the mapped HCI status
 * and drop the pending entry.
 *
 * NOTE(review): the signature continuation (the status parameter) and
 * the !cmd early return are elided in this view.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
	struct mgmt_pending_cmd *cmd;
	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
/* Complete a pending MGMT_OP_PIN_CODE_NEG_REPLY with the mapped HCI
 * status and drop the pending entry.
 *
 * NOTE(review): the signature continuation (the status parameter) and
 * the !cmd early return are elided in this view.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
	struct mgmt_pending_cmd *cmd;
	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
/* Send the User Confirm Request event, asking user space to confirm the
 * numeric @value for the pairing with @bdaddr.  Returns the mgmt_event()
 * result.
 *
 * NOTE(review): the trailing 'confirm_hint' parameter line and the
 * final-argument continuation of the mgmt_event() call are elided in
 * this view.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
	struct mgmt_ev_user_confirm_request ev;
	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);
	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
8391 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8392 u8 link_type, u8 addr_type)
8394 struct mgmt_ev_user_passkey_request ev;
8396 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8398 bacpy(&ev.addr.bdaddr, bdaddr);
8399 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8401 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion for the user confirm/passkey (neg) reply handlers:
 * find the pending command for @opcode, complete it with the mapped
 * status and release it.
 *
 * NOTE(review): the trailing 'opcode' parameter line, the !cmd handling
 * and the function's return value are elided in this view.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
	struct mgmt_pending_cmd *cmd;
	cmd = pending_find(opcode, hdev);
	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
8421 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8422 u8 link_type, u8 addr_type, u8 status)
8424 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8425 status, MGMT_OP_USER_CONFIRM_REPLY);
8428 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8429 u8 link_type, u8 addr_type, u8 status)
8431 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8433 MGMT_OP_USER_CONFIRM_NEG_REPLY);
8436 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8437 u8 link_type, u8 addr_type, u8 status)
8439 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8440 status, MGMT_OP_USER_PASSKEY_REPLY);
8443 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8444 u8 link_type, u8 addr_type, u8 status)
8446 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8448 MGMT_OP_USER_PASSKEY_NEG_REPLY);
8451 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
8452 u8 link_type, u8 addr_type, u32 passkey,
8455 struct mgmt_ev_passkey_notify ev;
8457 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8459 bacpy(&ev.addr.bdaddr, bdaddr);
8460 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8461 ev.passkey = __cpu_to_le32(passkey);
8462 ev.entered = entered;
8464 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure on @conn to user space and, when a
 * pairing command is pending for this connection, complete it with the
 * mapped status.
 *
 * NOTE(review): the 'ev.status' assignment and the 'if (cmd)' guard that
 * should surround the completion are elided in this view.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);
	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	cmd = find_pairing(conn);
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);
/* Finish an authentication-enable change: on error, fail all pending
 * SET_LINK_SECURITY commands with the mapped status; on success, sync
 * the HCI_LINK_SECURITY flag with the HCI_AUTH hardware flag, answer
 * pending SET_LINK_SECURITY commands with the new settings and emit New
 * Settings when the flag changed.
 *
 * NOTE(review): the if/else branch structure, the 'changed' declaration
 * and the match.sk release are elided in this view.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
	struct cmd_lookup match = { NULL, hdev };
	u8 mgmt_err = mgmt_status(status);
	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
			     cmd_status_rsp, &mgmt_err);
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
	new_settings(hdev, match.sk);
/* Queue a Write EIR command that blanks the controller's extended
 * inquiry response data, clearing the cached copy in hdev->eir as well.
 * No-op when the controller lacks extended inquiry support.
 *
 * NOTE(review): the early 'return' after the capability check and the
 * braces are elided in this view.
 */
static void clear_eir(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;
	if (!lmp_ext_inq_capable(hdev))
	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(&cp, 0, sizeof(cp));
	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Finish a Simple Secure Pairing mode change.  On failure, back out a
 * pending HCI_SSP_ENABLED transition (clearing HS as well) and fail all
 * pending SET_SSP commands.  On success, sync HCI_SSP_ENABLED with
 * @enable, answer pending SET_SSP commands, emit New Settings when the
 * flag actually changed, and issue a follow-up HCI request (debug-key
 * mode and EIR refresh when SSP is on).
 *
 * NOTE(review): multiple branch/brace lines are elided in this view —
 * compare against the full source before modifying.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;
	u8 mgmt_err = mgmt_status(status);
	if (enable && hci_dev_test_and_clear_flag(hdev,
		hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		new_settings(hdev, NULL);
	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		changed = hci_dev_test_and_clear_flag(hdev,
		hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
	new_settings(hdev, match.sk);
	hci_req_init(&req, hdev);
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);
8583 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8585 struct cmd_lookup *match = data;
8587 if (match->sk == NULL) {
8588 match->sk = cmd->sk;
8589 sock_hold(match->sk);
/* Finish a class-of-device update: find the socket of whichever pending
 * SET_DEV_CLASS / ADD_UUID / REMOVE_UUID command triggered it, then
 * broadcast the Class Of Dev Changed event plus an extended-info change.
 *
 * NOTE(review): the 'status' parameter line of the signature, the status
 * guard around the event and the match.sk release are elided in this
 * view.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
	mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
			   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
	ext_info_changed(hdev, NULL);
/* Propagate a completed local-name change: build a Local Name Changed
 * event carrying the new name and the current short name, cache the name
 * in hdev->dev_name when the change did not originate from a pending
 * SET_LOCAL_NAME command, and suppress signalling during power-on.
 *
 * NOTE(review): the early status check and the braces of the !cmd branch
 * are elided in this view.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;
	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
	/* If this is a HCI command related to powering on the
	 * HCI dev don't send any mgmt signals.
	 */
	if (pending_find(MGMT_OP_SET_POWERED, hdev))
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
8640 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8644 for (i = 0; i < uuid_count; i++) {
8645 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data in @eir and return true when any UUID it
 * advertises — 16-, 32- or 128-bit, the shorter forms expanded against
 * the Bluetooth base UUID — matches an entry in @uuids.
 *
 * NOTE(review): local declarations (parsed, uuid, i), the zero-length
 * field break, the switch statement line, 'return true' on match and the
 * final 'return false' are elided in this view.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
	while (parsed < eir_len) {
		u8 field_len = eir[0];
		if (eir_len - parsed < field_len + 1)
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs land in bytes 12-13 of the base
			 * UUID, little-endian on the wire.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs fill bytes 12-15 of the base UUID. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are copied verbatim. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
		parsed += field_len + 1;
		eir += field_len + 1;
/* Schedule a delayed LE scan restart so strict duplicate filtering
 * reports fresh RSSI values.  Skipped when not scanning, or when the
 * current scan window would end before the restart delay elapses.
 *
 * NOTE(review): the early 'return' statements and braces are elided in
 * this view.
 */
static void restart_le_scan(struct hci_dev *hdev)
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
/* Apply the active service-discovery filter to a found device: drop
 * results below the configured RSSI threshold (unless the strict
 * duplicate-filter quirk needs them for a scan restart) and results
 * whose EIR/scan-response data contains none of the requested UUIDs.
 *
 * NOTE(review): the 'return false'/'return true' statements and closing
 * braces are elided in this view.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
/* Forward a discovered device (inquiry result or advertising report) to
 * user space as a Device Found event.  Applies the discovery filters —
 * passive-scan report list, RSSI/UUID service filter, limited-discovery
 * flag — and merges EIR data, class of device and scan-response data
 * into the event payload.
 *
 * NOTE(review): the event buffer declaration, several early 'return'
 * statements and closing braces are elided in this view.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
	struct mgmt_ev_device_found *ev = (void *)buf;
	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (!(dev_class[1] & 0x20))
		u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
		if (!flags || !(flags[0] & LE_AD_LIMITED))
	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
	memset(buf, 0, sizeof(buf));
	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->flags = cpu_to_le32(flags);
	/* Copy EIR or advertising data into event */
	memcpy(ev->eir, eir, eir_len);
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Deliver a resolved remote device name to user space as a Device Found
 * event carrying an EIR Complete Local Name field.
 *
 * NOTE(review): the eir_len declaration, the rssi field assignment and
 * the eir_append_data() argument continuation are elided in this view.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8875 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8877 struct mgmt_ev_discovering ev;
8879 bt_dev_dbg(hdev, "discovering %u", discovering);
8881 memset(&ev, 0, sizeof(ev));
8882 ev.type = hdev->discovery.type;
8883 ev.discovering = discovering;
8885 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8888 void mgmt_suspending(struct hci_dev *hdev, u8 state)
8890 struct mgmt_ev_controller_suspend ev;
8892 ev.suspend_state = state;
8893 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
/* Notify user space that the controller resumed, reporting the wake
 * reason and the waking peer address (zeroed when no address applies).
 *
 * NOTE(review): the 'addr_type' parameter line and the 'if (bdaddr) ...
 * else' structure that selects between the bacpy and the memset are
 * elided in this view.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
	struct mgmt_ev_controller_resume ev;
	ev.wake_reason = reason;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;
	memset(&ev.addr, 0, sizeof(ev.addr));
	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
/* Management channel descriptor: routes HCI_CHANNEL_CONTROL traffic to
 * the mgmt_handlers table and runs mgmt_init_hdev for new controllers.
 *
 * NOTE(review): the closing '};' (and any further initializers) are
 * elided in this view.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
8921 return hci_mgmt_chan_register(&chan);
8924 void mgmt_exit(void)
8926 hci_mgmt_chan_unregister(&chan);