2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
44 #define MGMT_VERSION 1
45 #define MGMT_REVISION 22
/* Opcodes a trusted (privileged) management socket is allowed to issue.
 * Ordering follows the MGMT opcode numbering in mgmt.h.
 * NOTE(review): this extract is a numbered paste with lines dropped;
 * several opcodes (and the closing brace) appear to be missing from
 * this view — verify against the complete file before editing.
 */
47 static const u16 mgmt_commands[] = {
48 MGMT_OP_READ_INDEX_LIST,
51 MGMT_OP_SET_DISCOVERABLE,
52 MGMT_OP_SET_CONNECTABLE,
53 MGMT_OP_SET_FAST_CONNECTABLE,
55 MGMT_OP_SET_LINK_SECURITY,
59 MGMT_OP_SET_DEV_CLASS,
60 MGMT_OP_SET_LOCAL_NAME,
63 MGMT_OP_LOAD_LINK_KEYS,
64 MGMT_OP_LOAD_LONG_TERM_KEYS,
66 MGMT_OP_GET_CONNECTIONS,
67 MGMT_OP_PIN_CODE_REPLY,
68 MGMT_OP_PIN_CODE_NEG_REPLY,
69 MGMT_OP_SET_IO_CAPABILITY,
71 MGMT_OP_CANCEL_PAIR_DEVICE,
72 MGMT_OP_UNPAIR_DEVICE,
73 MGMT_OP_USER_CONFIRM_REPLY,
74 MGMT_OP_USER_CONFIRM_NEG_REPLY,
75 MGMT_OP_USER_PASSKEY_REPLY,
76 MGMT_OP_USER_PASSKEY_NEG_REPLY,
77 MGMT_OP_READ_LOCAL_OOB_DATA,
78 MGMT_OP_ADD_REMOTE_OOB_DATA,
79 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80 MGMT_OP_START_DISCOVERY,
81 MGMT_OP_STOP_DISCOVERY,
84 MGMT_OP_UNBLOCK_DEVICE,
85 MGMT_OP_SET_DEVICE_ID,
86 MGMT_OP_SET_ADVERTISING,
88 MGMT_OP_SET_STATIC_ADDRESS,
89 MGMT_OP_SET_SCAN_PARAMS,
90 MGMT_OP_SET_SECURE_CONN,
91 MGMT_OP_SET_DEBUG_KEYS,
94 MGMT_OP_GET_CONN_INFO,
95 MGMT_OP_GET_CLOCK_INFO,
97 MGMT_OP_REMOVE_DEVICE,
98 MGMT_OP_LOAD_CONN_PARAM,
99 MGMT_OP_READ_UNCONF_INDEX_LIST,
100 MGMT_OP_READ_CONFIG_INFO,
101 MGMT_OP_SET_EXTERNAL_CONFIG,
102 MGMT_OP_SET_PUBLIC_ADDRESS,
103 MGMT_OP_START_SERVICE_DISCOVERY,
104 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105 MGMT_OP_READ_EXT_INDEX_LIST,
106 MGMT_OP_READ_ADV_FEATURES,
107 MGMT_OP_ADD_ADVERTISING,
108 MGMT_OP_REMOVE_ADVERTISING,
109 MGMT_OP_GET_ADV_SIZE_INFO,
110 MGMT_OP_START_LIMITED_DISCOVERY,
111 MGMT_OP_READ_EXT_INFO,
112 MGMT_OP_SET_APPEARANCE,
113 MGMT_OP_GET_PHY_CONFIGURATION,
114 MGMT_OP_SET_PHY_CONFIGURATION,
115 MGMT_OP_SET_BLOCKED_KEYS,
116 MGMT_OP_SET_WIDEBAND_SPEECH,
117 MGMT_OP_READ_CONTROLLER_CAP,
118 MGMT_OP_READ_EXP_FEATURES_INFO,
119 MGMT_OP_SET_EXP_FEATURE,
120 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124 MGMT_OP_GET_DEVICE_FLAGS,
125 MGMT_OP_SET_DEVICE_FLAGS,
126 MGMT_OP_READ_ADV_MONITOR_FEATURES,
127 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128 MGMT_OP_REMOVE_ADV_MONITOR,
129 MGMT_OP_ADD_EXT_ADV_PARAMS,
130 MGMT_OP_ADD_EXT_ADV_DATA,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
132 MGMT_OP_SET_MESH_RECEIVER,
133 MGMT_OP_MESH_READ_FEATURES,
135 MGMT_OP_MESH_SEND_CANCEL,
/* Events delivered to trusted management sockets.
 * NOTE(review): numbered-paste extract; some entries and the closing
 * brace are not visible here — confirm against the complete file.
 */
138 static const u16 mgmt_events[] = {
139 MGMT_EV_CONTROLLER_ERROR,
141 MGMT_EV_INDEX_REMOVED,
142 MGMT_EV_NEW_SETTINGS,
143 MGMT_EV_CLASS_OF_DEV_CHANGED,
144 MGMT_EV_LOCAL_NAME_CHANGED,
145 MGMT_EV_NEW_LINK_KEY,
146 MGMT_EV_NEW_LONG_TERM_KEY,
147 MGMT_EV_DEVICE_CONNECTED,
148 MGMT_EV_DEVICE_DISCONNECTED,
149 MGMT_EV_CONNECT_FAILED,
150 MGMT_EV_PIN_CODE_REQUEST,
151 MGMT_EV_USER_CONFIRM_REQUEST,
152 MGMT_EV_USER_PASSKEY_REQUEST,
154 MGMT_EV_DEVICE_FOUND,
156 MGMT_EV_DEVICE_BLOCKED,
157 MGMT_EV_DEVICE_UNBLOCKED,
158 MGMT_EV_DEVICE_UNPAIRED,
159 MGMT_EV_PASSKEY_NOTIFY,
162 MGMT_EV_DEVICE_ADDED,
163 MGMT_EV_DEVICE_REMOVED,
164 MGMT_EV_NEW_CONN_PARAM,
165 MGMT_EV_UNCONF_INDEX_ADDED,
166 MGMT_EV_UNCONF_INDEX_REMOVED,
167 MGMT_EV_NEW_CONFIG_OPTIONS,
168 MGMT_EV_EXT_INDEX_ADDED,
169 MGMT_EV_EXT_INDEX_REMOVED,
170 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
171 MGMT_EV_ADVERTISING_ADDED,
172 MGMT_EV_ADVERTISING_REMOVED,
173 MGMT_EV_EXT_INFO_CHANGED,
174 MGMT_EV_PHY_CONFIGURATION_CHANGED,
175 MGMT_EV_EXP_FEATURE_CHANGED,
176 MGMT_EV_DEVICE_FLAGS_CHANGED,
177 MGMT_EV_ADV_MONITOR_ADDED,
178 MGMT_EV_ADV_MONITOR_REMOVED,
179 MGMT_EV_CONTROLLER_SUSPEND,
180 MGMT_EV_CONTROLLER_RESUME,
181 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
182 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
/* Read-only opcodes that even untrusted (non-privileged) management
 * sockets may issue.
 * NOTE(review): extract may be missing entries and the closing brace.
 */
185 static const u16 mgmt_untrusted_commands[] = {
186 MGMT_OP_READ_INDEX_LIST,
188 MGMT_OP_READ_UNCONF_INDEX_LIST,
189 MGMT_OP_READ_CONFIG_INFO,
190 MGMT_OP_READ_EXT_INDEX_LIST,
191 MGMT_OP_READ_EXT_INFO,
192 MGMT_OP_READ_CONTROLLER_CAP,
193 MGMT_OP_READ_EXP_FEATURES_INFO,
194 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
195 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Events that may be delivered to untrusted management sockets.
 * NOTE(review): extract may be missing entries and the closing brace.
 */
198 static const u16 mgmt_untrusted_events[] = {
200 MGMT_EV_INDEX_REMOVED,
201 MGMT_EV_NEW_SETTINGS,
202 MGMT_EV_CLASS_OF_DEV_CHANGED,
203 MGMT_EV_LOCAL_NAME_CHANGED,
204 MGMT_EV_UNCONF_INDEX_ADDED,
205 MGMT_EV_UNCONF_INDEX_REMOVED,
206 MGMT_EV_NEW_CONFIG_OPTIONS,
207 MGMT_EV_EXT_INDEX_ADDED,
208 MGMT_EV_EXT_INDEX_REMOVED,
209 MGMT_EV_EXT_INFO_CHANGED,
210 MGMT_EV_EXP_FEATURE_CHANGED,
213 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 "\x00\x00\x00\x00\x00\x00\x00\x00"
218 /* HCI to MGMT error code conversion table */
/* Indexed by HCI status code (Bluetooth Core Spec Vol 1 Part F);
 * mgmt_status() uses this to translate controller errors into MGMT
 * status codes. Entry order must match the HCI status numbering.
 * NOTE(review): numbered-paste extract; closing brace not visible.
 */
219 static const u8 mgmt_status_table[] = {
221 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
222 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
223 MGMT_STATUS_FAILED, /* Hardware Failure */
224 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
225 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
226 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
227 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
228 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
229 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
230 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
231 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
232 MGMT_STATUS_BUSY, /* Command Disallowed */
233 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
234 MGMT_STATUS_REJECTED, /* Rejected Security */
235 MGMT_STATUS_REJECTED, /* Rejected Personal */
236 MGMT_STATUS_TIMEOUT, /* Host Timeout */
237 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
238 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
239 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
240 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
241 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
242 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
243 MGMT_STATUS_BUSY, /* Repeated Attempts */
244 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
245 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
246 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
247 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
248 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
249 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
250 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
251 MGMT_STATUS_FAILED, /* Unspecified Error */
252 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
253 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
254 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
255 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
256 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
257 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
258 MGMT_STATUS_FAILED, /* Unit Link Key Used */
259 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
260 MGMT_STATUS_TIMEOUT, /* Instant Passed */
261 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
262 MGMT_STATUS_FAILED, /* Transaction Collision */
263 MGMT_STATUS_FAILED, /* Reserved for future use */
264 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
265 MGMT_STATUS_REJECTED, /* QoS Rejected */
266 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
267 MGMT_STATUS_REJECTED, /* Insufficient Security */
268 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
269 MGMT_STATUS_FAILED, /* Reserved for future use */
270 MGMT_STATUS_BUSY, /* Role Switch Pending */
271 MGMT_STATUS_FAILED, /* Reserved for future use */
272 MGMT_STATUS_FAILED, /* Slot Violation */
273 MGMT_STATUS_FAILED, /* Role Switch Failed */
274 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
275 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
276 MGMT_STATUS_BUSY, /* Host Busy Pairing */
277 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
278 MGMT_STATUS_BUSY, /* Controller Busy */
279 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
280 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
281 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
282 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
283 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
286 static u8 mgmt_errno_status(int err)
290 return MGMT_STATUS_SUCCESS;
292 return MGMT_STATUS_REJECTED;
294 return MGMT_STATUS_INVALID_PARAMS;
296 return MGMT_STATUS_NOT_SUPPORTED;
298 return MGMT_STATUS_BUSY;
300 return MGMT_STATUS_AUTH_FAILED;
302 return MGMT_STATUS_NO_RESOURCES;
304 return MGMT_STATUS_ALREADY_CONNECTED;
306 return MGMT_STATUS_DISCONNECTED;
309 return MGMT_STATUS_FAILED;
312 static u8 mgmt_status(int err)
315 return mgmt_errno_status(err);
317 if (err < ARRAY_SIZE(mgmt_status_table))
318 return mgmt_status_table[err];
320 return MGMT_STATUS_FAILED;
323 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
326 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
330 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
331 u16 len, int flag, struct sock *skip_sk)
333 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
337 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
338 struct sock *skip_sk)
340 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 HCI_SOCK_TRUSTED, skip_sk);
344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
346 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
350 static u8 le_addr_type(u8 mgmt_addr_type)
352 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 return ADDR_LE_DEV_PUBLIC;
355 return ADDR_LE_DEV_RANDOM;
358 void mgmt_fill_version_info(void *ver)
360 struct mgmt_rp_read_version *rp = ver;
362 rp->version = MGMT_VERSION;
363 rp->revision = cpu_to_le16(MGMT_REVISION);
366 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
369 struct mgmt_rp_read_version rp;
371 bt_dev_dbg(hdev, "sock %p", sk);
373 mgmt_fill_version_info(&rp);
375 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
382 struct mgmt_rp_read_commands *rp;
383 u16 num_commands, num_events;
387 bt_dev_dbg(hdev, "sock %p", sk);
389 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 num_commands = ARRAY_SIZE(mgmt_commands);
391 num_events = ARRAY_SIZE(mgmt_events);
393 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 num_events = ARRAY_SIZE(mgmt_untrusted_events);
397 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
399 rp = kmalloc(rp_size, GFP_KERNEL);
403 rp->num_commands = cpu_to_le16(num_commands);
404 rp->num_events = cpu_to_le16(num_events);
406 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 __le16 *opcode = rp->opcodes;
409 for (i = 0; i < num_commands; i++, opcode++)
410 put_unaligned_le16(mgmt_commands[i], opcode);
412 for (i = 0; i < num_events; i++, opcode++)
413 put_unaligned_le16(mgmt_events[i], opcode);
415 __le16 *opcode = rp->opcodes;
417 for (i = 0; i < num_commands; i++, opcode++)
418 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
420 for (i = 0; i < num_events; i++, opcode++)
421 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
424 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
431 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
434 struct mgmt_rp_read_index_list *rp;
440 bt_dev_dbg(hdev, "sock %p", sk);
442 read_lock(&hci_dev_list_lock);
445 list_for_each_entry(d, &hci_dev_list, list) {
446 if (d->dev_type == HCI_PRIMARY &&
447 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
451 rp_len = sizeof(*rp) + (2 * count);
452 rp = kmalloc(rp_len, GFP_ATOMIC);
454 read_unlock(&hci_dev_list_lock);
459 list_for_each_entry(d, &hci_dev_list, list) {
460 if (hci_dev_test_flag(d, HCI_SETUP) ||
461 hci_dev_test_flag(d, HCI_CONFIG) ||
462 hci_dev_test_flag(d, HCI_USER_CHANNEL))
465 /* Devices marked as raw-only are neither configured
466 * nor unconfigured controllers.
468 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
471 if (d->dev_type == HCI_PRIMARY &&
472 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
473 rp->index[count++] = cpu_to_le16(d->id);
474 bt_dev_dbg(hdev, "Added hci%u", d->id);
478 rp->num_controllers = cpu_to_le16(count);
479 rp_len = sizeof(*rp) + (2 * count);
481 read_unlock(&hci_dev_list_lock);
483 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
491 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
492 void *data, u16 data_len)
494 struct mgmt_rp_read_unconf_index_list *rp;
500 bt_dev_dbg(hdev, "sock %p", sk);
502 read_lock(&hci_dev_list_lock);
505 list_for_each_entry(d, &hci_dev_list, list) {
506 if (d->dev_type == HCI_PRIMARY &&
507 hci_dev_test_flag(d, HCI_UNCONFIGURED))
511 rp_len = sizeof(*rp) + (2 * count);
512 rp = kmalloc(rp_len, GFP_ATOMIC);
514 read_unlock(&hci_dev_list_lock);
519 list_for_each_entry(d, &hci_dev_list, list) {
520 if (hci_dev_test_flag(d, HCI_SETUP) ||
521 hci_dev_test_flag(d, HCI_CONFIG) ||
522 hci_dev_test_flag(d, HCI_USER_CHANNEL))
525 /* Devices marked as raw-only are neither configured
526 * nor unconfigured controllers.
528 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
531 if (d->dev_type == HCI_PRIMARY &&
532 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
533 rp->index[count++] = cpu_to_le16(d->id);
534 bt_dev_dbg(hdev, "Added hci%u", d->id);
538 rp->num_controllers = cpu_to_le16(count);
539 rp_len = sizeof(*rp) + (2 * count);
541 read_unlock(&hci_dev_list_lock);
543 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
544 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
551 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
552 void *data, u16 data_len)
554 struct mgmt_rp_read_ext_index_list *rp;
559 bt_dev_dbg(hdev, "sock %p", sk);
561 read_lock(&hci_dev_list_lock);
564 list_for_each_entry(d, &hci_dev_list, list) {
565 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
569 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
571 read_unlock(&hci_dev_list_lock);
576 list_for_each_entry(d, &hci_dev_list, list) {
577 if (hci_dev_test_flag(d, HCI_SETUP) ||
578 hci_dev_test_flag(d, HCI_CONFIG) ||
579 hci_dev_test_flag(d, HCI_USER_CHANNEL))
582 /* Devices marked as raw-only are neither configured
583 * nor unconfigured controllers.
585 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
588 if (d->dev_type == HCI_PRIMARY) {
589 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
590 rp->entry[count].type = 0x01;
592 rp->entry[count].type = 0x00;
593 } else if (d->dev_type == HCI_AMP) {
594 rp->entry[count].type = 0x02;
599 rp->entry[count].bus = d->bus;
600 rp->entry[count++].index = cpu_to_le16(d->id);
601 bt_dev_dbg(hdev, "Added hci%u", d->id);
604 rp->num_controllers = cpu_to_le16(count);
606 read_unlock(&hci_dev_list_lock);
608 /* If this command is called at least once, then all the
609 * default index and unconfigured index events are disabled
610 * and from now on only extended index events are used.
612 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
613 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
614 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
616 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
617 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
618 struct_size(rp, entry, count));
625 static bool is_configured(struct hci_dev *hdev)
627 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
631 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633 !bacmp(&hdev->public_addr, BDADDR_ANY))
639 static __le32 get_missing_options(struct hci_dev *hdev)
643 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
644 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
645 options |= MGMT_OPTION_EXTERNAL_CONFIG;
647 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
648 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
649 !bacmp(&hdev->public_addr, BDADDR_ANY))
650 options |= MGMT_OPTION_PUBLIC_ADDRESS;
652 return cpu_to_le32(options);
655 static int new_options(struct hci_dev *hdev, struct sock *skip)
657 __le32 options = get_missing_options(hdev);
659 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
660 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
663 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
665 __le32 options = get_missing_options(hdev);
667 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
671 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
672 void *data, u16 data_len)
674 struct mgmt_rp_read_config_info rp;
677 bt_dev_dbg(hdev, "sock %p", sk);
681 memset(&rp, 0, sizeof(rp));
682 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
684 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
685 options |= MGMT_OPTION_EXTERNAL_CONFIG;
687 if (hdev->set_bdaddr)
688 options |= MGMT_OPTION_PUBLIC_ADDRESS;
690 rp.supported_options = cpu_to_le32(options);
691 rp.missing_options = get_missing_options(hdev);
693 hci_dev_unlock(hdev);
695 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
699 static u32 get_supported_phys(struct hci_dev *hdev)
701 u32 supported_phys = 0;
703 if (lmp_bredr_capable(hdev)) {
704 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
706 if (hdev->features[0][0] & LMP_3SLOT)
707 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
709 if (hdev->features[0][0] & LMP_5SLOT)
710 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
712 if (lmp_edr_2m_capable(hdev)) {
713 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
715 if (lmp_edr_3slot_capable(hdev))
716 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
718 if (lmp_edr_5slot_capable(hdev))
719 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
721 if (lmp_edr_3m_capable(hdev)) {
722 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
724 if (lmp_edr_3slot_capable(hdev))
725 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
727 if (lmp_edr_5slot_capable(hdev))
728 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
733 if (lmp_le_capable(hdev)) {
734 supported_phys |= MGMT_PHY_LE_1M_TX;
735 supported_phys |= MGMT_PHY_LE_1M_RX;
737 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
738 supported_phys |= MGMT_PHY_LE_2M_TX;
739 supported_phys |= MGMT_PHY_LE_2M_RX;
742 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
743 supported_phys |= MGMT_PHY_LE_CODED_TX;
744 supported_phys |= MGMT_PHY_LE_CODED_RX;
748 return supported_phys;
751 static u32 get_selected_phys(struct hci_dev *hdev)
753 u32 selected_phys = 0;
755 if (lmp_bredr_capable(hdev)) {
756 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
758 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
759 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
761 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
762 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
764 if (lmp_edr_2m_capable(hdev)) {
765 if (!(hdev->pkt_type & HCI_2DH1))
766 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
768 if (lmp_edr_3slot_capable(hdev) &&
769 !(hdev->pkt_type & HCI_2DH3))
770 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
772 if (lmp_edr_5slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_2DH5))
774 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
776 if (lmp_edr_3m_capable(hdev)) {
777 if (!(hdev->pkt_type & HCI_3DH1))
778 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
780 if (lmp_edr_3slot_capable(hdev) &&
781 !(hdev->pkt_type & HCI_3DH3))
782 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
784 if (lmp_edr_5slot_capable(hdev) &&
785 !(hdev->pkt_type & HCI_3DH5))
786 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
791 if (lmp_le_capable(hdev)) {
792 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
793 selected_phys |= MGMT_PHY_LE_1M_TX;
795 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
796 selected_phys |= MGMT_PHY_LE_1M_RX;
798 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
799 selected_phys |= MGMT_PHY_LE_2M_TX;
801 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
802 selected_phys |= MGMT_PHY_LE_2M_RX;
804 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
805 selected_phys |= MGMT_PHY_LE_CODED_TX;
807 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
808 selected_phys |= MGMT_PHY_LE_CODED_RX;
811 return selected_phys;
814 static u32 get_configurable_phys(struct hci_dev *hdev)
816 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
820 static u32 get_supported_settings(struct hci_dev *hdev)
824 settings |= MGMT_SETTING_POWERED;
825 settings |= MGMT_SETTING_BONDABLE;
826 settings |= MGMT_SETTING_DEBUG_KEYS;
827 settings |= MGMT_SETTING_CONNECTABLE;
828 settings |= MGMT_SETTING_DISCOVERABLE;
830 if (lmp_bredr_capable(hdev)) {
831 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
832 settings |= MGMT_SETTING_FAST_CONNECTABLE;
833 settings |= MGMT_SETTING_BREDR;
834 settings |= MGMT_SETTING_LINK_SECURITY;
836 if (lmp_ssp_capable(hdev)) {
837 settings |= MGMT_SETTING_SSP;
838 if (IS_ENABLED(CONFIG_BT_HS))
839 settings |= MGMT_SETTING_HS;
842 if (lmp_sc_capable(hdev))
843 settings |= MGMT_SETTING_SECURE_CONN;
845 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
847 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
850 if (lmp_le_capable(hdev)) {
851 settings |= MGMT_SETTING_LE;
852 settings |= MGMT_SETTING_SECURE_CONN;
853 settings |= MGMT_SETTING_PRIVACY;
854 settings |= MGMT_SETTING_STATIC_ADDRESS;
855 settings |= MGMT_SETTING_ADVERTISING;
858 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
860 settings |= MGMT_SETTING_CONFIGURATION;
862 if (cis_central_capable(hdev))
863 settings |= MGMT_SETTING_CIS_CENTRAL;
865 if (cis_peripheral_capable(hdev))
866 settings |= MGMT_SETTING_CIS_PERIPHERAL;
868 settings |= MGMT_SETTING_PHY_CONFIGURATION;
873 static u32 get_current_settings(struct hci_dev *hdev)
877 if (hdev_is_powered(hdev))
878 settings |= MGMT_SETTING_POWERED;
880 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
881 settings |= MGMT_SETTING_CONNECTABLE;
883 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
884 settings |= MGMT_SETTING_FAST_CONNECTABLE;
886 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
887 settings |= MGMT_SETTING_DISCOVERABLE;
889 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
890 settings |= MGMT_SETTING_BONDABLE;
892 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
893 settings |= MGMT_SETTING_BREDR;
895 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
896 settings |= MGMT_SETTING_LE;
898 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
899 settings |= MGMT_SETTING_LINK_SECURITY;
901 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
902 settings |= MGMT_SETTING_SSP;
904 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
905 settings |= MGMT_SETTING_HS;
907 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
908 settings |= MGMT_SETTING_ADVERTISING;
910 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
911 settings |= MGMT_SETTING_SECURE_CONN;
913 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
914 settings |= MGMT_SETTING_DEBUG_KEYS;
916 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
917 settings |= MGMT_SETTING_PRIVACY;
919 /* The current setting for static address has two purposes. The
920 * first is to indicate if the static address will be used and
921 * the second is to indicate if it is actually set.
923 * This means if the static address is not configured, this flag
924 * will never be set. If the address is configured, then if the
925 * address is actually used decides if the flag is set or not.
927 * For single mode LE only controllers and dual-mode controllers
928 * with BR/EDR disabled, the existence of the static address will
931 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
932 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
933 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
934 if (bacmp(&hdev->static_addr, BDADDR_ANY))
935 settings |= MGMT_SETTING_STATIC_ADDRESS;
938 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
939 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
941 if (cis_central_capable(hdev))
942 settings |= MGMT_SETTING_CIS_CENTRAL;
944 if (cis_peripheral_capable(hdev))
945 settings |= MGMT_SETTING_CIS_PERIPHERAL;
947 if (bis_capable(hdev))
948 settings |= MGMT_SETTING_ISO_BROADCASTER;
950 if (sync_recv_capable(hdev))
951 settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
956 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
958 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
961 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
963 struct mgmt_pending_cmd *cmd;
965 /* If there's a pending mgmt command the flags will not yet have
966 * their final values, so check for this first.
968 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
970 struct mgmt_mode *cp = cmd->param;
972 return LE_AD_GENERAL;
973 else if (cp->val == 0x02)
974 return LE_AD_LIMITED;
976 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
977 return LE_AD_LIMITED;
978 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
979 return LE_AD_GENERAL;
985 bool mgmt_get_connectable(struct hci_dev *hdev)
987 struct mgmt_pending_cmd *cmd;
989 /* If there's a pending mgmt command the flag will not yet have
990 * it's final value, so check for this first.
992 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
994 struct mgmt_mode *cp = cmd->param;
999 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* hci_cmd_sync callback: push the cached EIR data and class of device
 * out to the controller.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
1010 static void service_cache_off(struct work_struct *work)
1012 struct hci_dev *hdev = container_of(work, struct hci_dev,
1013 service_cache.work);
1015 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1018 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1021 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1023 /* The generation of a new RPA and programming it into the
1024 * controller happens in the hci_req_enable_advertising()
1027 if (ext_adv_capable(hdev))
1028 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1030 return hci_enable_advertising_sync(hdev);
1033 static void rpa_expired(struct work_struct *work)
1035 struct hci_dev *hdev = container_of(work, struct hci_dev,
1038 bt_dev_dbg(hdev, "");
1040 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1042 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1045 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1048 static void discov_off(struct work_struct *work)
1050 struct hci_dev *hdev = container_of(work, struct hci_dev,
1053 bt_dev_dbg(hdev, "");
1057 /* When discoverable timeout triggers, then just make sure
1058 * the limited discoverable flag is cleared. Even in the case
1059 * of a timeout triggered from general discoverable, it is
1060 * safe to unconditionally clear the flag.
1062 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1063 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1064 hdev->discov_timeout = 0;
1066 hci_update_discoverable(hdev);
1068 mgmt_new_settings(hdev);
1070 hci_dev_unlock(hdev);
1073 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1075 static void mesh_send_complete(struct hci_dev *hdev,
1076 struct mgmt_mesh_tx *mesh_tx, bool silent)
1078 u8 handle = mesh_tx->handle;
1081 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1082 sizeof(handle), NULL);
1084 mgmt_mesh_remove(mesh_tx);
1087 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1089 struct mgmt_mesh_tx *mesh_tx;
1091 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1092 hci_disable_advertising_sync(hdev);
1093 mesh_tx = mgmt_mesh_next(hdev, NULL);
1096 mesh_send_complete(hdev, mesh_tx, false);
1101 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1102 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1103 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1105 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1110 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1111 mesh_send_start_complete);
1114 mesh_send_complete(hdev, mesh_tx, false);
1116 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1119 static void mesh_send_done(struct work_struct *work)
1121 struct hci_dev *hdev = container_of(work, struct hci_dev,
1122 mesh_send_done.work);
1124 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1127 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1130 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1132 if (hci_dev_test_flag(hdev, HCI_MGMT))
1135 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1137 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1138 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1139 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1140 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1142 /* Non-mgmt controlled devices get this bit set
1143 * implicitly so that pairing works for them, however
1144 * for mgmt we require user-space to explicitly enable
1147 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1149 hci_dev_set_flag(hdev, HCI_MGMT);
1152 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1153 void *data, u16 data_len)
1155 struct mgmt_rp_read_info rp;
1157 bt_dev_dbg(hdev, "sock %p", sk);
1161 memset(&rp, 0, sizeof(rp));
1163 bacpy(&rp.bdaddr, &hdev->bdaddr);
1165 rp.version = hdev->hci_ver;
1166 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1168 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1169 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1171 memcpy(rp.dev_class, hdev->dev_class, 3);
1173 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1174 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1176 hci_dev_unlock(hdev);
1178 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1182 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1187 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1188 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1189 hdev->dev_class, 3);
1191 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1192 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1195 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1196 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1197 hdev->dev_name, name_len);
1199 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1200 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1201 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info but with the
 * class/name information packed as EIR data appended to the fixed part.
 * Calling it also switches this socket from the legacy class/name events
 * to the single extended-info-changed event (flag flips below).
 */
1206 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1207 void *data, u16 data_len)
/* rp overlays a stack buffer (buf, declaration elided) large enough for
 * the fixed header plus the variable EIR payload.
 */
1210 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1213 bt_dev_dbg(hdev, "sock %p", sk);
1215 memset(&buf, 0, sizeof(buf));
1219 bacpy(&rp->bdaddr, &hdev->bdaddr);
1221 rp->version = hdev->hci_ver;
1222 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1224 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1225 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1228 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1229 rp->eir_len = cpu_to_le16(eir_len);
/* NOTE(review): the matching hci_dev_lock() is elided in this excerpt. */
1231 hci_dev_unlock(hdev);
1233 /* If this command is called at least once, then the events
1234 * for class of device and local name changes are disabled
1235 * and only the new extended controller information event
1238 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1239 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1240 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
/* Reply length is the fixed struct plus however much EIR was appended. */
1242 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1243 sizeof(*rp) + eir_len);
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (fresh EIR snapshot) to all mgmt
 * sockets that opted into extended info events, except @skip.
 */
1246 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
/* ev overlays a stack buffer (buf, declaration elided in this excerpt). */
1249 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1252 memset(buf, 0, sizeof(buf));
1254 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1255 ev->eir_len = cpu_to_le16(eir_len);
/* Only sockets with HCI_MGMT_EXT_INFO_EVENTS set receive this event. */
1257 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1258 sizeof(*ev) + eir_len,
1259 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Reply to @opcode with a command-complete carrying the current settings
 * bitmask (little-endian __le32).
 */
1262 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1264 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1266 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Emit MGMT_EV_ADVERTISING_ADDED for @instance to all mgmt sockets
 * except the originator @sk.
 */
1270 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1272 struct mgmt_ev_advertising_added ev;
1274 ev.instance = instance;
1276 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADVERTISING_REMOVED for @instance to all mgmt sockets
 * except the originator @sk.
 */
1279 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1282 struct mgmt_ev_advertising_removed ev;
1284 ev.instance = instance;
1286 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Stop a pending advertising-instance expiry: clear the recorded timeout
 * first, then cancel the delayed work that would have expired it.
 */
1289 static void cancel_adv_timeout(struct hci_dev *hdev)
1291 if (hdev->adv_instance_timeout) {
1292 hdev->adv_instance_timeout = 0;
1293 cancel_delayed_work(&hdev->adv_instance_expire);
1297 /* This function requires the caller holds hdev->lock */
/* Re-sort every LE connection parameter entry into the pend_le_conns or
 * pend_le_reports list according to its auto_connect policy, after first
 * detaching it from whichever list it was on.
 */
1298 static void restart_le_actions(struct hci_dev *hdev)
1300 struct hci_conn_params *p;
1302 list_for_each_entry(p, &hdev->le_conn_params, list) {
1303 /* Needed for AUTO_OFF case where might not "really"
1304 * have been powered off.
1306 hci_pend_le_list_del_init(p);
1308 switch (p->auto_connect) {
1309 case HCI_AUTO_CONN_DIRECT:
1310 case HCI_AUTO_CONN_ALWAYS:
1311 hci_pend_le_list_add(p, &hdev->pend_le_conns);
1313 case HCI_AUTO_CONN_REPORT:
1314 hci_pend_le_list_add(p, &hdev->pend_le_reports);
/* Broadcast MGMT_EV_NEW_SETTINGS (current settings bitmask) to sockets
 * with HCI_MGMT_SETTING_EVENTS set, except @skip.
 */
1322 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1324 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1326 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1327 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Completion callback for the Set Powered command queued via
 * hci_cmd_sync: on success, restore LE auto-connect actions and passive
 * scanning, answer the pending command and (for power on) broadcast
 * new settings; on error, return a mapped mgmt status instead.
 */
1330 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1332 struct mgmt_pending_cmd *cmd = data;
1333 struct mgmt_mode *cp;
1335 /* Make sure cmd still outstanding. */
1336 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1341 bt_dev_dbg(hdev, "err %d", err);
1346 restart_le_actions(hdev);
1347 hci_update_passive_scan(hdev);
1348 hci_dev_unlock(hdev);
1351 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1353 /* Only call new_setting for power on as power off is deferred
1354 * to hdev->power_off work which does call hci_dev_do_close.
1357 new_settings(hdev, cmd->sk);
/* Error path: report the translated status to the requester. */
1359 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1363 mgmt_pending_remove(cmd);
/* hci_cmd_sync worker for Set Powered: apply the requested power state
 * (cp->val) synchronously on the controller.
 */
1366 static int set_powered_sync(struct hci_dev *hdev, void *data)
1368 struct mgmt_pending_cmd *cmd = data;
1369 struct mgmt_mode *cp = cmd->param;
1371 BT_DBG("%s", hdev->name);
1373 return hci_set_powered_sync(hdev, cp->val);
/* MGMT_OP_SET_POWERED handler: validate the request, short-circuit when
 * the state already matches, otherwise queue set_powered_sync with
 * mgmt_set_powered_complete as completion.
 */
1376 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1379 struct mgmt_mode *cp = data;
1380 struct mgmt_pending_cmd *cmd;
1383 bt_dev_dbg(hdev, "sock %p", sk);
/* Only 0x00 (off) and 0x01 (on) are valid values. */
1385 if (cp->val != 0x00 && cp->val != 0x01)
1386 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1387 MGMT_STATUS_INVALID_PARAMS);
/* Reject if another Set Powered is already in flight. */
1391 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1392 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Already in the requested state: just echo the current settings. */
1397 if (!!cp->val == hdev_is_powered(hdev)) {
1398 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1402 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1408 /* Cancel potentially blocking sync operation before power off */
1409 if (cp->val == 0x00) {
1410 __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
1411 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1412 mgmt_set_powered_complete);
1414 /* Use hci_cmd_sync_submit since hdev might not be running */
1415 err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
1416 mgmt_set_powered_complete);
/* Queueing failed: drop the pending command (error path, guard elided). */
1420 mgmt_pending_remove(cmd);
1423 hci_dev_unlock(hdev);
/* Public wrapper: broadcast new settings to every listener (no socket
 * skipped).
 */
1427 int mgmt_new_settings(struct hci_dev *hdev)
1429 return new_settings(hdev, NULL);
1434 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer @cmd with the current settings,
 * remember the first requester socket in the cmd_lookup match (taking a
 * reference), and free the pending command.
 */
1438 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1440 struct cmd_lookup *match = data;
1442 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1444 list_del(&cmd->list);
/* Keep one socket alive so the caller can skip it in new_settings(). */
1446 if (match->sk == NULL) {
1447 match->sk = cmd->sk;
1448 sock_hold(match->sk);
1451 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail @cmd with *(u8 *)data as the mgmt
 * status and remove it.
 */
1454 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1458 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1459 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the command's own cmd_complete
 * handler when set; otherwise fall back to a plain status response.
 */
1462 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1464 if (cmd->cmd_complete) {
1467 cmd->cmd_complete(cmd, *status);
1468 mgmt_pending_remove(cmd);
1473 cmd_status_rsp(cmd, data);
/* Generic cmd_complete implementation: echo the original request
 * parameters back as the response payload.
 */
1476 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1478 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1479 cmd->param, cmd->param_len);
/* cmd_complete implementation for address-based commands: reply with
 * only the leading mgmt_addr_info portion of the request parameters.
 */
1482 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1484 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1485 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, else
 * SUCCESS.
 */
1488 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1490 if (!lmp_bredr_capable(hdev))
1491 return MGMT_STATUS_NOT_SUPPORTED;
1492 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1493 return MGMT_STATUS_REJECTED;
1495 return MGMT_STATUS_SUCCESS;
/* Map LE availability to a mgmt status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is disabled, else SUCCESS.
 */
1498 static u8 mgmt_le_support(struct hci_dev *hdev)
1500 if (!lmp_le_capable(hdev))
1501 return MGMT_STATUS_NOT_SUPPORTED;
1502 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1503 return MGMT_STATUS_REJECTED;
1505 return MGMT_STATUS_SUCCESS;
/* Completion callback for Set Discoverable: on error, fail the pending
 * command and drop limited-discoverable; on success, arm the
 * discoverable timeout (if any), answer the command and broadcast new
 * settings.
 */
1508 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1511 struct mgmt_pending_cmd *cmd = data;
1513 bt_dev_dbg(hdev, "err %d", err);
1515 /* Make sure cmd still outstanding. */
1516 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1522 u8 mgmt_err = mgmt_status(err);
1523 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1524 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Success: schedule automatic discoverable-off after the timeout. */
1528 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1529 hdev->discov_timeout > 0) {
1530 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1531 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1534 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1535 new_settings(hdev, cmd->sk);
1538 mgmt_pending_remove(cmd);
1539 hci_dev_unlock(hdev);
/* hci_cmd_sync worker for Set Discoverable: push the (already updated)
 * discoverable flags to the controller.
 */
1542 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1544 BT_DBG("%s", hdev->name);
1546 return hci_update_discoverable_sync(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general,
 * 0x02 limited discoverable. Validates timeout constraints, handles
 * the powered-off and no-HCI-change fast paths, then updates the
 * DISCOVERABLE/LIMITED_DISCOVERABLE flags and queues
 * set_discoverable_sync.
 */
1549 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1552 struct mgmt_cp_set_discoverable *cp = data;
1553 struct mgmt_pending_cmd *cmd;
1557 bt_dev_dbg(hdev, "sock %p", sk);
/* Need at least one of LE or BR/EDR enabled to be discoverable. */
1559 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1560 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1561 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1562 MGMT_STATUS_REJECTED);
1564 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1566 MGMT_STATUS_INVALID_PARAMS);
1568 timeout = __le16_to_cpu(cp->timeout);
1570 /* Disabling discoverable requires that no timeout is set,
1571 * and enabling limited discoverable requires a timeout.
1573 if ((cp->val == 0x00 && timeout > 0) ||
1574 (cp->val == 0x02 && timeout == 0))
1575 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1576 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered controller. */
1580 if (!hdev_is_powered(hdev) && timeout > 0) {
1581 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1582 MGMT_STATUS_NOT_POWERED);
/* Serialize against in-flight discoverable/connectable changes. */
1586 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1587 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1588 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable. */
1593 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1594 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1595 MGMT_STATUS_REJECTED);
1599 if (hdev->advertising_paused) {
1600 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Powered off: just flip the flag and report, no HCI traffic. */
1605 if (!hdev_is_powered(hdev)) {
1606 bool changed = false;
1608 /* Setting limited discoverable when powered off is
1609 * not a valid operation since it requires a timeout
1610 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1612 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1613 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1617 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1622 err = new_settings(hdev, sk);
1627 /* If the current mode is the same, then just update the timeout
1628 * value with the new value. And if only the timeout gets updated,
1629 * then no need for any HCI transactions.
1631 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1632 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1633 HCI_LIMITED_DISCOVERABLE)) {
1634 cancel_delayed_work(&hdev->discov_off);
1635 hdev->discov_timeout = timeout;
1637 if (cp->val && hdev->discov_timeout > 0) {
1638 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1639 queue_delayed_work(hdev->req_workqueue,
1640 &hdev->discov_off, to);
1643 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1647 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1653 /* Cancel any potential discoverable timeout that might be
1654 * still active and store new timeout value. The arming of
1655 * the timeout happens in the complete handler.
1657 cancel_delayed_work(&hdev->discov_off);
1658 hdev->discov_timeout = timeout;
/* Update flags now; the sync worker pushes them to the controller. */
1661 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1663 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1665 /* Limited discoverable mode */
1666 if (cp->val == 0x02)
1667 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1669 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1671 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1672 mgmt_set_discoverable_complete);
1675 mgmt_pending_remove(cmd);
1678 hci_dev_unlock(hdev);
/* Completion callback for Set Connectable: report error status, or
 * answer the pending command and broadcast new settings on success.
 */
1682 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1685 struct mgmt_pending_cmd *cmd = data;
1687 bt_dev_dbg(hdev, "err %d", err);
1689 /* Make sure cmd still outstanding. */
1690 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1696 u8 mgmt_err = mgmt_status(err);
1697 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1701 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1702 new_settings(hdev, cmd->sk);
1706 mgmt_pending_remove(cmd);
1708 hci_dev_unlock(hdev);
/* Flag-only Set Connectable path (used when powered off): update
 * CONNECTABLE (clearing DISCOVERABLE when turning connectable off),
 * send the settings response, and broadcast new settings if anything
 * changed.
 */
1711 static int set_connectable_update_settings(struct hci_dev *hdev,
1712 struct sock *sk, u8 val)
1714 bool changed = false;
1717 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1721 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Non-connectable can't stay discoverable. */
1723 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1724 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1727 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1732 hci_update_scan(hdev);
1733 hci_update_passive_scan(hdev);
1734 return new_settings(hdev, sk);
/* hci_cmd_sync worker for Set Connectable: push the updated connectable
 * state to the controller.
 */
1740 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1742 BT_DBG("%s", hdev->name);
1744 return hci_update_connectable_sync(hdev);
/* MGMT_OP_SET_CONNECTABLE handler: validate, take the flag-only path
 * when powered off, otherwise update flags (clearing discoverable state
 * when going non-connectable) and queue set_connectable_sync.
 */
1747 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1750 struct mgmt_mode *cp = data;
1751 struct mgmt_pending_cmd *cmd;
1754 bt_dev_dbg(hdev, "sock %p", sk);
1756 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1757 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1758 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1759 MGMT_STATUS_REJECTED);
1761 if (cp->val != 0x00 && cp->val != 0x01)
1762 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1763 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: settings-only update, no HCI traffic needed. */
1767 if (!hdev_is_powered(hdev)) {
1768 err = set_connectable_update_settings(hdev, sk, cp->val);
1772 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1773 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1774 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1779 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1786 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off also cancels any discoverable timeout and
 * clears both discoverable flags.
 */
1788 if (hdev->discov_timeout > 0)
1789 cancel_delayed_work(&hdev->discov_off);
1791 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1792 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1793 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1796 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1797 mgmt_set_connectable_complete);
1800 mgmt_pending_remove(cmd);
1803 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggle HCI_BONDABLE, reply with the
 * settings, refresh discoverable state (bondable can affect the
 * advertising address in limited privacy) and broadcast new settings
 * when the flag actually changed.
 */
1807 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1810 struct mgmt_mode *cp = data;
1814 bt_dev_dbg(hdev, "sock %p", sk);
1816 if (cp->val != 0x00 && cp->val != 0x01)
1817 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1818 MGMT_STATUS_INVALID_PARAMS);
/* test-and-set/clear returns the prior state, so 'changed' is exact. */
1823 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1825 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1827 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1832 /* In limited privacy mode the change of bondable mode
1833 * may affect the local advertising address.
1835 hci_update_discoverable(hdev);
1837 err = new_settings(hdev, sk);
1841 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: BR/EDR only. When powered off,
 * just toggle HCI_LINK_SECURITY; when powered, send
 * HCI_OP_WRITE_AUTH_ENABLE to the controller (legacy direct HCI send,
 * not the cmd_sync machinery).
 */
1845 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1848 struct mgmt_mode *cp = data;
1849 struct mgmt_pending_cmd *cmd;
1853 bt_dev_dbg(hdev, "sock %p", sk);
1855 status = mgmt_bredr_support(hdev);
1857 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1860 if (cp->val != 0x00 && cp->val != 0x01)
1861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1862 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only update plus settings broadcast if changed. */
1866 if (!hdev_is_powered(hdev)) {
1867 bool changed = false;
1869 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1870 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1874 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1879 err = new_settings(hdev, sk);
1884 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1885 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: nothing to send. */
1892 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1893 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1897 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1903 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1905 mgmt_pending_remove(cmd);
1910 hci_dev_unlock(hdev);
/* Completion callback for Set SSP: on error, roll back the SSP flag
 * (and HS, which depends on SSP) and fail all pending SET_SSP commands;
 * on success, reconcile the SSP/HS flags, answer all pending commands,
 * broadcast new settings if changed, and refresh the EIR.
 */
1914 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1916 struct cmd_lookup match = { NULL, hdev };
1917 struct mgmt_pending_cmd *cmd = data;
1918 struct mgmt_mode *cp = cmd->param;
1919 u8 enable = cp->val;
1922 /* Make sure cmd still outstanding. */
1923 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1927 u8 mgmt_err = mgmt_status(err);
/* Error while enabling: undo the optimistic SSP flag set. */
1929 if (enable && hci_dev_test_and_clear_flag(hdev,
1931 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1932 new_settings(hdev, NULL);
1935 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1941 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1943 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1946 changed = hci_dev_test_and_clear_flag(hdev,
/* HS cannot stay enabled without SSP. */
1949 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1952 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1955 new_settings(hdev, match.sk);
1960 hci_update_eir_sync(hdev);
/* hci_cmd_sync worker for Set SSP: optimistically set HCI_SSP_ENABLED
 * when enabling, write the mode to the controller, and roll the flag
 * back if the write failed.
 */
1963 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1965 struct mgmt_pending_cmd *cmd = data;
1966 struct mgmt_mode *cp = cmd->param;
1967 bool changed = false;
1971 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1973 err = hci_write_ssp_mode_sync(hdev, cp->val);
/* Undo the optimistic flag flip on failure. */
1975 if (!err && changed)
1976 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
/* MGMT_OP_SET_SSP handler: requires BR/EDR and an SSP-capable
 * controller. Powered off is a flag-only update; powered on queues
 * set_ssp_sync with set_ssp_complete.
 */
1981 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1983 struct mgmt_mode *cp = data;
1984 struct mgmt_pending_cmd *cmd;
1988 bt_dev_dbg(hdev, "sock %p", sk);
1990 status = mgmt_bredr_support(hdev);
1992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1994 if (!lmp_ssp_capable(hdev))
1995 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1996 MGMT_STATUS_NOT_SUPPORTED);
1998 if (cp->val != 0x00 && cp->val != 0x01)
1999 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2000 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: just flip SSP (and clear HS when disabling). */
2004 if (!hdev_is_powered(hdev)) {
2008 changed = !hci_dev_test_and_set_flag(hdev,
2011 changed = hci_dev_test_and_clear_flag(hdev,
2014 changed = hci_dev_test_and_clear_flag(hdev,
2017 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2020 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2025 err = new_settings(hdev, sk);
2030 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2031 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
/* No change requested: echo current settings. */
2036 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2037 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2041 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2045 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2049 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2050 MGMT_STATUS_FAILED);
2053 mgmt_pending_remove(cmd);
2057 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler (High Speed / AMP): only available when
 * CONFIG_BT_HS is built in, BR/EDR works and SSP is enabled. This is a
 * flag-only setting; disabling while powered is rejected.
 */
2061 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2063 struct mgmt_mode *cp = data;
2068 bt_dev_dbg(hdev, "sock %p", sk);
2070 if (!IS_ENABLED(CONFIG_BT_HS))
2071 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2072 MGMT_STATUS_NOT_SUPPORTED);
2074 status = mgmt_bredr_support(hdev);
2076 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2078 if (!lmp_ssp_capable(hdev))
2079 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2080 MGMT_STATUS_NOT_SUPPORTED);
/* HS requires SSP to be enabled first. */
2082 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2083 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2084 MGMT_STATUS_REJECTED);
2086 if (cp->val != 0x00 && cp->val != 0x01)
2087 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2088 MGMT_STATUS_INVALID_PARAMS);
/* Don't race with an in-flight SSP change. */
2092 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2093 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2099 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
/* Disabling HS while powered is not allowed. */
2101 if (hdev_is_powered(hdev)) {
2102 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2103 MGMT_STATUS_REJECTED);
2107 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2110 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2115 err = new_settings(hdev, sk);
2118 hci_dev_unlock(hdev);
/* Completion callback for Set LE: fail all pending SET_LE commands on
 * error, otherwise answer them with the settings and broadcast new
 * settings.
 */
2122 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2124 struct cmd_lookup match = { NULL, hdev };
2125 u8 status = mgmt_status(err);
2127 bt_dev_dbg(hdev, "err %d", err);
2130 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2135 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2137 new_settings(hdev, match.sk);
/* hci_cmd_sync worker for Set LE. Disabling tears down advertising
 * (instances, legacy adv, ext-adv instance 0); enabling sets the flag,
 * writes LE host support, and re-seeds default advertising data.
 * Finishes by refreshing passive scanning either way.
 */
2143 static int set_le_sync(struct hci_dev *hdev, void *data)
2145 struct mgmt_pending_cmd *cmd = data;
2146 struct mgmt_mode *cp = cmd->param;
/* Disable path: remove all adv instances before turning LE off. */
2151 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2153 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2154 hci_disable_advertising_sync(hdev);
2156 if (ext_adv_capable(hdev))
2157 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2159 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2162 err = hci_write_le_host_supported_sync(hdev, val, 0);
2164 /* Make sure the controller has a good default for
2165 * advertising data. Restrict the update to when LE
2166 * has actually been enabled. During power on, the
2167 * update in powered_update_hci will take care of it.
2169 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2170 if (ext_adv_capable(hdev)) {
2173 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2175 hci_update_scan_rsp_data_sync(hdev, 0x00);
2177 hci_update_adv_data_sync(hdev, 0x00);
2178 hci_update_scan_rsp_data_sync(hdev, 0x00);
2181 hci_update_passive_scan(hdev);
/* Completion callback for Set Mesh Receiver: on error fail all pending
 * SET_MESH_RECEIVER commands, on success remove this one and send an
 * empty command-complete.
 */
2187 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2189 struct mgmt_pending_cmd *cmd = data;
2190 u8 status = mgmt_status(err);
2191 struct sock *sk = cmd->sk;
2194 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2195 cmd_status_rsp, &status);
2199 mgmt_pending_remove(cmd);
2200 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
/* hci_cmd_sync worker for Set Mesh Receiver: toggle HCI_MESH, record
 * the caller-supplied AD-type filter list, and refresh passive scan.
 */
2203 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2205 struct mgmt_pending_cmd *cmd = data;
2206 struct mgmt_cp_set_mesh *cp = cmd->param;
2207 size_t len = cmd->param_len;
2209 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2212 hci_dev_set_flag(hdev, HCI_MESH);
2214 hci_dev_clear_flag(hdev, HCI_MESH);
2218 /* If filters don't fit, forward all adv pkts */
2219 if (len <= sizeof(hdev->mesh_ad_types))
2220 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2222 hci_update_passive_scan_sync(hdev);
/* MGMT_OP_SET_MESH_RECEIVER handler: gated on LE capability and the
 * mesh-experimental flag; validates enable and queues set_mesh_sync.
 */
2226 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2228 struct mgmt_cp_set_mesh *cp = data;
2229 struct mgmt_pending_cmd *cmd;
2232 bt_dev_dbg(hdev, "sock %p", sk);
2234 if (!lmp_le_capable(hdev) ||
2235 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2236 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2237 MGMT_STATUS_NOT_SUPPORTED);
2239 if (cp->enable != 0x00 && cp->enable != 0x01)
2240 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2241 MGMT_STATUS_INVALID_PARAMS);
2245 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2249 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2253 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2254 MGMT_STATUS_FAILED);
2257 mgmt_pending_remove(cmd);
2260 hci_dev_unlock(hdev);
/* Completion callback for a queued mesh transmit: on failure, clear the
 * sending flag and finish the tx with an error; on success, schedule
 * mesh_send_done after cnt * 25 ms.
 */
2264 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2266 struct mgmt_mesh_tx *mesh_tx = data;
2267 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2268 unsigned long mesh_send_interval;
2269 u8 mgmt_err = mgmt_status(err);
2271 /* Report any errors here, but don't report completion */
2274 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2275 /* Send Complete Error Code for handle */
2276 mesh_send_complete(hdev, mesh_tx, false);
/* 25 ms per requested advertising event. */
2280 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2281 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2282 mesh_send_interval);
/* hci_cmd_sync worker for Mesh Send: create a one-shot advertising
 * instance (numbered one past the controller's normal set count)
 * carrying the mesh payload and schedule it, preempting the current
 * instance if necessary.
 */
2285 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2287 struct mgmt_mesh_tx *mesh_tx = data;
2288 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2289 struct adv_info *adv, *next_instance;
/* Reserved instance slot beyond the regular advertising sets. */
2290 u8 instance = hdev->le_num_of_adv_sets + 1;
2291 u16 timeout, duration;
2294 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2295 return MGMT_STATUS_BUSY;
2298 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2299 adv = hci_add_adv_instance(hdev, instance, 0,
2300 send->adv_data_len, send->adv_data,
2303 HCI_ADV_TX_POWER_NO_PREFERENCE,
2304 hdev->le_adv_min_interval,
2305 hdev->le_adv_max_interval,
2309 mesh_tx->instance = instance;
2313 if (hdev->cur_adv_instance == instance) {
2314 /* If the currently advertised instance is being changed then
2315 * cancel the current advertising and schedule the next
2316 * instance. If there is only one instance then the overridden
2317 * advertising data will be visible right away.
2319 cancel_adv_timeout(hdev);
2321 next_instance = hci_get_next_instance(hdev, instance);
2323 instance = next_instance->instance;
2326 } else if (hdev->adv_instance_timeout) {
2327 /* Immediately advertise the new instance if no other, or
2328 * let it go naturally from queue if ADV is already happening
2334 return hci_schedule_adv_instance_sync(hdev, instance, true);
/* mgmt_mesh_foreach callback: record each outstanding mesh tx handle
 * into the read-features reply, capped at max_handles.
 */
2339 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2341 struct mgmt_rp_mesh_read_features *rp = data;
2343 if (rp->used_handles >= rp->max_handles)
2346 rp->handles[rp->used_handles++] = mesh_tx->handle;
/* MGMT_OP_MESH_READ_FEATURES handler: report the controller index, the
 * handle capacity (only when LE is enabled) and this socket's currently
 * outstanding mesh tx handles; the reply is truncated to the used
 * handle slots.
 */
2349 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2350 void *data, u16 len)
2352 struct mgmt_rp_mesh_read_features rp;
2354 if (!lmp_le_capable(hdev) ||
2355 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2356 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2357 MGMT_STATUS_NOT_SUPPORTED);
2359 memset(&rp, 0, sizeof(rp));
2360 rp.index = cpu_to_le16(hdev->id);
2361 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2362 rp.max_handles = MESH_HANDLES_MAX;
2367 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
/* Trim the fixed-size handle array down to the entries actually used. */
2369 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2370 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2372 hci_dev_unlock(hdev);
/* hci_cmd_sync worker for Mesh Send Cancel: handle 0 cancels all of the
 * requester's outstanding transmits; otherwise cancel the one matching
 * handle, but only if it belongs to the requesting socket.
 */
2376 static int send_cancel(struct hci_dev *hdev, void *data)
2378 struct mgmt_pending_cmd *cmd = data;
2379 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2380 struct mgmt_mesh_tx *mesh_tx;
2382 if (!cancel->handle) {
2384 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2387 mesh_send_complete(hdev, mesh_tx, false);
2390 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
/* Ownership check: a socket may only cancel its own transmits. */
2392 if (mesh_tx && mesh_tx->sk == cmd->sk)
2393 mesh_send_complete(hdev, mesh_tx, false);
2396 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2398 mgmt_pending_free(cmd);
/* MGMT_OP_MESH_SEND_CANCEL handler: gated on LE + mesh-experimental and
 * LE being enabled; queues the send_cancel worker.
 */
2403 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2404 void *data, u16 len)
2406 struct mgmt_pending_cmd *cmd;
2409 if (!lmp_le_capable(hdev) ||
2410 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2412 MGMT_STATUS_NOT_SUPPORTED);
2414 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2416 MGMT_STATUS_REJECTED);
/* mgmt_pending_new (not _add): the cmd is not tracked for lookup. */
2419 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2423 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2427 MGMT_STATUS_FAILED);
2430 mgmt_pending_free(cmd);
2433 hci_dev_unlock(hdev);
/* MGMT_OP_MESH_SEND handler: validate payload length (header plus at
 * most 31 bytes of adv data), check handle availability, register the
 * tx and queue mesh_send_sync; replies immediately with the assigned
 * handle.
 */
2437 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2439 struct mgmt_mesh_tx *mesh_tx;
2440 struct mgmt_cp_mesh_send *send = data;
2441 struct mgmt_rp_mesh_read_features rp;
2445 if (!lmp_le_capable(hdev) ||
2446 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2448 MGMT_STATUS_NOT_SUPPORTED);
/* Adv payload is limited to 31 bytes beyond the fixed header. */
2449 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2450 len <= MGMT_MESH_SEND_SIZE ||
2451 len > (MGMT_MESH_SEND_SIZE + 31))
2452 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2453 MGMT_STATUS_REJECTED);
/* Reuse the read-features reply struct just to count used handles. */
2457 memset(&rp, 0, sizeof(rp));
2458 rp.max_handles = MESH_HANDLES_MAX;
2460 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2462 if (rp.max_handles <= rp.used_handles) {
2463 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2468 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2469 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2474 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2475 mesh_send_start_complete);
2478 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2479 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2480 MGMT_STATUS_FAILED);
2484 mgmt_mesh_remove(mesh_tx);
2487 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
/* Success: hand the 1-byte tx handle back to the caller right away. */
2489 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2490 &mesh_tx->handle, 1);
2494 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: LE-only controllers cannot switch LE off
 * (enable is acknowledged, disable rejected); powered-off or no-change
 * requests are flag-only; otherwise queues set_le_sync.
 */
2498 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2500 struct mgmt_mode *cp = data;
2501 struct mgmt_pending_cmd *cmd;
2505 bt_dev_dbg(hdev, "sock %p", sk);
2507 if (!lmp_le_capable(hdev))
2508 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2509 MGMT_STATUS_NOT_SUPPORTED);
2511 if (cp->val != 0x00 && cp->val != 0x01)
2512 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2513 MGMT_STATUS_INVALID_PARAMS);
2515 /* Bluetooth single mode LE only controllers or dual-mode
2516 * controllers configured as LE only devices, do not allow
2517 * switching LE off. These have either LE enabled explicitly
2518 * or BR/EDR has been previously switched off.
2520 * When trying to enable an already enabled LE, then gracefully
2521 * send a positive response. Trying to disable it however will
2522 * result into rejection.
2524 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2525 if (cp->val == 0x01)
2526 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2528 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2529 MGMT_STATUS_REJECTED);
2535 enabled = lmp_host_le_capable(hdev);
/* Powered off or already in the requested state: flag-only update. */
2537 if (!hdev_is_powered(hdev) || val == enabled) {
2538 bool changed = false;
2540 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2541 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Disabling LE also drops the advertising setting. */
2545 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2546 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2550 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2555 err = new_settings(hdev, sk);
/* Don't race with a pending LE or advertising change. */
2560 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2561 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2562 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2567 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2571 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2575 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2576 MGMT_STATUS_FAILED);
2579 mgmt_pending_remove(cmd);
2583 hci_dev_unlock(hdev);
2587 /* This is a helper function to test for pending mgmt commands that can
2588 * cause CoD or EIR HCI commands. We can only allow one such pending
2589 * mgmt command at a time since otherwise we cannot easily track what
2590 * the current values are, will be, and based on that calculate if a new
2591 * HCI command needs to be sent and if yes with what value.
/* Returns whether any such command is currently pending. */
2593 static bool pending_eir_or_class(struct hci_dev *hdev)
2595 struct mgmt_pending_cmd *cmd;
2597 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2598 switch (cmd->opcode) {
2599 case MGMT_OP_ADD_UUID:
2600 case MGMT_OP_REMOVE_UUID:
2601 case MGMT_OP_SET_DEV_CLASS:
2602 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are aliases of it.
 */
2610 static const u8 bluetooth_base_uuid[] = {
2611 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2612 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if its low 12 bytes match the Bluetooth base
 * UUID it is really a 16- or 32-bit UUID (decided from the value at
 * offset 12); otherwise it is a full 128-bit UUID.
 */
2615 static u8 get_uuid_size(const u8 *uuid)
2619 if (memcmp(uuid, bluetooth_base_uuid, 12))
2622 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for class/EIR-affecting commands: reply with the
 * (possibly updated) 3-byte device class and free the pending command.
 */
2629 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2631 struct mgmt_pending_cmd *cmd = data;
2633 bt_dev_dbg(hdev, "err %d", err);
2635 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2636 mgmt_status(err), hdev->dev_class, 3);
2638 mgmt_pending_free(cmd);
/* hci_cmd_sync worker for Add UUID: refresh class of device, then the
 * EIR (which embeds the service UUID list).
 */
2641 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2645 err = hci_update_class_sync(hdev);
2649 return hci_update_eir_sync(hdev);
/* MGMT_OP_ADD_UUID handler: refuse while another class/EIR command is
 * pending, append the UUID (with hint and computed size) to hdev->uuids
 * and queue add_uuid_sync with mgmt_class_complete.
 */
2652 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2654 struct mgmt_cp_add_uuid *cp = data;
2655 struct mgmt_pending_cmd *cmd;
2656 struct bt_uuid *uuid;
2659 bt_dev_dbg(hdev, "sock %p", sk);
2663 if (pending_eir_or_class(hdev)) {
2664 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2669 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2675 memcpy(uuid->uuid, cp->uuid, 16);
2676 uuid->svc_hint = cp->svc_hint;
2677 uuid->size = get_uuid_size(cp->uuid);
2679 list_add_tail(&uuid->list, &hdev->uuids);
2681 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2687 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2689 mgmt_pending_free(cmd);
2694 hci_dev_unlock(hdev);
/* Arm the service-cache delayed work (powered controllers only);
 * returns whether the cache was newly enabled (per the visible
 * test-and-set; the return statements themselves are elided here).
 */
2698 static bool enable_service_cache(struct hci_dev *hdev)
2700 if (!hdev_is_powered(hdev))
2703 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2704 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_cmd_sync worker for Remove UUID: refresh class of device, then
 * the EIR.
 */
2712 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2716 err = hci_update_class_sync(hdev);
2720 return hci_update_eir_sync(hdev);
/* MGMT_OP_REMOVE_UUID handler. An all-zero UUID clears the whole
 * list (optionally just arming the service cache instead of touching
 * the controller); otherwise remove matching entries and queue a
 * class/EIR update. Several error/cleanup lines are elided.
 */
2723 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2726 struct mgmt_cp_remove_uuid *cp = data;
2727 struct mgmt_pending_cmd *cmd;
2728 struct bt_uuid *match, *tmp;
/* Wildcard: all-zero UUID means "remove everything" */
2729 static const u8 bt_uuid_any[] = {
2730 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2734 bt_dev_dbg(hdev, "sock %p", sk);
/* Refuse while another class/EIR-affecting command is pending */
2738 if (pending_eir_or_class(hdev)) {
2739 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2744 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2745 hci_uuids_clear(hdev);
/* If the service cache could be armed, reply immediately; the
 * controller update happens when the cache timer fires.
 */
2747 if (enable_service_cache(hdev)) {
2748 err = mgmt_cmd_complete(sk, hdev->id,
2749 MGMT_OP_REMOVE_UUID,
2750 0, hdev->dev_class, 3);
/* _safe variant: entries are deleted while walking the list */
2759 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2760 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2763 list_del(&match->list);
/* No entry matched (tracked in elided lines) -> invalid params */
2769 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2770 MGMT_STATUS_INVALID_PARAMS);
2775 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2781 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2782 mgmt_class_complete);
2784 mgmt_pending_free(cmd);
2787 hci_dev_unlock(hdev);
/* Synchronous worker for MGMT_OP_SET_DEV_CLASS: flush a pending
 * service-cache update first (so EIR reflects the real UUID list),
 * then write the new class of device to the controller.
 */
2791 static int set_class_sync(struct hci_dev *hdev, void *data)
2795 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
/* cancel_..._sync: the cache work must not run concurrently */
2796 cancel_delayed_work_sync(&hdev->service_cache);
2797 err = hci_update_eir_sync(hdev);
2803 return hci_update_class_sync(hdev);
/* MGMT_OP_SET_DEV_CLASS handler: validate and store the major/minor
 * class and, when powered, queue the controller update. Some error
 * and cleanup lines are elided in this listing.
 */
2806 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2809 struct mgmt_cp_set_dev_class *cp = data;
2810 struct mgmt_pending_cmd *cmd;
2813 bt_dev_dbg(hdev, "sock %p", sk);
/* Class of Device is a BR/EDR concept only */
2815 if (!lmp_bredr_capable(hdev))
2816 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2817 MGMT_STATUS_NOT_SUPPORTED);
2821 if (pending_eir_or_class(hdev)) {
2822 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved bits: low 2 bits of minor and high 3 bits of major must
 * be zero per the Class of Device format.
 */
2827 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2828 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2829 MGMT_STATUS_INVALID_PARAMS);
2833 hdev->major_class = cp->major;
2834 hdev->minor_class = cp->minor;
/* Not powered: stored values take effect at power on; reply now */
2836 if (!hdev_is_powered(hdev)) {
2837 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2838 hdev->dev_class, 3);
2842 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2848 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2849 mgmt_class_complete);
2851 mgmt_pending_free(cmd);
2854 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validate the variable-length key
 * list, replace the stored BR/EDR link keys, and update the
 * debug-keys policy. Some lines (loop bodies, returns) are elided.
 */
2858 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2861 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound that keeps struct_size() below U16_MAX (len is u16) */
2862 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2863 sizeof(struct mgmt_link_key_info));
2864 u16 key_count, expected_len;
2868 bt_dev_dbg(hdev, "sock %p", sk);
2870 if (!lmp_bredr_capable(hdev))
2871 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2872 MGMT_STATUS_NOT_SUPPORTED);
2874 key_count = __le16_to_cpu(cp->key_count);
2875 if (key_count > max_key_count) {
2876 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2878 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2879 MGMT_STATUS_INVALID_PARAMS);
/* Exact-length check: header plus key_count trailing elements */
2882 expected_len = struct_size(cp, keys, key_count);
2883 if (expected_len != len) {
2884 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2886 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2887 MGMT_STATUS_INVALID_PARAMS);
2890 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2891 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2892 MGMT_STATUS_INVALID_PARAMS);
2894 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: reject the whole load if any key type is invalid */
2897 for (i = 0; i < key_count; i++) {
2898 struct mgmt_link_key_info *key = &cp->keys[i];
2900 /* Considering SMP over BREDR/LE, there is no need to check addr_type */
2901 if (key->type > 0x08)
2902 return mgmt_cmd_status(sk, hdev->id,
2903 MGMT_OP_LOAD_LINK_KEYS,
2904 MGMT_STATUS_INVALID_PARAMS);
/* Replace, don't merge: drop all previously stored link keys */
2909 hci_link_keys_clear(hdev);
2912 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2914 changed = hci_dev_test_and_clear_flag(hdev,
2915 HCI_KEEP_DEBUG_KEYS);
/* Notify listeners if the debug-keys setting actually changed */
2918 new_settings(hdev, NULL);
/* Second pass: store the keys, skipping blocked and debug keys */
2920 for (i = 0; i < key_count; i++) {
2921 struct mgmt_link_key_info *key = &cp->keys[i];
2923 if (hci_is_blocked_key(hdev,
2924 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2926 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2931 /* Always ignore debug keys and require a new pairing if
2932 * the user wants to use them.
2934 if (key->type == HCI_LK_DEBUG_COMBINATION)
2937 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2938 key->type, key->pin_len, NULL);
2941 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2943 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for the given address to all
 * management sockets except @skip_sk (the command originator,
 * which gets the command reply instead).
 */
2948 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2949 u8 addr_type, struct sock *skip_sk)
2951 struct mgmt_ev_device_unpaired ev;
2953 bacpy(&ev.addr.bdaddr, bdaddr);
2954 ev.addr.type = addr_type;
2956 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* hci_cmd_sync completion for MGMT_OP_UNPAIR_DEVICE: broadcast the
 * unpaired event (condition line elided), finish the command via its
 * cmd_complete callback and free the pending entry.
 */
2960 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2962 struct mgmt_pending_cmd *cmd = data;
2963 struct mgmt_cp_unpair_device *cp = cmd->param;
2966 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2968 cmd->cmd_complete(cmd, err);
2969 mgmt_pending_free(cmd);
/* Synchronous worker for MGMT_OP_UNPAIR_DEVICE: look up the live
 * connection for the address (ACL for BR/EDR, LE otherwise) and
 * abort it; the no-connection early return is elided in this listing.
 */
2972 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2974 struct mgmt_pending_cmd *cmd = data;
2975 struct mgmt_cp_unpair_device *cp = cmd->param;
2976 struct hci_conn *conn;
2978 if (cp->addr.type == BDADDR_BREDR)
2979 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2982 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2983 le_addr_type(cp->addr.type));
/* Terminate with the standard "remote user terminated" reason */
2988 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
/* MGMT_OP_UNPAIR_DEVICE handler: remove stored keys (link key for
 * BR/EDR, SMP LTK/IRK for LE), disable auto-connection, and — if
 * requested and a connection exists — queue link termination.
 * Several branch/cleanup lines are elided in this listing.
 */
2991 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2994 struct mgmt_cp_unpair_device *cp = data;
2995 struct mgmt_rp_unpair_device rp;
2996 struct hci_conn_params *params;
2997 struct mgmt_pending_cmd *cmd;
2998 struct hci_conn *conn;
3002 memset(&rp, 0, sizeof(rp));
3003 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3004 rp.addr.type = cp->addr.type;
3006 if (!bdaddr_type_is_valid(cp->addr.type))
3007 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3008 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a boolean flag; anything else is invalid */
3011 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3012 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3013 MGMT_STATUS_INVALID_PARAMS,
3018 if (!hdev_is_powered(hdev)) {
3019 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3020 MGMT_STATUS_NOT_POWERED, &rp,
3025 if (cp->addr.type == BDADDR_BREDR) {
3026 /* If disconnection is requested, then look up the
3027 * connection. If the remote device is connected, it
3028 * will be later used to terminate the link.
3030 * Setting it to NULL explicitly will cause no
3031 * termination of the link.
3034 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
/* No stored link key means the device was never paired */
3039 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3041 err = mgmt_cmd_complete(sk, hdev->id,
3042 MGMT_OP_UNPAIR_DEVICE,
3043 MGMT_STATUS_NOT_PAIRED, &rp,
3051 /* LE address type */
3052 addr_type = le_addr_type(cp->addr.type);
3054 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3055 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3057 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3058 MGMT_STATUS_NOT_PAIRED, &rp,
3063 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
/* No live LE connection: drop the conn params immediately */
3065 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3070 /* Defer clearing up the connection parameters until closing to
3071 * give a chance of keeping them if a repairing happens.
3073 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3075 /* Disable auto-connection parameters if present */
3076 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3078 if (params->explicit_connect)
3079 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3081 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3084 /* If disconnection is not requested, then clear the connection
3085 * variable so that the link is not terminated.
3087 if (!cp->disconnect)
3091 /* If the connection variable is set, then termination of the
3092 * link is requested.
/* No termination needed: reply now and broadcast the event */
3095 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3097 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3101 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3108 cmd->cmd_complete = addr_cmd_complete;
3110 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3111 unpair_device_complete);
3113 mgmt_pending_free(cmd);
3116 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: validate state, find the live
 * connection (ACL or LE) and issue an HCI disconnect; the reply is
 * sent when the disconnect completes. Some lines are elided.
 */
3120 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3123 struct mgmt_cp_disconnect *cp = data;
3124 struct mgmt_rp_disconnect rp;
3125 struct mgmt_pending_cmd *cmd;
3126 struct hci_conn *conn;
3129 bt_dev_dbg(hdev, "sock %p", sk);
3131 memset(&rp, 0, sizeof(rp));
3132 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3133 rp.addr.type = cp->addr.type;
3135 if (!bdaddr_type_is_valid(cp->addr.type))
3136 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3137 MGMT_STATUS_INVALID_PARAMS,
3142 if (!test_bit(HCI_UP, &hdev->flags)) {
3143 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3144 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one disconnect command may be outstanding at a time */
3149 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3150 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3151 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3155 if (cp->addr.type == BDADDR_BREDR)
3156 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3159 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3160 le_addr_type(cp->addr.type));
/* BT_OPEN/BT_CLOSED are not established states -> nothing to drop */
3162 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3163 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3164 MGMT_STATUS_NOT_CONNECTED, &rp,
3169 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3175 cmd->cmd_complete = generic_cmd_complete;
3177 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3179 mgmt_pending_remove(cmd);
3182 hci_dev_unlock(hdev);
/* Translate an HCI link type + address type pair into the MGMT
 * BDADDR_* address-type encoding (switch labels partly elided).
 */
3186 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3188 switch (link_type) {
3190 switch (addr_type) {
3191 case ADDR_LE_DEV_PUBLIC:
3192 return BDADDR_LE_PUBLIC;
3195 /* Fallback to LE Random address type */
3196 return BDADDR_LE_RANDOM;
3200 /* Fallback to BR/EDR type */
3201 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: count MGMT-visible connections,
 * build a variable-length reply of their addresses (SCO/eSCO links
 * are skipped), and send it. Some lines are elided in this listing.
 */
3205 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3208 struct mgmt_rp_get_connections *rp;
3213 bt_dev_dbg(hdev, "sock %p", sk);
3217 if (!hdev_is_powered(hdev)) {
3218 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3219 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections that MGMT has been told about */
3224 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3225 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3229 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in the address array */
3236 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3237 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3239 bacpy(&rp->addr[i].bdaddr, &c->dst);
3240 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
/* SCO/eSCO links are carried by an ACL and not reported separately */
3241 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3246 rp->conn_count = cpu_to_le16(i);
3248 /* Recalculate length in case of filtered SCO connections, etc */
3249 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3250 struct_size(rp, addr, i));
3255 hci_dev_unlock(hdev);
/* Helper: register a pending MGMT_OP_PIN_CODE_NEG_REPLY and send the
 * corresponding HCI negative reply for the given address.
 */
3259 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3260 struct mgmt_cp_pin_code_neg_reply *cp)
3262 struct mgmt_pending_cmd *cmd;
3265 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3270 cmd->cmd_complete = addr_cmd_complete;
/* HCI negative reply carries only the bdaddr */
3272 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3273 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
/* Undo the pending entry if the HCI send failed */
3275 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: validate the connection and PIN
 * length, then forward the PIN to the controller. A high-security
 * link requires a full 16-byte PIN; shorter PINs are auto-rejected
 * with a negative reply. Some lines are elided in this listing.
 */
3280 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3283 struct hci_conn *conn;
3284 struct mgmt_cp_pin_code_reply *cp = data;
3285 struct hci_cp_pin_code_reply reply;
3286 struct mgmt_pending_cmd *cmd;
3289 bt_dev_dbg(hdev, "sock %p", sk);
3293 if (!hdev_is_powered(hdev)) {
3294 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3295 MGMT_STATUS_NOT_POWERED);
3299 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3301 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3302 MGMT_STATUS_NOT_CONNECTED);
/* High security demands a 16-digit PIN; reject anything shorter */
3306 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3307 struct mgmt_cp_pin_code_neg_reply ncp;
3309 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3311 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3313 err = send_pin_code_neg_reply(sk, hdev, &ncp);
/* If the negative reply went out, report INVALID_PARAMS to caller */
3315 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3316 MGMT_STATUS_INVALID_PARAMS);
3321 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3327 cmd->cmd_complete = addr_cmd_complete;
3329 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3330 reply.pin_len = cp->pin_len;
3331 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3333 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3335 mgmt_pending_remove(cmd);
3338 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used
 * for subsequent pairings; replies synchronously.
 */
3342 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3345 struct mgmt_cp_set_io_capability *cp = data;
3347 bt_dev_dbg(hdev, "sock %p", sk);
/* KEYBOARD_DISPLAY is the highest defined IO capability value */
3349 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3350 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3351 MGMT_STATUS_INVALID_PARAMS);
3355 hdev->io_capability = cp->io_capability;
3357 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3359 hci_dev_unlock(hdev);
3361 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command associated with the
 * given connection (user_data links cmd -> conn); the return lines
 * are elided in this listing.
 */
3365 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3367 struct hci_dev *hdev = conn->hdev;
3368 struct mgmt_pending_cmd *cmd;
3370 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3371 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3374 if (cmd->user_data != conn)
/* cmd_complete callback for MGMT_OP_PAIR_DEVICE: send the final
 * reply, detach all pairing callbacks from the connection, and drop
 * the reference taken when pairing started.
 */
3383 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3385 struct mgmt_rp_pair_device rp;
3386 struct hci_conn *conn = cmd->user_data;
3389 bacpy(&rp.addr.bdaddr, &conn->dst);
3390 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3392 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3393 status, &rp, sizeof(rp));
3395 /* So we don't get further callbacks for this connection */
3396 conn->connect_cfm_cb = NULL;
3397 conn->security_cfm_cb = NULL;
3398 conn->disconn_cfm_cb = NULL;
3400 hci_conn_drop(conn);
3402 /* The device is paired so there is no need to remove
3403 * its connection parameters anymore.
3405 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by SMP when pairing finishes: complete any pending
 * PAIR_DEVICE command with SUCCESS or FAILED.
 */
3412 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3414 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3415 struct mgmt_pending_cmd *cmd;
3417 cmd = find_pairing(conn);
3419 cmd->cmd_complete(cmd, status);
3420 mgmt_pending_remove(cmd);
/* BR/EDR connection/security callback: finish the pending pairing
 * command with the HCI status mapped to an MGMT status.
 */
3424 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3426 struct mgmt_pending_cmd *cmd;
3428 BT_DBG("status %u", status);
3430 cmd = find_pairing(conn);
3432 BT_DBG("Unable to find a pending command");
3436 cmd->cmd_complete(cmd, mgmt_status(status));
3437 mgmt_pending_remove(cmd);
/* LE variant of pairing_complete_cb; an early-return condition
 * (around original line 3445) is elided in this listing.
 */
3440 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3442 struct mgmt_pending_cmd *cmd;
3444 BT_DBG("status %u", status);
3449 cmd = find_pairing(conn);
3451 BT_DBG("Unable to find a pending command");
3455 cmd->cmd_complete(cmd, mgmt_status(status));
3456 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: validate parameters, establish an ACL
 * (BR/EDR) or LE connection, register pairing callbacks and a pending
 * command, and kick off security if already connected. Some error
 * paths are elided in this listing.
 */
3459 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3462 struct mgmt_cp_pair_device *cp = data;
3463 struct mgmt_rp_pair_device rp;
3464 struct mgmt_pending_cmd *cmd;
3465 u8 sec_level, auth_type;
3466 struct hci_conn *conn;
3469 bt_dev_dbg(hdev, "sock %p", sk);
3471 memset(&rp, 0, sizeof(rp));
3472 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3473 rp.addr.type = cp->addr.type;
3475 if (!bdaddr_type_is_valid(cp->addr.type))
3476 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3477 MGMT_STATUS_INVALID_PARAMS,
3480 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3481 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3482 MGMT_STATUS_INVALID_PARAMS,
3487 if (!hdev_is_powered(hdev)) {
3488 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3489 MGMT_STATUS_NOT_POWERED, &rp,
/* Re-pairing an already paired address is rejected outright */
3494 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3495 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3496 MGMT_STATUS_ALREADY_PAIRED, &rp,
3501 sec_level = BT_SECURITY_MEDIUM;
3502 auth_type = HCI_AT_DEDICATED_BONDING;
3504 if (cp->addr.type == BDADDR_BREDR) {
3505 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3506 auth_type, CONN_REASON_PAIR_DEVICE);
3508 u8 addr_type = le_addr_type(cp->addr.type);
3509 struct hci_conn_params *p;
3511 /* When pairing a new device, it is expected to remember
3512 * this device for future connections. Adding the connection
3513 * parameter information ahead of time allows tracking
3514 * of the peripheral preferred values and will speed up any
3515 * further connection establishment.
3517 * If connection parameters already exist, then they
3518 * will be kept and this function does nothing.
3520 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3522 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3523 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3525 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3526 sec_level, HCI_LE_CONN_TIMEOUT,
3527 CONN_REASON_PAIR_DEVICE);
/* Map connect errors onto MGMT status codes */
3533 if (PTR_ERR(conn) == -EBUSY)
3534 status = MGMT_STATUS_BUSY;
3535 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3536 status = MGMT_STATUS_NOT_SUPPORTED;
3537 else if (PTR_ERR(conn) == -ECONNREFUSED)
3538 status = MGMT_STATUS_REJECTED;
3540 status = MGMT_STATUS_CONNECT_FAILED;
3542 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3543 status, &rp, sizeof(rp));
/* A connect_cfm_cb already set means another pairing owns this conn */
3547 if (conn->connect_cfm_cb) {
3548 hci_conn_drop(conn);
3549 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3550 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3554 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3557 hci_conn_drop(conn);
3561 cmd->cmd_complete = pairing_complete;
3563 /* For LE, just connecting isn't a proof that the pairing finished */
3564 if (cp->addr.type == BDADDR_BREDR) {
3565 conn->connect_cfm_cb = pairing_complete_cb;
3566 conn->security_cfm_cb = pairing_complete_cb;
3567 conn->disconn_cfm_cb = pairing_complete_cb;
3569 conn->connect_cfm_cb = le_pairing_complete_cb;
3570 conn->security_cfm_cb = le_pairing_complete_cb;
3571 conn->disconn_cfm_cb = le_pairing_complete_cb;
3574 conn->io_capability = cp->io_cap;
/* Hold a conn reference for the lifetime of the pending command */
3575 cmd->user_data = hci_conn_get(conn);
/* Already connected and secure enough: complete immediately */
3577 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3578 hci_conn_security(conn, sec_level, auth_type, true)) {
3579 cmd->cmd_complete(cmd, 0);
3580 mgmt_pending_remove(cmd);
3586 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending PAIR_DEVICE
 * command for the given address, remove any keys created so far, and
 * tear down a link created solely for pairing. Some lines elided.
 */
3590 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3593 struct mgmt_addr_info *addr = data;
3594 struct mgmt_pending_cmd *cmd;
3595 struct hci_conn *conn;
3598 bt_dev_dbg(hdev, "sock %p", sk);
3602 if (!hdev_is_powered(hdev)) {
3603 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3604 MGMT_STATUS_NOT_POWERED);
/* No pairing in progress -> nothing to cancel */
3608 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3610 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3611 MGMT_STATUS_INVALID_PARAMS);
3615 conn = cmd->user_data;
/* Address must match the pairing actually in progress */
3617 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3618 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3619 MGMT_STATUS_INVALID_PARAMS);
3623 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3624 mgmt_pending_remove(cmd);
3626 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3627 addr, sizeof(*addr));
3629 /* Since user doesn't want to proceed with the connection, abort any
3630 * ongoing pairing and then terminate the link if it was created
3631 * because of the pair device action.
3633 if (addr->type == BDADDR_BREDR)
3634 hci_remove_link_key(hdev, &addr->bdaddr);
3636 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3637 le_addr_type(addr->type));
3639 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3640 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3643 hci_dev_unlock(hdev);
/* Common backend for the user confirm/passkey (neg-)reply commands:
 * route LE replies through SMP, BR/EDR replies through HCI. Some
 * lines (locking, returns) are elided in this listing.
 */
3647 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3648 struct mgmt_addr_info *addr, u16 mgmt_op,
3649 u16 hci_op, __le32 passkey)
3651 struct mgmt_pending_cmd *cmd;
3652 struct hci_conn *conn;
3657 if (!hdev_is_powered(hdev)) {
3658 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3659 MGMT_STATUS_NOT_POWERED, addr,
3664 if (addr->type == BDADDR_BREDR)
3665 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3667 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3668 le_addr_type(addr->type));
3671 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3672 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE: SMP handles the reply entirely; no HCI command needed */
3677 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3678 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3680 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3681 MGMT_STATUS_SUCCESS, addr,
3684 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3685 MGMT_STATUS_FAILED, addr,
3691 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3697 cmd->cmd_complete = addr_cmd_complete;
3699 /* Continue with pairing via HCI */
/* Passkey replies carry bdaddr + passkey; all others just bdaddr */
3700 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3701 struct hci_cp_user_passkey_reply cp;
3703 bacpy(&cp.bdaddr, &addr->bdaddr);
3704 cp.passkey = passkey;
3705 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3707 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3711 mgmt_pending_remove(cmd);
3714 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3718 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3719 void *data, u16 len)
3721 struct mgmt_cp_pin_code_neg_reply *cp = data;
3723 bt_dev_dbg(hdev, "sock %p", sk);
3725 return user_pairing_resp(sk, hdev, &cp->addr,
3726 MGMT_OP_PIN_CODE_NEG_REPLY,
3727 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: exact-length check, then
 * delegate to user_pairing_resp() (no passkey for confirm).
 */
3730 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3733 struct mgmt_cp_user_confirm_reply *cp = data;
3735 bt_dev_dbg(hdev, "sock %p", sk);
3737 if (len != sizeof(*cp))
3738 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3739 MGMT_STATUS_INVALID_PARAMS);
3741 return user_pairing_resp(sk, hdev, &cp->addr,
3742 MGMT_OP_USER_CONFIRM_REPLY,
3743 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: delegate to
 * user_pairing_resp() with the negative-reply HCI opcode.
 */
3746 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3747 void *data, u16 len)
3749 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3751 bt_dev_dbg(hdev, "sock %p", sk);
3753 return user_pairing_resp(sk, hdev, &cp->addr,
3754 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3755 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forwards the user's passkey
 * through user_pairing_resp().
 */
3758 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3761 struct mgmt_cp_user_passkey_reply *cp = data;
3763 bt_dev_dbg(hdev, "sock %p", sk);
3765 return user_pairing_resp(sk, hdev, &cp->addr,
3766 MGMT_OP_USER_PASSKEY_REPLY,
3767 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: delegate to
 * user_pairing_resp() with the negative-reply HCI opcode.
 */
3770 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3771 void *data, u16 len)
3773 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3775 bt_dev_dbg(hdev, "sock %p", sk);
3777 return user_pairing_resp(sk, hdev, &cp->addr,
3778 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3779 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* If the current advertising instance uses any of the given flags
 * (e.g. local name or appearance that just changed), expire it and
 * reschedule the next instance so the new data is advertised.
 */
3782 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3784 struct adv_info *adv_instance;
3786 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3790 /* stop if current instance doesn't need to be changed */
3791 if (!(adv_instance->flags & flags))
3794 cancel_adv_timeout(hdev);
3796 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3800 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
/* Sync callback: expire advertising instances that include the
 * local name after a name change.
 */
3805 static int name_changed_sync(struct hci_dev *hdev, void *data)
3807 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
/* hci_cmd_sync completion for MGMT_OP_SET_LOCAL_NAME: send status or
 * the final name payload, and refresh LE advertising if active.
 */
3810 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3812 struct mgmt_pending_cmd *cmd = data;
3813 struct mgmt_cp_set_local_name *cp = cmd->param;
3814 u8 status = mgmt_status(err);
3816 bt_dev_dbg(hdev, "err %d", err);
/* Bail if this command was already canceled/replaced */
3818 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3822 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3825 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Name may be carried in adv/scan-rsp data; expire if advertising */
3828 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3829 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3832 mgmt_pending_remove(cmd);
/* Synchronous worker for MGMT_OP_SET_LOCAL_NAME: push the new name
 * to the controller over BR/EDR (name + EIR) and LE (scan response).
 */
3835 static int set_name_sync(struct hci_dev *hdev, void *data)
3837 if (lmp_bredr_capable(hdev)) {
3838 hci_update_name_sync(hdev);
3839 hci_update_eir_sync(hdev);
3842 /* The name is stored in the scan response data and so
3843 * no need to update the advertising data here.
3845 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3846 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_LOCAL_NAME handler: store short/long names; when not
 * powered only notify listeners, otherwise queue the controller
 * update. Some error-path lines are elided in this listing.
 */
3851 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3854 struct mgmt_cp_set_local_name *cp = data;
3855 struct mgmt_pending_cmd *cmd;
3858 bt_dev_dbg(hdev, "sock %p", sk);
3862 /* If the old values are the same as the new ones just return a
3863 * direct command complete event.
3865 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3866 !memcmp(hdev->short_name, cp->short_name,
3867 sizeof(hdev->short_name))) {
3868 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name is host-side only; store it unconditionally */
3873 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3875 if (!hdev_is_powered(hdev)) {
3876 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3878 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Not powered: broadcast the change to listeners only */
3883 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3884 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3885 ext_info_changed(hdev, sk);
3890 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3894 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3898 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3899 MGMT_STATUS_FAILED);
3902 mgmt_pending_remove(cmd);
/* Queued OK: commit the full name so the sync worker reads it */
3907 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3910 hci_dev_unlock(hdev);
/* Sync callback: expire advertising instances that include the
 * appearance field after it changed.
 */
3914 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3916 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
/* MGMT_OP_SET_APPEARANCE handler: store the LE appearance value and
 * refresh advertising / extended info if it actually changed.
 */
3919 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3922 struct mgmt_cp_set_appearance *cp = data;
3926 bt_dev_dbg(hdev, "sock %p", sk);
/* Appearance is an LE-only GAP characteristic */
3928 if (!lmp_le_capable(hdev))
3929 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3930 MGMT_STATUS_NOT_SUPPORTED);
3932 appearance = le16_to_cpu(cp->appearance);
3936 if (hdev->appearance != appearance) {
3937 hdev->appearance = appearance;
/* If advertising, re-advertise with the new appearance */
3939 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3940 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3943 ext_info_changed(hdev, sk);
3946 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3949 hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report supported, selected
 * and configurable PHYs; purely a read, replies synchronously.
 */
3954 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3955 void *data, u16 len)
3957 struct mgmt_rp_get_phy_configuration rp;
3959 bt_dev_dbg(hdev, "sock %p", sk);
3963 memset(&rp, 0, sizeof(rp));
3965 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3966 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3967 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3969 hci_dev_unlock(hdev);
3971 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast MGMT_EV_PHY_CONFIGURATION_CHANGED with the currently
 * selected PHYs to all listeners except @skip.
 */
3975 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3977 struct mgmt_ev_phy_configuration_changed ev;
3979 memset(&ev, 0, sizeof(ev));
3981 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3983 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* hci_cmd_sync completion for MGMT_OP_SET_PHY_CONFIGURATION: derive
 * the final status from err / the HCI reply skb, send the reply,
 * broadcast the change on success, and clean up.
 */
3987 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3989 struct mgmt_pending_cmd *cmd = data;
3990 struct sk_buff *skb = cmd->skb;
3991 u8 status = mgmt_status(err);
/* Bail if this command was already canceled/replaced */
3993 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3998 status = MGMT_STATUS_FAILED;
3999 else if (IS_ERR(skb))
4000 status = mgmt_status(PTR_ERR(skb));
/* First byte of the HCI event payload is the controller status */
4002 status = mgmt_status(skb->data[0]);
4005 bt_dev_dbg(hdev, "status %d", status);
4008 mgmt_cmd_status(cmd->sk, hdev->id,
4009 MGMT_OP_SET_PHY_CONFIGURATION, status);
4011 mgmt_cmd_complete(cmd->sk, hdev->id,
4012 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4015 mgmt_phy_configuration_changed(hdev, cmd->sk);
/* Free the HCI reply skb if one was actually returned */
4018 if (skb && !IS_ERR(skb))
4021 mgmt_pending_remove(cmd);
/* Synchronous worker: translate the MGMT PHY selection bitmask into
 * an HCI_OP_LE_SET_DEFAULT_PHY command and issue it, stashing the
 * reply skb on the pending command for the completion handler.
 */
4024 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4026 struct mgmt_pending_cmd *cmd = data;
4027 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4028 struct hci_cp_le_set_default_phy cp_phy;
4029 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4031 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" per direction */
4033 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4034 cp_phy.all_phys |= 0x01;
4036 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4037 cp_phy.all_phys |= 0x02;
/* Map each MGMT PHY bit onto the HCI TX/RX PHY preference bits */
4039 if (selected_phys & MGMT_PHY_LE_1M_TX)
4040 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4042 if (selected_phys & MGMT_PHY_LE_2M_TX)
4043 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4045 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4046 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4048 if (selected_phys & MGMT_PHY_LE_1M_RX)
4049 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4051 if (selected_phys & MGMT_PHY_LE_2M_RX)
4052 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4054 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4055 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
/* Blocking HCI round-trip; reply processed in the completion cb */
4057 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4058 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
/* MGMT_OP_SET_PHY_CONFIGURATION handler: validate the requested PHY
 * bitmask, apply BR/EDR selections immediately by editing
 * hdev->pkt_type, and queue an HCI command for LE PHY changes.
 * Some error-path lines are elided in this listing.
 */
4063 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4064 void *data, u16 len)
4066 struct mgmt_cp_set_phy_configuration *cp = data;
4067 struct mgmt_pending_cmd *cmd;
4068 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
/* DH1/DM1 are always allowed; other packet types toggled below */
4069 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4070 bool changed = false;
4073 bt_dev_dbg(hdev, "sock %p", sk);
4075 configurable_phys = get_configurable_phys(hdev);
4076 supported_phys = get_supported_phys(hdev);
4077 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Reject selection of PHYs the controller does not support */
4079 if (selected_phys & ~supported_phys)
4080 return mgmt_cmd_status(sk, hdev->id,
4081 MGMT_OP_SET_PHY_CONFIGURATION,
4082 MGMT_STATUS_INVALID_PARAMS);
4084 unconfigure_phys = supported_phys & ~configurable_phys;
/* Non-configurable PHYs must all remain selected */
4086 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4087 return mgmt_cmd_status(sk, hdev->id,
4088 MGMT_OP_SET_PHY_CONFIGURATION,
4089 MGMT_STATUS_INVALID_PARAMS);
/* No change requested -> reply immediately */
4091 if (selected_phys == get_selected_phys(hdev))
4092 return mgmt_cmd_complete(sk, hdev->id,
4093 MGMT_OP_SET_PHY_CONFIGURATION,
4098 if (!hdev_is_powered(hdev)) {
4099 err = mgmt_cmd_status(sk, hdev->id,
4100 MGMT_OP_SET_PHY_CONFIGURATION,
4101 MGMT_STATUS_REJECTED);
4105 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4106 err = mgmt_cmd_status(sk, hdev->id,
4107 MGMT_OP_SET_PHY_CONFIGURATION,
/* BR/EDR multi-slot bits: selecting a PHY enables its packets.
 * NOTE: EDR bits below are inverted because pkt_type stores them
 * as "shall NOT be used" flags.
 */
4112 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4113 pkt_type |= (HCI_DH3 | HCI_DM3);
4115 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4117 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4118 pkt_type |= (HCI_DH5 | HCI_DM5);
4120 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4122 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4123 pkt_type &= ~HCI_2DH1;
4125 pkt_type |= HCI_2DH1;
4127 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4128 pkt_type &= ~HCI_2DH3;
4130 pkt_type |= HCI_2DH3;
4132 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4133 pkt_type &= ~HCI_2DH5;
4135 pkt_type |= HCI_2DH5;
4137 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4138 pkt_type &= ~HCI_3DH1;
4140 pkt_type |= HCI_3DH1;
4142 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4143 pkt_type &= ~HCI_3DH3;
4145 pkt_type |= HCI_3DH3;
4147 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4148 pkt_type &= ~HCI_3DH5;
4150 pkt_type |= HCI_3DH5;
4152 if (pkt_type != hdev->pkt_type) {
4153 hdev->pkt_type = pkt_type;
/* If only BR/EDR bits changed, no HCI LE command is required */
4157 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4158 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4160 mgmt_phy_configuration_changed(hdev, sk);
4162 err = mgmt_cmd_complete(sk, hdev->id,
4163 MGMT_OP_SET_PHY_CONFIGURATION,
4169 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4174 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4175 set_default_phy_complete);
4178 err = mgmt_cmd_status(sk, hdev->id,
4179 MGMT_OP_SET_PHY_CONFIGURATION,
4180 MGMT_STATUS_FAILED);
4183 mgmt_pending_remove(cmd);
4187 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS handler: validates the caller-supplied key
 * list length, then atomically replaces hdev->blocked_keys with the new
 * entries.  Blocked keys are refused when peers try to distribute them.
 * NOTE(review): this view is elided — hci_dev_lock() and several loop
 * braces are not visible; the lock is presumably taken before the list
 * is cleared.
 */
4192 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4195 int err = MGMT_STATUS_SUCCESS;
4196 struct mgmt_cp_set_blocked_keys *keys = data;
/* Upper bound on key_count such that struct_size() below cannot exceed
 * the u16 message length.
 */
4197 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4198 sizeof(struct mgmt_blocked_key_info));
4199 u16 key_count, expected_len;
4202 bt_dev_dbg(hdev, "sock %p", sk);
4204 key_count = __le16_to_cpu(keys->key_count);
4205 if (key_count > max_key_count) {
4206 bt_dev_err(hdev, "too big key_count value %u", key_count);
4207 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4208 MGMT_STATUS_INVALID_PARAMS);
/* Exact-length check: header plus key_count trailing entries. */
4211 expected_len = struct_size(keys, keys, key_count);
4212 if (expected_len != len) {
4213 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4215 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4216 MGMT_STATUS_INVALID_PARAMS);
/* Old list is dropped even if a later allocation fails. */
4221 hci_blocked_keys_clear(hdev);
4223 for (i = 0; i < key_count; ++i) {
4224 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4227 err = MGMT_STATUS_NO_RESOURCES;
4231 b->type = keys->keys[i].type;
4232 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4233 list_add_rcu(&b->list, &hdev->blocked_keys);
4235 hci_dev_unlock(hdev);
4237 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggles the
 * HCI_WIDEBAND_SPEECH_ENABLED flag.  Only valid when the driver set the
 * HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED quirk, and changes are rejected
 * while the controller is powered.
 */
4241 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4242 void *data, u16 len)
4244 struct mgmt_mode *cp = data;
4246 bool changed = false;
4248 bt_dev_dbg(hdev, "sock %p", sk);
4250 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4251 return mgmt_cmd_status(sk, hdev->id,
4252 MGMT_OP_SET_WIDEBAND_SPEECH,
4253 MGMT_STATUS_NOT_SUPPORTED);
/* Only boolean on/off is a valid parameter. */
4255 if (cp->val != 0x00 && cp->val != 0x01)
4256 return mgmt_cmd_status(sk, hdev->id,
4257 MGMT_OP_SET_WIDEBAND_SPEECH,
4258 MGMT_STATUS_INVALID_PARAMS);
/* Reject a change of state while powered; toggling would require a
 * controller reconfiguration.
 */
4262 if (hdev_is_powered(hdev) &&
4263 !!cp->val != hci_dev_test_flag(hdev,
4264 HCI_WIDEBAND_SPEECH_ENABLED)) {
4265 err = mgmt_cmd_status(sk, hdev->id,
4266 MGMT_OP_SET_WIDEBAND_SPEECH,
4267 MGMT_STATUS_REJECTED);
/* test-and-set/clear report whether the flag actually changed. */
4272 changed = !hci_dev_test_and_set_flag(hdev,
4273 HCI_WIDEBAND_SPEECH_ENABLED);
4275 changed = hci_dev_test_and_clear_flag(hdev,
4276 HCI_WIDEBAND_SPEECH_ENABLED);
4278 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
/* Broadcast New Settings only on an actual change. */
4283 err = new_settings(hdev, sk);
4286 hci_dev_unlock(hdev);
/* MGMT_OP_READ_CONTROLLER_CAP handler: builds an EIR-encoded capability
 * list (security flags, max encryption key sizes, LE TX power range)
 * and returns it to the caller.
 */
4290 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4291 void *data, u16 data_len)
4294 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4297 u8 tx_power_range[2];
4299 bt_dev_dbg(hdev, "sock %p", sk);
4301 memset(&buf, 0, sizeof(buf));
4305 /* When the Read Simple Pairing Options command is supported, then
4306 * the remote public key validation is supported.
4308 * Alternatively, when Microsoft extensions are available, they can
4309 * indicate support for public key validation as well.
/* commands[41] bit 3 = HCI Read Local Simple Pairing Options support. */
4311 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4312 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4314 flags |= 0x02; /* Remote public key validation (LE) */
4316 /* When the Read Encryption Key Size command is supported, then the
4317 * encryption key size is enforced.
4319 if (hdev->commands[20] & 0x10)
4320 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4322 flags |= 0x08; /* Encryption key size enforcement (LE) */
4324 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4327 /* When the Read Simple Pairing Options command is supported, then
4328 * also max encryption key size information is provided.
4330 if (hdev->commands[41] & 0x08)
4331 cap_len = eir_append_le16(rp->cap, cap_len,
4332 MGMT_CAP_MAX_ENC_KEY_SIZE,
4333 hdev->max_enc_key_size)

4335 cap_len = eir_append_le16(rp->cap, cap_len,
4336 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4337 SMP_MAX_ENC_KEY_SIZE);
4339 /* Append the min/max LE tx power parameters if we were able to fetch
4340 * it from the controller
/* commands[38] bit 7 = LE Read Transmit Power support. */
4342 if (hdev->commands[38] & 0x80) {
4343 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4344 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4345 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4349 rp->cap_len = cpu_to_le16(cap_len);
4351 hci_dev_unlock(hdev);
4353 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4354 rp, sizeof(*rp) + cap_len);
/* Experimental-feature UUIDs used by the Read/Set Experimental Feature
 * commands.  Each table stores the 128-bit UUID in little-endian byte
 * order, i.e. the reverse of the textual form given above it.
 */
4357 #ifdef CONFIG_BT_FEATURE_DEBUG
4358 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4359 static const u8 debug_uuid[16] = {
4360 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4361 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4365 /* 330859bc-7506-492d-9370-9a6f0614037f */
4366 static const u8 quality_report_uuid[16] = {
4367 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4368 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4371 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4372 static const u8 offload_codecs_uuid[16] = {
4373 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4374 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4377 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4378 static const u8 le_simultaneous_roles_uuid[16] = {
4379 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4380 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4383 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4384 static const u8 rpa_resolution_uuid[16] = {
4385 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4386 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4389 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4390 static const u8 iso_socket_uuid[16] = {
4391 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4392 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4395 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4396 static const u8 mgmt_mesh_uuid[16] = {
4397 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4398 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: reports which experimental
 * features are available (on the controller index, or globally when
 * hdev is NULL) together with their enabled state in bit 0 of flags.
 * Also opts the socket in to future EXP_FEATURE_CHANGED events.
 */
4401 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4402 void *data, u16 data_len)
4404 struct mgmt_rp_read_exp_features_info *rp;
4410 bt_dev_dbg(hdev, "sock %p", sk);
4412 /* Enough space for 7 features */
4413 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4414 rp = kzalloc(len, GFP_KERNEL);
4418 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Debug feature is index-independent (reported when hdev is NULL). */
4420 flags = bt_dbg_get() ? BIT(0) : 0;
4422 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4423 rp->features[idx].flags = cpu_to_le32(flags);
4428 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4429 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4434 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4435 rp->features[idx].flags = cpu_to_le32(flags);
4439 if (hdev && ll_privacy_capable(hdev)) {
4440 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
/* BIT(1) indicates toggling this feature changes supported settings. */
4441 flags = BIT(0) | BIT(1);
4445 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4446 rp->features[idx].flags = cpu_to_le32(flags);
4450 if (hdev && (aosp_has_quality_report(hdev) ||
4451 hdev->set_quality_report)) {
4452 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4457 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4458 rp->features[idx].flags = cpu_to_le32(flags);
4462 if (hdev && hdev->get_data_path_id) {
4463 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4468 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4469 rp->features[idx].flags = cpu_to_le32(flags);
4473 if (IS_ENABLED(CONFIG_BT_LE)) {
4474 flags = iso_enabled() ? BIT(0) : 0;
4475 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4476 rp->features[idx].flags = cpu_to_le32(flags);
4480 if (hdev && lmp_le_capable(hdev)) {
4481 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4486 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4487 rp->features[idx].flags = cpu_to_le32(flags);
4491 rp->feature_count = cpu_to_le16(idx);
4493 /* After reading the experimental features information, enable
4494 * the events to update client on any future change.
4496 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
/* Each feature entry is 20 bytes: 16-byte UUID + 4-byte flags. */
4498 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4499 MGMT_OP_READ_EXP_FEATURES_INFO,
4500 0, rp, sizeof(*rp) + (20 * idx));
/* Emits EXP_FEATURE_CHANGED for the LL-privacy (RPA resolution) UUID
 * and keeps hdev->conn_flags in sync: DEVICE_PRIVACY is only offered as
 * a connection flag while LL privacy is enabled.  BIT(1) in the event
 * flags signals that the supported settings changed too.
 */
4506 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4509 struct mgmt_ev_exp_feature_changed ev;
4511 memset(&ev, 0, sizeof(ev));
4512 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4513 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4515 // Do we need to be atomic with the conn_flags?
4516 if (enabled && privacy_mode_capable(hdev))
4517 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4519 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
/* Only sockets that opted in via EXP_FEATURE_EVENTS receive this. */
4521 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4523 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4527 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4528 bool enabled, struct sock *skip)
4530 struct mgmt_ev_exp_feature_changed ev;
4532 memset(&ev, 0, sizeof(ev));
4533 memcpy(ev.uuid, uuid, 16);
4534 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4536 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4538 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Initializer helper for one entry of the exp_features[] table, pairing
 * a feature UUID with its Set Experimental Feature handler.
 */
4541 #define EXP_FEAT(_uuid, _set_func) \
4544 .set_func = _set_func, \
4547 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Setting the all-zero UUID disables every currently-enabled
 * experimental feature (debug mode, LL privacy) and reports the
 * all-zero UUID with flags 0 back to the caller.
 */
4548 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4549 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4551 struct mgmt_rp_set_exp_feature rp;
4553 memset(rp.uuid, 0, 16);
4554 rp.flags = cpu_to_le32(0);
4556 #ifdef CONFIG_BT_FEATURE_DEBUG
4558 bool changed = bt_dbg_get();
/* Notify listeners that debug mode was switched off. */
4563 exp_feature_changed(NULL, ZERO_KEY, false, sk);
/* LL privacy can only be cleared while the controller is powered off. */
4567 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4570 changed = hci_dev_test_and_clear_flag(hdev,
4571 HCI_ENABLE_LL_PRIVACY);
4573 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4577 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4579 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4580 MGMT_OP_SET_EXP_FEATURE, 0,
4584 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Set Experimental Feature handler for the debug UUID: toggles kernel
 * bluetooth debug logging via bt_dbg_set().  Must be sent on the
 * non-controller (global) index.
 */
4585 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4586 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4588 struct mgmt_rp_set_exp_feature rp;
4593 /* Command requires to use the non-controller index */
4595 return mgmt_cmd_status(sk, hdev->id,
4596 MGMT_OP_SET_EXP_FEATURE,
4597 MGMT_STATUS_INVALID_INDEX);
4599 /* Parameters are limited to a single octet */
4600 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4601 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4602 MGMT_OP_SET_EXP_FEATURE,
4603 MGMT_STATUS_INVALID_PARAMS);
4605 /* Only boolean on/off is supported */
4606 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4607 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4608 MGMT_OP_SET_EXP_FEATURE,
4609 MGMT_STATUS_INVALID_PARAMS);
4611 val = !!cp->param[0];
/* changed = the requested state differs from the current one. */
4612 changed = val ? !bt_dbg_get() : bt_dbg_get();
4615 memcpy(rp.uuid, debug_uuid, 16);
4616 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4618 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4620 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4621 MGMT_OP_SET_EXP_FEATURE, 0,
/* Broadcast the change to other listeners, skipping the requester. */
4625 exp_feature_changed(hdev, debug_uuid, val, sk);
/* Set Experimental Feature handler for the mesh UUID: toggles the
 * HCI_MESH_EXPERIMENTAL flag (and clears HCI_MESH when disabling).
 * Requires a controller index.
 */
4631 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4632 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4634 struct mgmt_rp_set_exp_feature rp;
4638 /* Command requires to use the controller index */
4640 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4641 MGMT_OP_SET_EXP_FEATURE,
4642 MGMT_STATUS_INVALID_INDEX);
4644 /* Parameters are limited to a single octet */
4645 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4646 return mgmt_cmd_status(sk, hdev->id,
4647 MGMT_OP_SET_EXP_FEATURE,
4648 MGMT_STATUS_INVALID_PARAMS);
4650 /* Only boolean on/off is supported */
4651 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4652 return mgmt_cmd_status(sk, hdev->id,
4653 MGMT_OP_SET_EXP_FEATURE,
4654 MGMT_STATUS_INVALID_PARAMS);
4656 val = !!cp->param[0];
4659 changed = !hci_dev_test_and_set_flag(hdev,
4660 HCI_MESH_EXPERIMENTAL);
/* Disabling the experiment also turns mesh mode itself off. */
4662 hci_dev_clear_flag(hdev, HCI_MESH);
4663 changed = hci_dev_test_and_clear_flag(hdev,
4664 HCI_MESH_EXPERIMENTAL);
4667 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4668 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4670 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4672 err = mgmt_cmd_complete(sk, hdev->id,
4673 MGMT_OP_SET_EXP_FEATURE, 0,
/* Broadcast only when the flag actually changed. */
4677 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
/* Set Experimental Feature handler for the LL-privacy (RPA resolution)
 * UUID: toggles HCI_ENABLE_LL_PRIVACY.  Only permitted while the
 * controller is powered off; enabling also clears HCI_ADVERTISING.
 */
4682 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4683 struct mgmt_cp_set_exp_feature *cp,
4686 struct mgmt_rp_set_exp_feature rp;
4691 /* Command requires to use the controller index */
4693 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4694 MGMT_OP_SET_EXP_FEATURE,
4695 MGMT_STATUS_INVALID_INDEX);
4697 /* Changes can only be made when controller is powered down */
4698 if (hdev_is_powered(hdev))
4699 return mgmt_cmd_status(sk, hdev->id,
4700 MGMT_OP_SET_EXP_FEATURE,
4701 MGMT_STATUS_REJECTED);
4703 /* Parameters are limited to a single octet */
4704 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4705 return mgmt_cmd_status(sk, hdev->id,
4706 MGMT_OP_SET_EXP_FEATURE,
4707 MGMT_STATUS_INVALID_PARAMS);
4709 /* Only boolean on/off is supported */
4710 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4711 return mgmt_cmd_status(sk, hdev->id,
4712 MGMT_OP_SET_EXP_FEATURE,
4713 MGMT_STATUS_INVALID_PARAMS);
4715 val = !!cp->param[0];
4718 changed = !hci_dev_test_and_set_flag(hdev,
4719 HCI_ENABLE_LL_PRIVACY);
/* Advertising is incompatible with enabling LL privacy here. */
4720 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4722 /* Enable LL privacy + supported settings changed */
4723 flags = BIT(0) | BIT(1);
4725 changed = hci_dev_test_and_clear_flag(hdev,
4726 HCI_ENABLE_LL_PRIVACY);
4728 /* Disable LL privacy + supported settings changed */
4732 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4733 rp.flags = cpu_to_le32(flags);
4735 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4737 err = mgmt_cmd_complete(sk, hdev->id,
4738 MGMT_OP_SET_EXP_FEATURE, 0,
/* Uses the LL-privacy-specific notifier to also sync conn_flags. */
4742 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Set Experimental Feature handler for the quality-report UUID: routes
 * the toggle to the driver hook (set_quality_report) when present,
 * otherwise to the AOSP vendor extension, then mirrors the result in
 * the HCI_QUALITY_REPORT flag.  Serialized under the request sync lock.
 */
4747 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4748 struct mgmt_cp_set_exp_feature *cp,
4751 struct mgmt_rp_set_exp_feature rp;
4755 /* Command requires to use a valid controller index */
4757 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4758 MGMT_OP_SET_EXP_FEATURE,
4759 MGMT_STATUS_INVALID_INDEX);
4761 /* Parameters are limited to a single octet */
4762 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4763 return mgmt_cmd_status(sk, hdev->id,
4764 MGMT_OP_SET_EXP_FEATURE,
4765 MGMT_STATUS_INVALID_PARAMS);
4767 /* Only boolean on/off is supported */
4768 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4769 return mgmt_cmd_status(sk, hdev->id,
4770 MGMT_OP_SET_EXP_FEATURE,
4771 MGMT_STATUS_INVALID_PARAMS);
4773 hci_req_sync_lock(hdev);
4775 val = !!cp->param[0];
4776 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4778 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4779 err = mgmt_cmd_status(sk, hdev->id,
4780 MGMT_OP_SET_EXP_FEATURE,
4781 MGMT_STATUS_NOT_SUPPORTED);
4782 goto unlock_quality_report;
/* Driver-specific hook takes precedence over the AOSP extension. */
4786 if (hdev->set_quality_report)
4787 err = hdev->set_quality_report(hdev, val);
4789 err = aosp_set_quality_report(hdev, val);
4792 err = mgmt_cmd_status(sk, hdev->id,
4793 MGMT_OP_SET_EXP_FEATURE,
4794 MGMT_STATUS_FAILED);
4795 goto unlock_quality_report;
/* Flag is only updated after the controller accepted the change. */
4799 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4801 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4804 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4806 memcpy(rp.uuid, quality_report_uuid, 16);
4807 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4808 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4810 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4814 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4816 unlock_quality_report:
4817 hci_req_sync_unlock(hdev);
/* Set Experimental Feature handler for the offload-codecs UUID: toggles
 * HCI_OFFLOAD_CODECS_ENABLED.  Requires the driver to provide the
 * get_data_path_id hook.
 */
4821 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4822 struct mgmt_cp_set_exp_feature *cp,
4827 struct mgmt_rp_set_exp_feature rp;
4829 /* Command requires to use a valid controller index */
4831 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4832 MGMT_OP_SET_EXP_FEATURE,
4833 MGMT_STATUS_INVALID_INDEX);
4835 /* Parameters are limited to a single octet */
4836 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4837 return mgmt_cmd_status(sk, hdev->id,
4838 MGMT_OP_SET_EXP_FEATURE,
4839 MGMT_STATUS_INVALID_PARAMS);
4841 /* Only boolean on/off is supported */
4842 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4843 return mgmt_cmd_status(sk, hdev->id,
4844 MGMT_OP_SET_EXP_FEATURE,
4845 MGMT_STATUS_INVALID_PARAMS);
4847 val = !!cp->param[0];
4848 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
/* Codec offload is driver-dependent: no data-path hook, no support. */
4850 if (!hdev->get_data_path_id) {
4851 return mgmt_cmd_status(sk, hdev->id,
4852 MGMT_OP_SET_EXP_FEATURE,
4853 MGMT_STATUS_NOT_SUPPORTED);
4858 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4860 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4863 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4866 memcpy(rp.uuid, offload_codecs_uuid, 16);
4867 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4868 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4869 err = mgmt_cmd_complete(sk, hdev->id,
4870 MGMT_OP_SET_EXP_FEATURE, 0,
4874 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
/* Set Experimental Feature handler for the LE-simultaneous-roles UUID:
 * toggles HCI_LE_SIMULTANEOUS_ROLES when the controller's LE states
 * allow acting as central and peripheral at the same time.
 */
4879 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4880 struct mgmt_cp_set_exp_feature *cp,
4885 struct mgmt_rp_set_exp_feature rp;
4887 /* Command requires to use a valid controller index */
4889 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4890 MGMT_OP_SET_EXP_FEATURE,
4891 MGMT_STATUS_INVALID_INDEX);
4893 /* Parameters are limited to a single octet */
4894 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4895 return mgmt_cmd_status(sk, hdev->id,
4896 MGMT_OP_SET_EXP_FEATURE,
4897 MGMT_STATUS_INVALID_PARAMS);
4899 /* Only boolean on/off is supported */
4900 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4901 return mgmt_cmd_status(sk, hdev->id,
4902 MGMT_OP_SET_EXP_FEATURE,
4903 MGMT_STATUS_INVALID_PARAMS);
4905 val = !!cp->param[0];
4906 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4908 if (!hci_dev_le_state_simultaneous(hdev)) {
4909 return mgmt_cmd_status(sk, hdev->id,
4910 MGMT_OP_SET_EXP_FEATURE,
4911 MGMT_STATUS_NOT_SUPPORTED);
4916 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4918 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4921 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4924 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4925 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4926 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4927 err = mgmt_cmd_complete(sk, hdev->id,
4928 MGMT_OP_SET_EXP_FEATURE, 0,
4932 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
/* Set Experimental Feature handler for the ISO-socket UUID: toggles the
 * global ISO socket support.  Must be sent on the non-controller
 * (global) index.
 */
4938 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4939 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4941 struct mgmt_rp_set_exp_feature rp;
4942 bool val, changed = false;
4945 /* Command requires to use the non-controller index */
4947 return mgmt_cmd_status(sk, hdev->id,
4948 MGMT_OP_SET_EXP_FEATURE,
4949 MGMT_STATUS_INVALID_INDEX);
4951 /* Parameters are limited to a single octet */
4952 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4953 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4954 MGMT_OP_SET_EXP_FEATURE,
4955 MGMT_STATUS_INVALID_PARAMS);
4957 /* Only boolean on/off is supported */
4958 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4959 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4960 MGMT_OP_SET_EXP_FEATURE,
4961 MGMT_STATUS_INVALID_PARAMS);
4963 val = cp->param[0] ? true : false;
4972 memcpy(rp.uuid, iso_socket_uuid, 16);
4973 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4975 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4977 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4978 MGMT_OP_SET_EXP_FEATURE, 0,
4982 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
/* Dispatch table mapping experimental-feature UUIDs to their Set
 * Experimental Feature handlers; terminated by a NULL entry.  Scanned
 * linearly by set_exp_feature().
 */
4988 static const struct mgmt_exp_feature {
4990 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4991 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4992 } exp_features[] = {
4993 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4994 #ifdef CONFIG_BT_FEATURE_DEBUG
4995 EXP_FEAT(debug_uuid, set_debug_func),
4997 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4998 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4999 EXP_FEAT(quality_report_uuid, set_quality_report_func),
5000 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5001 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5003 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5006 /* end with a null feature */
5007 EXP_FEAT(NULL, NULL)
/* MGMT_OP_SET_EXP_FEATURE handler: looks up the command's UUID in the
 * exp_features[] table and delegates to the matching handler; unknown
 * UUIDs are answered with NOT_SUPPORTED.
 */
5010 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5011 void *data, u16 data_len)
5013 struct mgmt_cp_set_exp_feature *cp = data;
5016 bt_dev_dbg(hdev, "sock %p", sk);
5018 for (i = 0; exp_features[i].uuid; i++) {
5019 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5020 return exp_features[i].set_func(sk, hdev, cp, data_len);
/* hdev may be NULL for global (non-controller) commands. */
5023 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5024 MGMT_OP_SET_EXP_FEATURE,
5025 MGMT_STATUS_NOT_SUPPORTED);
5028 static u32 get_params_flags(struct hci_dev *hdev,
5029 struct hci_conn_params *params)
5031 u32 flags = hdev->conn_flags;
5033 /* Devices using RPAs can only be programmed in the acceptlist if
5034 * LL Privacy has been enable otherwise they cannot mark
5035 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5037 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5038 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type))
5039 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
/* MGMT_OP_GET_DEVICE_FLAGS handler: reports the supported and current
 * connection flags for one device, looked up in the BR/EDR accept list
 * or in the LE connection parameters depending on the address type.
 */
5044 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5047 struct mgmt_cp_get_device_flags *cp = data;
5048 struct mgmt_rp_get_device_flags rp;
5049 struct bdaddr_list_with_flags *br_params;
5050 struct hci_conn_params *params;
5051 u32 supported_flags;
5052 u32 current_flags = 0;
/* Stays INVALID_PARAMS unless the device lookup below succeeds. */
5053 u8 status = MGMT_STATUS_INVALID_PARAMS;
5055 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5056 &cp->addr.bdaddr, cp->addr.type);
5060 supported_flags = hdev->conn_flags;
5062 memset(&rp, 0, sizeof(rp));
5064 if (cp->addr.type == BDADDR_BREDR) {
5065 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5071 current_flags = br_params->flags;
5073 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5074 le_addr_type(cp->addr.type));
/* LE path narrows supported flags per-device (IRK/LL-privacy). */
5078 supported_flags = get_params_flags(hdev, params);
5079 current_flags = params->flags;
5082 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5083 rp.addr.type = cp->addr.type;
5084 rp.supported_flags = cpu_to_le32(supported_flags);
5085 rp.current_flags = cpu_to_le32(current_flags);
5087 status = MGMT_STATUS_SUCCESS;
5090 hci_dev_unlock(hdev);
5092 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5096 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5097 bdaddr_t *bdaddr, u8 bdaddr_type,
5098 u32 supported_flags, u32 current_flags)
5100 struct mgmt_ev_device_flags_changed ev;
5102 bacpy(&ev.addr.bdaddr, bdaddr);
5103 ev.addr.type = bdaddr_type;
5104 ev.supported_flags = cpu_to_le32(supported_flags);
5105 ev.current_flags = cpu_to_le32(current_flags);
5107 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: validates the requested flags
 * against the supported set, stores them on the matching BR/EDR
 * accept-list entry or LE connection parameters, and broadcasts a
 * Device Flags Changed event on success.
 */
5110 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5113 struct mgmt_cp_set_device_flags *cp = data;
5114 struct bdaddr_list_with_flags *br_params;
5115 struct hci_conn_params *params;
5116 u8 status = MGMT_STATUS_INVALID_PARAMS;
5117 u32 supported_flags;
5118 u32 current_flags = __le32_to_cpu(cp->current_flags);
5120 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5121 &cp->addr.bdaddr, cp->addr.type, current_flags);
5123 // We should take hci_dev_lock() early, I think.. conn_flags can change
5124 supported_flags = hdev->conn_flags;
/* Reject any bit not in the supported mask. */
5126 if ((supported_flags | current_flags) != supported_flags) {
5127 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5128 current_flags, supported_flags);
5134 if (cp->addr.type == BDADDR_BREDR) {
5135 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5140 br_params->flags = current_flags;
5141 status = MGMT_STATUS_SUCCESS;
5143 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5144 &cp->addr.bdaddr, cp->addr.type);
5150 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5151 le_addr_type(cp->addr.type));
5153 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5154 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
/* Re-validate against the per-device (narrower) supported set. */
5158 supported_flags = get_params_flags(hdev, params);
5160 if ((supported_flags | current_flags) != supported_flags) {
5161 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5162 current_flags, supported_flags);
/* WRITE_ONCE: flags may be read concurrently without the lock. */
5166 WRITE_ONCE(params->flags, current_flags);
5167 status = MGMT_STATUS_SUCCESS;
5169 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5172 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5173 hci_update_passive_scan(hdev);
5176 hci_dev_unlock(hdev);
5179 if (status == MGMT_STATUS_SUCCESS)
5180 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5181 supported_flags, current_flags);
5183 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5184 &cp->addr, sizeof(cp->addr));
5187 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5190 struct mgmt_ev_adv_monitor_added ev;
5192 ev.monitor_handle = cpu_to_le16(handle);
5194 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADV_MONITOR_REMOVED for @handle.  If a Remove Adv
 * Monitor command is pending for this handle, its originating socket is
 * skipped (it gets the command response instead of the event).
 */
5197 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5199 struct mgmt_ev_adv_monitor_removed ev;
5200 struct mgmt_pending_cmd *cmd;
5201 struct sock *sk_skip = NULL;
5202 struct mgmt_cp_remove_adv_monitor *cp;
5204 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
/* Non-zero handle means a single-monitor removal is in flight. */
5208 if (cp->monitor_handle)
5212 ev.monitor_handle = cpu_to_le16(handle);
5214 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: reports supported/enabled
 * monitor features, capacity limits, and the handles of all currently
 * registered advertisement monitors.
 */
5217 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5218 void *data, u16 len)
5220 struct adv_monitor *monitor = NULL;
5221 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5224 __u32 supported = 0;
5226 __u16 num_handles = 0;
5227 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5229 BT_DBG("request for %s", hdev->name);
/* OR-patterns monitoring is only offered via the MSFT extension. */
5233 if (msft_monitor_supported(hdev))
5234 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* Snapshot registered handles under the dev lock. */
5236 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5237 handles[num_handles++] = monitor->handle;
5239 hci_dev_unlock(hdev);
5241 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5242 rp = kmalloc(rp_size, GFP_KERNEL);
5246 /* All supported features are currently enabled */
5247 enabled = supported;
5249 rp->supported_features = cpu_to_le32(supported);
5250 rp->enabled_features = cpu_to_le32(enabled);
5251 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5252 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5253 rp->num_handles = cpu_to_le16(num_handles);
5255 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5257 err = mgmt_cmd_complete(sk, hdev->id,
5258 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5259 MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion callback for Add Adv Patterns Monitor (both variants):
 * on success, announces the monitor, bumps the monitor count, marks it
 * registered, and refreshes passive scanning; always replies to the
 * pending command and removes it.
 */
5266 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5267 void *data, int status)
5269 struct mgmt_rp_add_adv_patterns_monitor rp;
5270 struct mgmt_pending_cmd *cmd = data;
5271 struct adv_monitor *monitor = cmd->user_data;
5275 rp.monitor_handle = cpu_to_le16(monitor->handle);
5278 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5279 hdev->adv_monitors_cnt++;
5280 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5281 monitor->state = ADV_MONITOR_STATE_REGISTERED;
/* A new monitor may require passive scan to (re)start. */
5282 hci_update_passive_scan(hdev);
5285 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5286 mgmt_status(status), &rp, sizeof(rp));
5287 mgmt_pending_remove(cmd);
5289 hci_dev_unlock(hdev);
5290 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5291 rp.monitor_handle, status);
5294 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5296 struct mgmt_pending_cmd *cmd = data;
5297 struct adv_monitor *monitor = cmd->user_data;
5299 return hci_add_adv_monitor(hdev, monitor);
/* Common tail for both Add Adv Patterns Monitor commands: rejects when
 * a conflicting operation is pending, otherwise queues the monitor
 * registration via hci_cmd_sync.  On any failure path the monitor @m is
 * freed and a command-status error is returned.
 */
5302 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5303 struct adv_monitor *m, u8 status,
5304 void *data, u16 len, u16 op)
5306 struct mgmt_pending_cmd *cmd;
/* Serialize against LE toggling and other monitor add/remove ops. */
5314 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5315 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5316 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5317 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5318 status = MGMT_STATUS_BUSY;
5322 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5324 status = MGMT_STATUS_NO_RESOURCES;
5329 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5330 mgmt_add_adv_patterns_monitor_complete);
/* -ENOMEM maps to NO_RESOURCES, any other error to FAILED. */
5333 status = MGMT_STATUS_NO_RESOURCES;
5335 status = MGMT_STATUS_FAILED;
5340 hci_dev_unlock(hdev);
5345 hci_free_adv_monitor(hdev, m);
5346 hci_dev_unlock(hdev);
5347 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copy RSSI threshold parameters from the command into monitor @m,
 * falling back to permissive defaults when @rssi is NULL (the non-RSSI
 * Add Adv Patterns Monitor variant).
 */
5350 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5351 struct mgmt_adv_rssi_thresholds *rssi)
5354 m->rssi.low_threshold = rssi->low_threshold;
5355 m->rssi.low_threshold_timeout =
5356 __le16_to_cpu(rssi->low_threshold_timeout);
5357 m->rssi.high_threshold = rssi->high_threshold;
5358 m->rssi.high_threshold_timeout =
5359 __le16_to_cpu(rssi->high_threshold_timeout);
5360 m->rssi.sampling_period = rssi->sampling_period;
5362 /* Default values. These numbers are the least constricting
5363 * parameters for MSFT API to work, so it behaves as if there
5364 * are no rssi parameter to consider. May need to be changed
5365 * if other API are to be supported.
5367 m->rssi.low_threshold = -127;
5368 m->rssi.low_threshold_timeout = 60;
5369 m->rssi.high_threshold = -127;
5370 m->rssi.high_threshold_timeout = 0;
5371 m->rssi.sampling_period = 0;
/* Validate and copy @pattern_count advertisement patterns into monitor
 * @m's pattern list.  Returns an MGMT status code: INVALID_PARAMS when
 * a pattern's offset/length exceeds the extended AD payload,
 * NO_RESOURCES on allocation failure, SUCCESS otherwise.
 */
5375 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5376 struct mgmt_adv_pattern *patterns)
5378 u8 offset = 0, length = 0;
5379 struct adv_pattern *p = NULL;
5382 for (i = 0; i < pattern_count; i++) {
5383 offset = patterns[i].offset;
5384 length = patterns[i].length;
/* Pattern must lie entirely within the extended AD data. */
5385 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5386 length > HCI_MAX_EXT_AD_LENGTH ||
5387 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5388 return MGMT_STATUS_INVALID_PARAMS;
5390 p = kmalloc(sizeof(*p), GFP_KERNEL);
5392 return MGMT_STATUS_NO_RESOURCES;
5394 p->ad_type = patterns[i].ad_type;
5395 p->offset = patterns[i].offset;
5396 p->length = patterns[i].length;
5397 memcpy(p->value, patterns[i].value, p->length);
5399 INIT_LIST_HEAD(&p->list);
5400 list_add(&p->list, &m->patterns);
5403 return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI thresholds):
 * validates the variable-length pattern payload, allocates the monitor
 * with default RSSI parameters, and hands off to
 * __add_adv_patterns_monitor() for registration.
 */
5406 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5407 void *data, u16 len)
5409 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5410 struct adv_monitor *m = NULL;
5411 u8 status = MGMT_STATUS_SUCCESS;
5412 size_t expected_size = sizeof(*cp);
5414 BT_DBG("request for %s", hdev->name);
/* At least one pattern must follow the fixed header. */
5416 if (len <= sizeof(*cp)) {
5417 status = MGMT_STATUS_INVALID_PARAMS;
5421 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5422 if (len != expected_size) {
5423 status = MGMT_STATUS_INVALID_PARAMS;
5427 m = kzalloc(sizeof(*m), GFP_KERNEL);
5429 status = MGMT_STATUS_NO_RESOURCES;
5433 INIT_LIST_HEAD(&m->patterns);
/* NULL rssi selects the permissive defaults. */
5435 parse_adv_monitor_rssi(m, NULL);
5436 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
/* Common tail handles both the success and the error status paths. */
5439 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5440 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: same as
 * add_adv_patterns_monitor() but takes explicit RSSI thresholds from
 * the command payload.
 */
5443 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5444 void *data, u16 len)
5446 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5447 struct adv_monitor *m = NULL;
5448 u8 status = MGMT_STATUS_SUCCESS;
5449 size_t expected_size = sizeof(*cp);
5451 BT_DBG("request for %s", hdev->name);
5453 if (len <= sizeof(*cp)) {
5454 status = MGMT_STATUS_INVALID_PARAMS;
5458 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5459 if (len != expected_size) {
5460 status = MGMT_STATUS_INVALID_PARAMS;
5464 m = kzalloc(sizeof(*m), GFP_KERNEL);
5466 status = MGMT_STATUS_NO_RESOURCES;
5470 INIT_LIST_HEAD(&m->patterns);
/* Caller-supplied thresholds instead of the defaults. */
5472 parse_adv_monitor_rssi(m, &cp->rssi);
5473 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5476 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5477 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for the queued remove-monitor work: echo the
 * requested monitor_handle back to userspace with the mapped status,
 * refresh passive scanning, and drop the pending command.
 */
5480 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5481 void *data, int status)
5483 struct mgmt_rp_remove_adv_monitor rp;
5484 struct mgmt_pending_cmd *cmd = data;
5485 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5489 rp.monitor_handle = cp->monitor_handle;
/* Monitor set changed; re-evaluate whether passive scan is needed. */
5492 hci_update_passive_scan(hdev);
5494 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5495 mgmt_status(status), &rp, sizeof(rp));
5496 mgmt_pending_remove(cmd);
5498 hci_dev_unlock(hdev);
5499 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5500 rp.monitor_handle, status);
/* hci_cmd_sync work: remove one monitor by handle, or all monitors.
 * NOTE(review): the selector between the two calls is elided here —
 * presumably handle == 0 means "remove all"; confirm upstream.
 */
5503 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5505 struct mgmt_pending_cmd *cmd = data;
5506 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5507 u16 handle = __le16_to_cpu(cp->monitor_handle);
5510 return hci_remove_all_adv_monitor(hdev);
5512 return hci_remove_single_adv_monitor(hdev, handle);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: reject if a conflicting monitor
 * or LE-state operation is already pending, otherwise queue the removal
 * on the cmd_sync workqueue and reply asynchronously from
 * mgmt_remove_adv_monitor_complete().
 */
5515 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5516 void *data, u16 len)
5518 struct mgmt_pending_cmd *cmd;
/* Serialize against other operations touching the monitor/LE state. */
5523 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5524 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5525 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5526 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5527 status = MGMT_STATUS_BUSY;
5531 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5533 status = MGMT_STATUS_NO_RESOURCES;
5537 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5538 mgmt_remove_adv_monitor_complete);
5541 mgmt_pending_remove(cmd);
5544 status = MGMT_STATUS_NO_RESOURCES;
5546 status = MGMT_STATUS_FAILED;
5551 hci_dev_unlock(hdev);
5556 hci_dev_unlock(hdev);
5557 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA: translate the
 * controller reply (legacy P-192-only or extended P-192+P-256 format,
 * chosen by bredr_sc_enabled()) into the mgmt response, shrinking the
 * response for legacy controllers, then free the skb and pending cmd.
 */
5561 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5563 struct mgmt_rp_read_local_oob_data mgmt_rp;
5564 size_t rp_size = sizeof(mgmt_rp);
5565 struct mgmt_pending_cmd *cmd = data;
5566 struct sk_buff *skb = cmd->skb;
5567 u8 status = mgmt_status(err);
5571 status = MGMT_STATUS_FAILED;
5572 else if (IS_ERR(skb))
5573 status = mgmt_status(PTR_ERR(skb));
/* First byte of the event payload is the HCI status code. */
5575 status = mgmt_status(skb->data[0]);
5578 bt_dev_dbg(hdev, "status %d", status);
5581 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5585 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5587 if (!bredr_sc_enabled(hdev)) {
5588 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5590 if (skb->len < sizeof(*rp)) {
5591 mgmt_cmd_status(cmd->sk, hdev->id,
5592 MGMT_OP_READ_LOCAL_OOB_DATA,
5593 MGMT_STATUS_FAILED);
5597 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5598 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy reply carries no P-256 data; trim it from the response. */
5600 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5602 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5604 if (skb->len < sizeof(*rp)) {
5605 mgmt_cmd_status(cmd->sk, hdev->id,
5606 MGMT_OP_READ_LOCAL_OOB_DATA,
5607 MGMT_STATUS_FAILED);
5611 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5612 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5614 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5615 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5618 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5619 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5622 if (skb && !IS_ERR(skb))
5625 mgmt_pending_free(cmd);
/* hci_cmd_sync work: issue the (extended, if SC is enabled) read local
 * OOB data command and stash the reply skb on the pending cmd.
 */
5628 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5630 struct mgmt_pending_cmd *cmd = data;
5632 if (bredr_sc_enabled(hdev))
5633 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5635 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5637 if (IS_ERR(cmd->skb))
5638 return PTR_ERR(cmd->skb);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller; queues read_local_oob_data_sync and replies from
 * read_local_oob_data_complete().
 */
5643 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5644 void *data, u16 data_len)
5646 struct mgmt_pending_cmd *cmd;
5649 bt_dev_dbg(hdev, "sock %p", sk);
5653 if (!hdev_is_powered(hdev)) {
5654 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5655 MGMT_STATUS_NOT_POWERED);
5659 if (!lmp_ssp_capable(hdev)) {
5660 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5661 MGMT_STATUS_NOT_SUPPORTED);
5665 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5669 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5670 read_local_oob_data_complete);
5673 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5674 MGMT_STATUS_FAILED);
5677 mgmt_pending_free(cmd);
5681 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: accepts either the short form
 * (P-192 hash/rand, BR/EDR only) or the extended form (P-192 + P-256),
 * distinguished purely by request length. Zeroed key halves mean "no
 * OOB data for that curve".
 */
5685 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5686 void *data, u16 len)
5688 struct mgmt_addr_info *addr = data;
5691 bt_dev_dbg(hdev, "sock %p", sk);
5693 if (!bdaddr_type_is_valid(addr->type))
5694 return mgmt_cmd_complete(sk, hdev->id,
5695 MGMT_OP_ADD_REMOTE_OOB_DATA,
5696 MGMT_STATUS_INVALID_PARAMS,
5697 addr, sizeof(*addr));
/* Short form: legacy P-192 OOB data, valid only for BR/EDR. */
5701 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5702 struct mgmt_cp_add_remote_oob_data *cp = data;
5705 if (cp->addr.type != BDADDR_BREDR) {
5706 err = mgmt_cmd_complete(sk, hdev->id,
5707 MGMT_OP_ADD_REMOTE_OOB_DATA,
5708 MGMT_STATUS_INVALID_PARAMS,
5709 &cp->addr, sizeof(cp->addr));
5713 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5714 cp->addr.type, cp->hash,
5715 cp->rand, NULL, NULL);
5717 status = MGMT_STATUS_FAILED;
5719 status = MGMT_STATUS_SUCCESS;
5721 err = mgmt_cmd_complete(sk, hdev->id,
5722 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5723 &cp->addr, sizeof(cp->addr));
5724 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5725 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5726 u8 *rand192, *hash192, *rand256, *hash256;
5729 if (bdaddr_type_is_le(cp->addr.type)) {
5730 /* Enforce zero-valued 192-bit parameters as
5731 * long as legacy SMP OOB isn't implemented.
5733 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5734 memcmp(cp->hash192, ZERO_KEY, 16)) {
5735 err = mgmt_cmd_complete(sk, hdev->id,
5736 MGMT_OP_ADD_REMOTE_OOB_DATA,
5737 MGMT_STATUS_INVALID_PARAMS,
5738 addr, sizeof(*addr));
5745 /* In case one of the P-192 values is set to zero,
5746 * then just disable OOB data for P-192.
5748 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5749 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5753 rand192 = cp->rand192;
5754 hash192 = cp->hash192;
5758 /* In case one of the P-256 values is set to zero, then just
5759 * disable OOB data for P-256.
5761 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5762 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5766 rand256 = cp->rand256;
5767 hash256 = cp->hash256;
5770 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5771 cp->addr.type, hash192, rand192,
5774 status = MGMT_STATUS_FAILED;
5776 status = MGMT_STATUS_SUCCESS;
5778 err = mgmt_cmd_complete(sk, hdev->id,
5779 MGMT_OP_ADD_REMOTE_OOB_DATA,
5780 status, &cp->addr, sizeof(cp->addr));
5782 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5784 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5785 MGMT_STATUS_INVALID_PARAMS);
5789 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BR/EDR addresses only;
 * BDADDR_ANY clears all stored remote OOB data, otherwise remove the
 * entry for the given address.
 */
5793 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5794 void *data, u16 len)
5796 struct mgmt_cp_remove_remote_oob_data *cp = data;
5800 bt_dev_dbg(hdev, "sock %p", sk);
5802 if (cp->addr.type != BDADDR_BREDR)
5803 return mgmt_cmd_complete(sk, hdev->id,
5804 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5805 MGMT_STATUS_INVALID_PARAMS,
5806 &cp->addr, sizeof(cp->addr));
5810 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5811 hci_remote_oob_data_clear(hdev);
5812 status = MGMT_STATUS_SUCCESS;
5816 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5818 status = MGMT_STATUS_INVALID_PARAMS;
5820 status = MGMT_STATUS_SUCCESS;
5823 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5824 status, &cp->addr, sizeof(cp->addr));
5826 hci_dev_unlock(hdev);
/* Notify whichever start-discovery variant is pending (plain, service,
 * or limited) of the final HCI status and drop the pending command.
 */
5830 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5832 struct mgmt_pending_cmd *cmd;
5834 bt_dev_dbg(hdev, "status %u", status);
5838 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5840 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5843 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5846 cmd->cmd_complete(cmd, mgmt_status(status));
5847 mgmt_pending_remove(cmd);
5850 hci_dev_unlock(hdev);
/* Check whether the requested discovery type is supported by this
 * controller; on rejection *mgmt_status carries the reason (LE/BR-EDR
 * support check result or INVALID_PARAMS for unknown types).
 */
5853 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5854 uint8_t *mgmt_status)
5857 case DISCOV_TYPE_LE:
5858 *mgmt_status = mgmt_le_support(hdev);
5862 case DISCOV_TYPE_INTERLEAVED:
5863 *mgmt_status = mgmt_le_support(hdev);
5867 case DISCOV_TYPE_BREDR:
5868 *mgmt_status = mgmt_bredr_support(hdev);
5873 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* cmd_sync completion for start-discovery: bail out if the pending cmd
 * was already consumed elsewhere, otherwise reply to userspace and move
 * the discovery state machine (STOPPED on error, FINDING on success —
 * second state constant elided from this view).
 */
5880 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5882 struct mgmt_pending_cmd *cmd = data;
/* Guard against a stale callback racing with command removal. */
5884 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5885 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5886 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5889 bt_dev_dbg(hdev, "err %d", err);
5891 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5893 mgmt_pending_remove(cmd);
5895 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
/* hci_cmd_sync work: thin wrapper around hci_start_discovery_sync(). */
5899 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5901 return hci_start_discovery_sync(hdev);
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validate power/state/type, reset the
 * discovery filter, record the requested type, and queue the discovery
 * start on the cmd_sync workqueue.
 */
5904 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5905 u16 op, void *data, u16 len)
5907 struct mgmt_cp_start_discovery *cp = data;
5908 struct mgmt_pending_cmd *cmd;
5912 bt_dev_dbg(hdev, "sock %p", sk);
5916 if (!hdev_is_powered(hdev)) {
5917 err = mgmt_cmd_complete(sk, hdev->id, op,
5918 MGMT_STATUS_NOT_POWERED,
5919 &cp->type, sizeof(cp->type));
5923 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5924 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5925 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5926 &cp->type, sizeof(cp->type));
5930 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5931 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5932 &cp->type, sizeof(cp->type));
5936 /* Can't start discovery when it is paused */
5937 if (hdev->discovery_paused) {
5938 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5939 &cp->type, sizeof(cp->type));
5943 /* Clear the discovery filter first to free any previously
5944 * allocated memory for the UUID list.
5946 hci_discovery_filter_clear(hdev);
5948 hdev->discovery.type = cp->type;
5949 hdev->discovery.report_invalid_rssi = false;
5950 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5951 hdev->discovery.limited = true;
5953 hdev->discovery.limited = false;
5955 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5961 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5962 start_discovery_complete);
5964 mgmt_pending_remove(cmd);
5968 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5971 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: delegates to the shared helper. */
5975 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5976 void *data, u16 len)
5978 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY handler: delegates to the shared
 * helper, which sets hdev->discovery.limited for this opcode.
 */
5982 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5983 void *data, u16 len)
5985 return start_discovery_internal(sk, hdev,
5986 MGMT_OP_START_LIMITED_DISCOVERY,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but
 * with result filtering by RSSI and an optional UUID list (16 bytes per
 * UUID) appended to the request. Validates the variable length against
 * uuid_count before copying the list into hdev->discovery.
 */
5990 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5991 void *data, u16 len)
5993 struct mgmt_cp_start_service_discovery *cp = data;
5994 struct mgmt_pending_cmd *cmd;
/* Cap uuid_count so expected_len below cannot overflow u16. */
5995 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5996 u16 uuid_count, expected_len;
6000 bt_dev_dbg(hdev, "sock %p", sk);
6004 if (!hdev_is_powered(hdev)) {
6005 err = mgmt_cmd_complete(sk, hdev->id,
6006 MGMT_OP_START_SERVICE_DISCOVERY,
6007 MGMT_STATUS_NOT_POWERED,
6008 &cp->type, sizeof(cp->type));
6012 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6013 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6014 err = mgmt_cmd_complete(sk, hdev->id,
6015 MGMT_OP_START_SERVICE_DISCOVERY,
6016 MGMT_STATUS_BUSY, &cp->type,
6021 if (hdev->discovery_paused) {
6022 err = mgmt_cmd_complete(sk, hdev->id,
6023 MGMT_OP_START_SERVICE_DISCOVERY,
6024 MGMT_STATUS_BUSY, &cp->type,
6029 uuid_count = __le16_to_cpu(cp->uuid_count);
6030 if (uuid_count > max_uuid_count) {
6031 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6033 err = mgmt_cmd_complete(sk, hdev->id,
6034 MGMT_OP_START_SERVICE_DISCOVERY,
6035 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6040 expected_len = sizeof(*cp) + uuid_count * 16;
6041 if (expected_len != len) {
6042 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6044 err = mgmt_cmd_complete(sk, hdev->id,
6045 MGMT_OP_START_SERVICE_DISCOVERY,
6046 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6051 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6052 err = mgmt_cmd_complete(sk, hdev->id,
6053 MGMT_OP_START_SERVICE_DISCOVERY,
6054 status, &cp->type, sizeof(cp->type));
6058 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6065 /* Clear the discovery filter first to free any previously
6066 * allocated memory for the UUID list.
6068 hci_discovery_filter_clear(hdev);
6070 hdev->discovery.result_filtering = true;
6071 hdev->discovery.type = cp->type;
6072 hdev->discovery.rssi = cp->rssi;
6073 hdev->discovery.uuid_count = uuid_count;
6075 if (uuid_count > 0) {
6076 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6078 if (!hdev->discovery.uuids) {
6079 err = mgmt_cmd_complete(sk, hdev->id,
6080 MGMT_OP_START_SERVICE_DISCOVERY,
6082 &cp->type, sizeof(cp->type));
6083 mgmt_pending_remove(cmd);
6088 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6089 start_discovery_complete);
6091 mgmt_pending_remove(cmd);
6095 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6098 hci_dev_unlock(hdev);
/* Notify a pending MGMT_OP_STOP_DISCOVERY of the final HCI status and
 * drop the pending command.
 */
6102 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6104 struct mgmt_pending_cmd *cmd;
6106 bt_dev_dbg(hdev, "status %u", status);
6110 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6112 cmd->cmd_complete(cmd, mgmt_status(status));
6113 mgmt_pending_remove(cmd);
6116 hci_dev_unlock(hdev);
/* cmd_sync completion for stop-discovery: ignore stale callbacks, reply
 * to userspace, and mark discovery stopped.
 */
6119 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6121 struct mgmt_pending_cmd *cmd = data;
6123 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6126 bt_dev_dbg(hdev, "err %d", err);
6128 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6130 mgmt_pending_remove(cmd);
6133 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* hci_cmd_sync work: thin wrapper around hci_stop_discovery_sync(). */
6136 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6138 return hci_stop_discovery_sync(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: only valid while discovery of the
 * same type is active; queues the stop on the cmd_sync workqueue and
 * transitions the state machine to STOPPING.
 */
6141 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6144 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6145 struct mgmt_pending_cmd *cmd;
6148 bt_dev_dbg(hdev, "sock %p", sk);
6152 if (!hci_discovery_active(hdev)) {
6153 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6154 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6155 sizeof(mgmt_cp->type));
/* The requested type must match the discovery that is running. */
6159 if (hdev->discovery.type != mgmt_cp->type) {
6160 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6161 MGMT_STATUS_INVALID_PARAMS,
6162 &mgmt_cp->type, sizeof(mgmt_cp->type));
6166 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6172 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6173 stop_discovery_complete);
6175 mgmt_pending_remove(cmd);
6179 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6182 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during active discovery, mark the
 * inquiry-cache entry for the given address as name-known or
 * name-needed (the latter re-queues it for name resolution).
 */
6186 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6189 struct mgmt_cp_confirm_name *cp = data;
6190 struct inquiry_entry *e;
6193 bt_dev_dbg(hdev, "sock %p", sk);
6197 if (!hci_discovery_active(hdev)) {
6198 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6199 MGMT_STATUS_FAILED, &cp->addr,
6204 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6206 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6207 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6212 if (cp->name_known) {
6213 e->name_state = NAME_KNOWN;
6216 e->name_state = NAME_NEEDED;
6217 hci_inquiry_cache_update_resolve(hdev, e);
6220 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6221 &cp->addr, sizeof(cp->addr));
6224 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the controller's
 * reject list and broadcast MGMT_EV_DEVICE_BLOCKED on success.
 */
6228 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6231 struct mgmt_cp_block_device *cp = data;
6235 bt_dev_dbg(hdev, "sock %p", sk);
6237 if (!bdaddr_type_is_valid(cp->addr.type))
6238 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6239 MGMT_STATUS_INVALID_PARAMS,
6240 &cp->addr, sizeof(cp->addr));
6244 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6247 status = MGMT_STATUS_FAILED;
6251 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6253 status = MGMT_STATUS_SUCCESS;
6256 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6257 &cp->addr, sizeof(cp->addr));
6259 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device() — remove the
 * address from the reject list and broadcast MGMT_EV_DEVICE_UNBLOCKED.
 * A miss maps to INVALID_PARAMS (the address was never blocked).
 */
6264 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6267 struct mgmt_cp_unblock_device *cp = data;
6271 bt_dev_dbg(hdev, "sock %p", sk);
6273 if (!bdaddr_type_is_valid(cp->addr.type))
6274 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6275 MGMT_STATUS_INVALID_PARAMS,
6276 &cp->addr, sizeof(cp->addr));
6280 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6283 status = MGMT_STATUS_INVALID_PARAMS;
6287 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6289 status = MGMT_STATUS_SUCCESS;
6292 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6293 &cp->addr, sizeof(cp->addr));
6295 hci_dev_unlock(hdev);
/* hci_cmd_sync work: refresh EIR data so it carries the new device ID. */
6300 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6302 return hci_update_eir_sync(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the DI source/vendor/product/
 * version on hdev and queue an EIR update. Source 0x0000-0x0002 are the
 * only values defined (disabled / Bluetooth SIG / USB IF).
 */
6305 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6308 struct mgmt_cp_set_device_id *cp = data;
6312 bt_dev_dbg(hdev, "sock %p", sk);
6314 source = __le16_to_cpu(cp->source);
6316 if (source > 0x0002)
6317 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6318 MGMT_STATUS_INVALID_PARAMS);
6322 hdev->devid_source = source;
6323 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6324 hdev->devid_product = __le16_to_cpu(cp->product);
6325 hdev->devid_version = __le16_to_cpu(cp->version);
6327 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6330 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6332 hci_dev_unlock(hdev);
/* Log the outcome of re-enabling an advertising instance. */
6337 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6340 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6342 bt_dev_dbg(hdev, "status %d", err);
/* cmd_sync completion for MGMT_OP_SET_ADVERTISING: on error fail all
 * pending SET_ADVERTISING commands; on success sync HCI_ADVERTISING
 * with the controller's HCI_LE_ADV state, answer waiters and emit new
 * settings. If advertising was just turned off and advertising
 * instances exist, re-schedule instance advertising.
 */
6345 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6347 struct cmd_lookup match = { NULL, hdev };
6349 struct adv_info *adv_instance;
6350 u8 status = mgmt_status(err);
6353 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6354 cmd_status_rsp, &status);
6358 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6359 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6361 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6363 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6366 new_settings(hdev, match.sk);
6371 /* If "Set Advertising" was just disabled and instance advertising was
6372 * set up earlier, then re-enable multi-instance advertising.
6374 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6375 list_empty(&hdev->adv_instances))
6378 instance = hdev->cur_adv_instance;
6380 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6381 struct adv_info, list);
6385 instance = adv_instance->instance;
6388 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6390 enable_advertising_instance(hdev, err);
/* hci_cmd_sync work for MGMT_OP_SET_ADVERTISING: apply the connectable
 * flag (val == 0x02 means connectable advertising), cancel any instance
 * timeout, and start or stop advertising on instance 0.
 */
6393 static int set_adv_sync(struct hci_dev *hdev, void *data)
6395 struct mgmt_pending_cmd *cmd = data;
6396 struct mgmt_mode *cp = cmd->param;
6399 if (cp->val == 0x02)
6400 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6402 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6404 cancel_adv_timeout(hdev);
6407 /* Switch to instance "0" for the Set Advertising setting.
6408 * We cannot use update_[adv|scan_rsp]_data() here as the
6409 * HCI_ADVERTISING flag is not yet set.
6411 hdev->cur_adv_instance = 0x00;
6413 if (ext_adv_capable(hdev)) {
6414 hci_start_ext_adv_sync(hdev, 0x00);
6416 hci_update_adv_data_sync(hdev, 0x00);
6417 hci_update_scan_rsp_data_sync(hdev, 0x00);
6418 hci_enable_advertising_sync(hdev);
6421 hci_disable_advertising_sync(hdev);
/* MGMT_OP_SET_ADVERTISING handler: val 0x00 = off, 0x01 = on,
 * 0x02 = on + connectable. When no HCI traffic is needed (powered off,
 * no change, mesh mode, LE links up, or active scan running) only the
 * flags are toggled and settings are replied directly; otherwise the
 * change is queued via set_adv_sync.
 */
6427 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6430 struct mgmt_mode *cp = data;
6431 struct mgmt_pending_cmd *cmd;
6435 bt_dev_dbg(hdev, "sock %p", sk);
6437 status = mgmt_le_support(hdev);
6439 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6442 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6444 MGMT_STATUS_INVALID_PARAMS);
6446 if (hdev->advertising_paused)
6447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6454 /* The following conditions are ones which mean that we should
6455 * not do any HCI communication but directly send a mgmt
6456 * response to user space (after toggling the flag if
6459 if (!hdev_is_powered(hdev) ||
6460 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6461 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6462 hci_dev_test_flag(hdev, HCI_MESH) ||
6463 hci_conn_num(hdev, LE_LINK) > 0 ||
6464 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6465 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6469 hdev->cur_adv_instance = 0x00;
6470 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6471 if (cp->val == 0x02)
6472 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6474 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6476 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6477 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6480 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6485 err = new_settings(hdev, sk);
/* Only one advertising/LE toggle may be in flight at a time. */
6490 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6491 pending_find(MGMT_OP_SET_LE, hdev)) {
6492 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6497 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6501 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6502 set_advertising_complete);
6505 mgmt_pending_remove(cmd);
6508 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only allowed while powered off on
 * an LE-capable controller. A non-ANY address must not be BDADDR_NONE
 * and must have the two most significant bits set (static random
 * address format per the Core spec).
 */
6512 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6513 void *data, u16 len)
6515 struct mgmt_cp_set_static_address *cp = data;
6518 bt_dev_dbg(hdev, "sock %p", sk);
6520 if (!lmp_le_capable(hdev))
6521 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6522 MGMT_STATUS_NOT_SUPPORTED);
6524 if (hdev_is_powered(hdev))
6525 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6526 MGMT_STATUS_REJECTED);
6528 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6529 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6530 return mgmt_cmd_status(sk, hdev->id,
6531 MGMT_OP_SET_STATIC_ADDRESS,
6532 MGMT_STATUS_INVALID_PARAMS);
6534 /* Two most significant bits shall be set */
6535 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6536 return mgmt_cmd_status(sk, hdev->id,
6537 MGMT_OP_SET_STATIC_ADDRESS,
6538 MGMT_STATUS_INVALID_PARAMS);
6543 bacpy(&hdev->static_addr, &cp->bdaddr);
6545 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6549 err = new_settings(hdev, sk);
6552 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validate LE scan interval/window
 * (0x0004-0x4000, window <= interval, per HCI spec), store them on
 * hdev, and restart background scanning so they take effect.
 */
6556 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6557 void *data, u16 len)
6559 struct mgmt_cp_set_scan_params *cp = data;
6560 __u16 interval, window;
6563 bt_dev_dbg(hdev, "sock %p", sk);
6565 if (!lmp_le_capable(hdev))
6566 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6567 MGMT_STATUS_NOT_SUPPORTED);
6569 interval = __le16_to_cpu(cp->interval);
6571 if (interval < 0x0004 || interval > 0x4000)
6572 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6573 MGMT_STATUS_INVALID_PARAMS);
6575 window = __le16_to_cpu(cp->window);
6577 if (window < 0x0004 || window > 0x4000)
6578 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6579 MGMT_STATUS_INVALID_PARAMS);
6581 if (window > interval)
6582 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6583 MGMT_STATUS_INVALID_PARAMS);
6587 hdev->le_scan_interval = interval;
6588 hdev->le_scan_window = window;
6590 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6593 /* If background scan is running, restart it so new parameters are
6596 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6597 hdev->discovery.state == DISCOVERY_STOPPED)
6598 hci_update_passive_scan(hdev);
6600 hci_dev_unlock(hdev);
/* cmd_sync completion for MGMT_OP_SET_FAST_CONNECTABLE: on success sync
 * the HCI_FAST_CONNECTABLE flag with the requested value and notify;
 * on failure report the mapped status.
 */
6605 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6607 struct mgmt_pending_cmd *cmd = data;
6609 bt_dev_dbg(hdev, "err %d", err);
6612 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6615 struct mgmt_mode *cp = cmd->param;
6618 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6620 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6622 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6623 new_settings(hdev, cmd->sk);
6626 mgmt_pending_free(cmd);
/* hci_cmd_sync work: write the requested fast-connectable page scan
 * parameters to the controller.
 */
6629 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6631 struct mgmt_pending_cmd *cmd = data;
6632 struct mgmt_mode *cp = cmd->param;
6634 return hci_write_fast_connectable_sync(hdev, cp->val);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: needs BR/EDR enabled and HCI
 * version >= 1.2. If powered off or unchanged, just toggle the flag and
 * reply; otherwise queue the page-scan parameter write.
 */
6637 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6638 void *data, u16 len)
6640 struct mgmt_mode *cp = data;
6641 struct mgmt_pending_cmd *cmd;
6644 bt_dev_dbg(hdev, "sock %p", sk);
6646 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6647 hdev->hci_ver < BLUETOOTH_VER_1_2)
6648 return mgmt_cmd_status(sk, hdev->id,
6649 MGMT_OP_SET_FAST_CONNECTABLE,
6650 MGMT_STATUS_NOT_SUPPORTED);
6652 if (cp->val != 0x00 && cp->val != 0x01)
6653 return mgmt_cmd_status(sk, hdev->id,
6654 MGMT_OP_SET_FAST_CONNECTABLE,
6655 MGMT_STATUS_INVALID_PARAMS);
6659 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6660 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6664 if (!hdev_is_powered(hdev)) {
6665 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6666 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6667 new_settings(hdev, sk);
6671 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6676 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6677 fast_connectable_complete);
6680 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6681 MGMT_STATUS_FAILED);
6684 mgmt_pending_free(cmd);
6688 hci_dev_unlock(hdev);
/* cmd_sync completion for MGMT_OP_SET_BREDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag and report the error;
 * otherwise reply with the new settings.
 */
6693 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6695 struct mgmt_pending_cmd *cmd = data;
6697 bt_dev_dbg(hdev, "err %d", err);
6700 u8 mgmt_err = mgmt_status(err);
6702 /* We need to restore the flag if related HCI commands
6705 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6707 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6709 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6710 new_settings(hdev, cmd->sk);
6713 mgmt_pending_free(cmd);
/* hci_cmd_sync work for enabling BR/EDR: reset fast-connectable,
 * refresh page/inquiry scan, and update advertising data so the
 * "BR/EDR not supported" flag is corrected. Scan response data does
 * not carry the flags, so it needs no update.
 */
6716 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6720 status = hci_write_fast_connectable_sync(hdev, false);
6723 status = hci_update_scan_sync(hdev);
6725 /* Since only the advertising data flags will change, there
6726 * is no need to update the scan response data.
6729 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_BREDR handler: toggle BR/EDR support on a dual-mode
 * controller. Disabling while powered is rejected, as is re-enabling
 * when the controller is committed to an LE-only identity (static
 * address set or secure connections enabled). The flag is set before
 * queueing so the generated advertising flags are correct; the
 * completion handler rolls it back on failure.
 */
6734 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6736 struct mgmt_mode *cp = data;
6737 struct mgmt_pending_cmd *cmd;
6740 bt_dev_dbg(hdev, "sock %p", sk);
6742 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6743 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6744 MGMT_STATUS_NOT_SUPPORTED);
6746 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6747 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6748 MGMT_STATUS_REJECTED);
6750 if (cp->val != 0x00 && cp->val != 0x01)
6751 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6752 MGMT_STATUS_INVALID_PARAMS);
6756 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6757 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6761 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears every BR/EDR-only setting with it. */
6763 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6764 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6765 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6766 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6767 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6770 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6772 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6776 err = new_settings(hdev, sk);
6780 /* Reject disabling when powered on */
6782 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6783 MGMT_STATUS_REJECTED);
6786 /* When configuring a dual-mode controller to operate
6787 * with LE only and using a static address, then switching
6788 * BR/EDR back on is not allowed.
6790 * Dual-mode controllers shall operate with the public
6791 * address as its identity address for BR/EDR and LE. So
6792 * reject the attempt to create an invalid configuration.
6794 * The same restrictions applies when secure connections
6795 * has been enabled. For BR/EDR this is a controller feature
6796 * while for LE it is a host stack feature. This means that
6797 * switching BR/EDR back on when secure connections has been
6798 * enabled is not a supported transaction.
6800 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6801 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6802 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6803 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6804 MGMT_STATUS_REJECTED);
6809 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6813 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6814 set_bredr_complete);
6817 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6818 MGMT_STATUS_FAILED);
6820 mgmt_pending_free(cmd);
6825 /* We need to flip the bit already here so that
6826 * hci_req_update_adv_data generates the correct flags.
6828 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6831 hci_dev_unlock(hdev);
/* cmd_sync completion for MGMT_OP_SET_SECURE_CONN: on failure report
 * the error; on success map the requested value onto the SC flags
 * (off -> both cleared, 0x01 -> SC_ENABLED only, 0x02 -> SC_ENABLED +
 * SC_ONLY) and reply with new settings.
 */
6835 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6837 struct mgmt_pending_cmd *cmd = data;
6838 struct mgmt_mode *cp;
6840 bt_dev_dbg(hdev, "err %d", err);
6843 u8 mgmt_err = mgmt_status(err);
6845 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6853 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6854 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6857 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6858 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6861 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6862 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6866 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6867 new_settings(hdev, cmd->sk);
6870 mgmt_pending_free(cmd);
/* hci_cmd_sync work: force the write of the SC support setting to the
 * controller by pre-setting HCI_SC_ENABLED.
 */
6873 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6875 struct mgmt_pending_cmd *cmd = data;
6876 struct mgmt_mode *cp = cmd->param;
6879 /* Force write of val */
6880 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6882 return hci_write_sc_support_sync(hdev, val);
/* MGMT_OP_SET_SECURE_CONN handler: val 0x00 = off, 0x01 = SC enabled,
 * 0x02 = SC-only mode. When the controller cannot act on it directly
 * (powered off, not SC-capable, or BR/EDR disabled) only the host flags
 * are toggled; otherwise the write is queued via set_secure_conn_sync.
 */
6885 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6886 void *data, u16 len)
6888 struct mgmt_mode *cp = data;
6889 struct mgmt_pending_cmd *cmd;
6893 bt_dev_dbg(hdev, "sock %p", sk);
6895 if (!lmp_sc_capable(hdev) &&
6896 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6897 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6898 MGMT_STATUS_NOT_SUPPORTED);
/* SC on BR/EDR requires SSP to be enabled first. */
6900 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6901 lmp_sc_capable(hdev) &&
6902 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6903 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6904 MGMT_STATUS_REJECTED);
6906 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6908 MGMT_STATUS_INVALID_PARAMS);
6912 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6913 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6917 changed = !hci_dev_test_and_set_flag(hdev,
6919 if (cp->val == 0x02)
6920 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6922 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6924 changed = hci_dev_test_and_clear_flag(hdev,
6926 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6929 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6934 err = new_settings(hdev, sk);
6941 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6942 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6943 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6947 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6951 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6952 set_secure_conn_complete);
6955 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6956 MGMT_STATUS_FAILED);
6958 mgmt_pending_free(cmd);
6962 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS.  0x00 disables, 0x01 keeps debug
 * keys, 0x02 additionally enables SSP debug mode on the controller.
 * Tracks two independent state changes: whether debug keys are kept
 * (HCI_KEEP_DEBUG_KEYS) and whether they are actively used
 * (HCI_USE_DEBUG_KEYS).
 * NOTE(review): locals, hci_dev_lock and some braces are elided here.
 */
6966 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6967 void *data, u16 len)
6969 struct mgmt_mode *cp = data;
6970 bool changed, use_changed;
6973 bt_dev_dbg(hdev, "sock %p", sk);
6975 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6976 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6977 MGMT_STATUS_INVALID_PARAMS);
/* Non-zero val keeps debug keys around; zero clears the policy. */
6982 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6984 changed = hci_dev_test_and_clear_flag(hdev,
6985 HCI_KEEP_DEBUG_KEYS);
6987 if (cp->val == 0x02)
6988 use_changed = !hci_dev_test_and_set_flag(hdev,
6989 HCI_USE_DEBUG_KEYS);
6991 use_changed = hci_dev_test_and_clear_flag(hdev,
6992 HCI_USE_DEBUG_KEYS);
/* Push SSP debug mode to the controller only when powered, the use
 * state actually changed, and SSP is enabled.
 */
6994 if (hdev_is_powered(hdev) && use_changed &&
6995 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6996 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6997 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6998 sizeof(mode), &mode);
7001 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7006 err = new_settings(hdev, sk);
7009 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY.  Only permitted while powered off.
 * 0x01 enables privacy (stores the IRK, marks the RPA expired so a new
 * one is generated), 0x02 additionally enables limited privacy, 0x00
 * disables privacy and wipes the IRK.
 * NOTE(review): locals, hci_dev_lock and some braces are elided here.
 */
7013 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7016 struct mgmt_cp_set_privacy *cp = cp_data;
7020 bt_dev_dbg(hdev, "sock %p", sk);
7022 if (!lmp_le_capable(hdev))
7023 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7024 MGMT_STATUS_NOT_SUPPORTED);
7026 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7027 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7028 MGMT_STATUS_INVALID_PARAMS);
/* Changing privacy settings on a powered controller is not allowed. */
7030 if (hdev_is_powered(hdev))
7031 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7032 MGMT_STATUS_REJECTED);
7036 /* If user space supports this command it is also expected to
7037 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7039 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable path: store the new IRK and force RPA regeneration. */
7042 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7043 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7044 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7045 hci_adv_instances_set_rpa_expired(hdev, true);
7046 if (cp->privacy == 0x02)
7047 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7049 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: clear flags and zeroize the stored IRK. */
7051 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7052 memset(hdev->irk, 0, sizeof(hdev->irk));
7053 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7054 hci_adv_instances_set_rpa_expired(hdev, false);
7055 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7058 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7063 err = new_settings(hdev, sk);
7066 hci_dev_unlock(hdev);
/* Validate an IRK entry's address type.  LE random addresses must be
 * static (two most significant bits set, per BT Core Spec Vol 6 Part B
 * 1.3.2.1).  NOTE(review): the return statements/default case are elided
 * from this excerpt.
 */
7070 static bool irk_is_valid(struct mgmt_irk_info *irk)
7072 switch (irk->addr.type) {
7073 case BDADDR_LE_PUBLIC:
7076 case BDADDR_LE_RANDOM:
7077 /* Two most significant bits shall be set */
7078 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS.  Validates the variable-length payload
 * (count bound, exact struct_size match, per-entry address validity),
 * then atomically replaces the SMP IRK store, skipping blocked keys.
 * NOTE(review): hci_dev_lock and some braces are elided here.
 */
7086 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
/* Upper bound keeps expected_len below U16_MAX (len is u16). */
7089 struct mgmt_cp_load_irks *cp = cp_data;
7090 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7091 sizeof(struct mgmt_irk_info));
7092 u16 irk_count, expected_len;
7095 bt_dev_dbg(hdev, "sock %p", sk);
7097 if (!lmp_le_capable(hdev))
7098 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7099 MGMT_STATUS_NOT_SUPPORTED);
7101 irk_count = __le16_to_cpu(cp->irk_count);
7102 if (irk_count > max_irk_count) {
7103 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7105 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7106 MGMT_STATUS_INVALID_PARAMS);
/* Payload length must match the declared count exactly. */
7109 expected_len = struct_size(cp, irks, irk_count);
7110 if (expected_len != len) {
7111 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7113 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7114 MGMT_STATUS_INVALID_PARAMS);
7117 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate all entries before mutating any state. */
7119 for (i = 0; i < irk_count; i++) {
7120 struct mgmt_irk_info *key = &cp->irks[i];
7122 if (!irk_is_valid(key))
7123 return mgmt_cmd_status(sk, hdev->id,
7125 MGMT_STATUS_INVALID_PARAMS);
/* Replace the IRK store wholesale. */
7130 hci_smp_irks_clear(hdev);
7132 for (i = 0; i < irk_count; i++) {
7133 struct mgmt_irk_info *irk = &cp->irks[i];
7134 u8 addr_type = le_addr_type(irk->addr.type);
7136 if (hci_is_blocked_key(hdev,
7137 HCI_BLOCKED_KEY_TYPE_IRK,
7139 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7144 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7145 if (irk->addr.type == BDADDR_BREDR)
7146 addr_type = BDADDR_BREDR;
7148 hci_add_irk(hdev, &irk->addr.bdaddr,
7149 addr_type, irk->val,
/* User space supplied IRKs, so it can resolve RPAs. */
7153 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7155 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7157 hci_dev_unlock(hdev);
/* Validate an LTK entry: initiator must be 0/1 and, for LE random
 * addresses, the address must be static (top two bits set).
 * NOTE(review): return statements/default case are elided from this
 * excerpt.
 */
7162 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7164 if (key->initiator != 0x00 && key->initiator != 0x01)
7167 switch (key->addr.type) {
7168 case BDADDR_LE_PUBLIC:
7171 case BDADDR_LE_RANDOM:
7172 /* Two most significant bits shall be set */
7173 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.  Same validation pattern as
 * load_irks: bound the count, require an exact struct_size payload,
 * validate every entry, then replace the LTK store, translating the
 * mgmt key type into (SMP type, authenticated) pairs.
 * NOTE(review): hci_dev_lock, break statements and some braces are
 * elided from this excerpt.
 */
7181 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7182 void *cp_data, u16 len)
7184 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound keeps expected_len below U16_MAX (len is u16). */
7185 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7186 sizeof(struct mgmt_ltk_info));
7187 u16 key_count, expected_len;
7190 bt_dev_dbg(hdev, "sock %p", sk);
7192 if (!lmp_le_capable(hdev))
7193 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7194 MGMT_STATUS_NOT_SUPPORTED);
7196 key_count = __le16_to_cpu(cp->key_count);
7197 if (key_count > max_key_count) {
7198 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7200 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7201 MGMT_STATUS_INVALID_PARAMS);
7204 expected_len = struct_size(cp, keys, key_count);
7205 if (expected_len != len) {
7206 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7208 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7209 MGMT_STATUS_INVALID_PARAMS);
7212 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Validate all entries before mutating any state. */
7214 for (i = 0; i < key_count; i++) {
7215 struct mgmt_ltk_info *key = &cp->keys[i];
7217 if (!ltk_is_valid(key))
7218 return mgmt_cmd_status(sk, hdev->id,
7219 MGMT_OP_LOAD_LONG_TERM_KEYS,
7220 MGMT_STATUS_INVALID_PARAMS);
/* Replace the LTK store wholesale. */
7225 hci_smp_ltks_clear(hdev);
7227 for (i = 0; i < key_count; i++) {
7228 struct mgmt_ltk_info *key = &cp->keys[i];
7229 u8 type, authenticated;
7230 u8 addr_type = le_addr_type(key->addr.type);
7232 if (hci_is_blocked_key(hdev,
7233 HCI_BLOCKED_KEY_TYPE_LTK,
7235 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map mgmt key type -> SMP key type + authenticated flag. */
7240 switch (key->type) {
7241 case MGMT_LTK_UNAUTHENTICATED:
7242 authenticated = 0x00;
7243 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7245 case MGMT_LTK_AUTHENTICATED:
7246 authenticated = 0x01;
7247 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7249 case MGMT_LTK_P256_UNAUTH:
7250 authenticated = 0x00;
7251 type = SMP_LTK_P256;
7253 case MGMT_LTK_P256_AUTH:
7254 authenticated = 0x01;
7255 type = SMP_LTK_P256;
7257 case MGMT_LTK_P256_DEBUG:
7258 authenticated = 0x00;
7259 type = SMP_LTK_P256_DEBUG;
7265 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7266 if (key->addr.type == BDADDR_BREDR)
7267 addr_type = BDADDR_BREDR;
7269 hci_add_ltk(hdev, &key->addr.bdaddr,
7270 addr_type, type, authenticated,
7271 key->val, key->enc_size, key->ediv, key->rand);
7274 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7277 hci_dev_unlock(hdev);
/* Completion callback for MGMT_OP_GET_CONN_INFO: build the reply from
 * the connection's cached RSSI/TX-power values on success, or invalid
 * sentinels on failure, then complete the command and free it.
 */
7282 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7284 struct mgmt_pending_cmd *cmd = data;
/* conn was stashed in cmd->user_data by get_conn_info_sync(). */
7285 struct hci_conn *conn = cmd->user_data;
7286 struct mgmt_cp_get_conn_info *cp = cmd->param;
7287 struct mgmt_rp_get_conn_info rp;
7290 bt_dev_dbg(hdev, "err %d", err);
/* Echo the requested address back in the reply. */
7292 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7294 status = mgmt_status(err);
7295 if (status == MGMT_STATUS_SUCCESS) {
7296 rp.rssi = conn->rssi;
7297 rp.tx_power = conn->tx_power;
7298 rp.max_tx_power = conn->max_tx_power;
/* Failure: report "unknown" sentinel values. */
7300 rp.rssi = HCI_RSSI_INVALID;
7301 rp.tx_power = HCI_TX_POWER_INVALID;
7302 rp.max_tx_power = HCI_TX_POWER_INVALID;
7305 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7308 mgmt_pending_free(cmd);
/* hci_cmd_sync work for Get Connection Information: re-resolve the
 * connection (it may have dropped since the request was queued), then
 * refresh RSSI every time, TX power only when unknown (LE TX power is
 * constant), and max TX power only once per connection.
 */
7311 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7313 struct mgmt_pending_cmd *cmd = data;
7314 struct mgmt_cp_get_conn_info *cp = cmd->param;
7315 struct hci_conn *conn;
7319 /* Make sure we are still connected */
7320 if (cp->addr.type == BDADDR_BREDR)
7321 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7324 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7326 if (!conn || conn->state != BT_CONNECTED)
7327 return MGMT_STATUS_NOT_CONNECTED;
/* Stash the conn for get_conn_info_complete(). */
7329 cmd->user_data = conn;
7330 handle = cpu_to_le16(conn->handle);
7332 /* Refresh RSSI each time */
7333 err = hci_read_rssi_sync(hdev, handle);
7335 /* For LE links TX power does not change thus we don't need to
7336 * query for it once value is known.
7338 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7339 conn->tx_power == HCI_TX_POWER_INVALID))
7340 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7342 /* Max TX power needs to be read only once per connection */
7343 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7344 err = hci_read_tx_power_sync(hdev, handle, 0x01);
/* Handler for MGMT_OP_GET_CONN_INFO.  Answers from the per-connection
 * cache when it is fresh; otherwise queues get_conn_info_sync to query
 * the controller.  Cache freshness uses a randomized age between
 * hdev->conn_info_min_age and max_age so clients cannot predict when
 * polling will trigger a new controller query.
 * NOTE(review): hci_dev_lock, some braces and goto labels are elided
 * from this excerpt.
 */
7349 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7352 struct mgmt_cp_get_conn_info *cp = data;
7353 struct mgmt_rp_get_conn_info rp;
7354 struct hci_conn *conn;
7355 unsigned long conn_info_age;
7358 bt_dev_dbg(hdev, "sock %p", sk);
7360 memset(&rp, 0, sizeof(rp));
7361 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7362 rp.addr.type = cp->addr.type;
7364 if (!bdaddr_type_is_valid(cp->addr.type))
7365 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7366 MGMT_STATUS_INVALID_PARAMS,
7371 if (!hdev_is_powered(hdev)) {
7372 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7373 MGMT_STATUS_NOT_POWERED, &rp,
/* BR/EDR uses the ACL link; everything else is treated as LE. */
7378 if (cp->addr.type == BDADDR_BREDR)
7379 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7382 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7384 if (!conn || conn->state != BT_CONNECTED) {
7385 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7386 MGMT_STATUS_NOT_CONNECTED, &rp,
7391 /* To avoid client trying to guess when to poll again for information we
7392 * calculate conn info age as random value between min/max set in hdev.
7394 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7395 hdev->conn_info_max_age - 1);
7397 /* Query controller to refresh cached values if they are too old or were
7400 if (time_after(jiffies, conn->conn_info_timestamp +
7401 msecs_to_jiffies(conn_info_age)) ||
7402 !conn->conn_info_timestamp) {
7403 struct mgmt_pending_cmd *cmd;
7405 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7410 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7411 cmd, get_conn_info_complete);
/* Queueing failed: report failure and free the pending command. */
7415 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7416 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7419 mgmt_pending_free(cmd);
7424 conn->conn_info_timestamp = jiffies;
7426 /* Cache is valid, just reply with values cached in hci_conn */
7427 rp.rssi = conn->rssi;
7428 rp.tx_power = conn->tx_power;
7429 rp.max_tx_power = conn->max_tx_power;
7431 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7432 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7436 hci_dev_unlock(hdev);
/* Completion callback for MGMT_OP_GET_CLOCK_INFO: fill in the local
 * clock and, when a piconet connection was involved, its clock and
 * accuracy, then complete the command and free it.
 * NOTE(review): the success/conn guards around the clock assignments
 * are elided from this excerpt.
 */
7440 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7442 struct mgmt_pending_cmd *cmd = data;
7443 struct mgmt_cp_get_clock_info *cp = cmd->param;
7444 struct mgmt_rp_get_clock_info rp;
/* conn was stashed in cmd->user_data by get_clock_info_sync(). */
7445 struct hci_conn *conn = cmd->user_data;
7446 u8 status = mgmt_status(err);
7448 bt_dev_dbg(hdev, "err %d", err);
/* Echo the requested address back in the reply. */
7450 memset(&rp, 0, sizeof(rp));
7451 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7452 rp.addr.type = cp->addr.type;
7457 rp.local_clock = cpu_to_le32(hdev->clock);
7460 rp.piconet_clock = cpu_to_le32(conn->clock);
7461 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7465 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7468 mgmt_pending_free(cmd);
/* hci_cmd_sync work for Get Clock Information: first read the local
 * clock (hci_cp zeroed => handle 0 / local), then, if the ACL
 * connection still exists, read the piconet clock for its handle.
 */
7471 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7473 struct mgmt_pending_cmd *cmd = data;
7474 struct mgmt_cp_get_clock_info *cp = cmd->param;
7475 struct hci_cp_read_clock hci_cp;
7476 struct hci_conn *conn;
/* Zeroed request reads the local clock (which = 0x00). */
7478 memset(&hci_cp, 0, sizeof(hci_cp));
7479 hci_read_clock_sync(hdev, &hci_cp);
7481 /* Make sure connection still exists */
7482 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7483 if (!conn || conn->state != BT_CONNECTED)
7484 return MGMT_STATUS_NOT_CONNECTED;
/* Stash the conn for get_clock_info_complete(). */
7486 cmd->user_data = conn;
7487 hci_cp.handle = cpu_to_le16(conn->handle);
7488 hci_cp.which = 0x01; /* Piconet clock */
7490 return hci_read_clock_sync(hdev, &hci_cp);
/* Handler for MGMT_OP_GET_CLOCK_INFO (BR/EDR only).  Validates the
 * address type and power state, optionally resolves the peer ACL
 * connection (BDADDR_ANY requests local clock only), then queues
 * get_clock_info_sync with get_clock_info_complete as callback.
 * NOTE(review): hci_dev_lock, some braces and goto labels are elided
 * from this excerpt.
 */
7493 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7496 struct mgmt_cp_get_clock_info *cp = data;
7497 struct mgmt_rp_get_clock_info rp;
7498 struct mgmt_pending_cmd *cmd;
7499 struct hci_conn *conn;
7502 bt_dev_dbg(hdev, "sock %p", sk);
7504 memset(&rp, 0, sizeof(rp));
7505 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7506 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept. */
7508 if (cp->addr.type != BDADDR_BREDR)
7509 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7510 MGMT_STATUS_INVALID_PARAMS,
7515 if (!hdev_is_powered(hdev)) {
7516 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7517 MGMT_STATUS_NOT_POWERED, &rp,
/* Non-ANY address: the peer must currently be connected. */
7522 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7523 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7525 if (!conn || conn->state != BT_CONNECTED) {
7526 err = mgmt_cmd_complete(sk, hdev->id,
7527 MGMT_OP_GET_CLOCK_INFO,
7528 MGMT_STATUS_NOT_CONNECTED,
7536 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7540 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7541 get_clock_info_complete);
/* Queueing failed: report failure and free the pending command. */
7544 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7545 MGMT_STATUS_FAILED, &rp, sizeof(rp));
7548 mgmt_pending_free(cmd);
7553 hci_dev_unlock(hdev);
/* Check whether an LE connection to (addr, type) exists and is in the
 * BT_CONNECTED state.  NOTE(review): return statements are elided from
 * this excerpt.
 */
7557 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7559 struct hci_conn *conn;
7561 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7565 if (conn->dst_type != type)
7568 if (conn->state != BT_CONNECTED)
/* This function requires the caller holds hdev->lock */
/* Set (creating if necessary) the auto-connect policy for an LE peer
 * and move its conn params onto the matching pending list
 * (pend_le_conns for connecting, pend_le_reports for reporting).
 * NOTE(review): error handling, break statements and braces are elided
 * from this excerpt.
 */
7575 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7576 u8 addr_type, u8 auto_connect)
7578 struct hci_conn_params *params;
7580 params = hci_conn_params_add(hdev, addr, addr_type);
/* No change requested - nothing to do. */
7584 if (params->auto_connect == auto_connect)
/* Detach from any pending list before re-filing below. */
7587 hci_pend_le_list_del_init(params);
7589 switch (auto_connect) {
7590 case HCI_AUTO_CONN_DISABLED:
7591 case HCI_AUTO_CONN_LINK_LOSS:
7592 /* If auto connect is being disabled when we're trying to
7593 * connect to device, keep connecting.
7595 if (params->explicit_connect)
7596 hci_pend_le_list_add(params, &hdev->pend_le_conns)
7598 case HCI_AUTO_CONN_REPORT:
7599 if (params->explicit_connect)
7600 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7602 hci_pend_le_list_add(params, &hdev->pend_le_reports);
7604 case HCI_AUTO_CONN_DIRECT:
7605 case HCI_AUTO_CONN_ALWAYS:
/* Only queue for connection if not already connected. */
7606 if (!is_connected(hdev, addr, addr_type))
7607 hci_pend_le_list_add(params, &hdev->pend_le_conns);
7611 params->auto_connect = auto_connect;
7613 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7614 addr, addr_type, auto_connect);
/* Emit the MGMT Device Added event to all sockets except 'sk' (the
 * originator already gets the command reply).
 */
7619 static void device_added(struct sock *sk, struct hci_dev *hdev,
7620 bdaddr_t *bdaddr, u8 type, u8 action)
7622 struct mgmt_ev_device_added ev;
7624 bacpy(&ev.addr.bdaddr, bdaddr);
7625 ev.addr.type = type;
7628 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* hci_cmd_sync work for Add Device: just re-sync passive scanning so
 * the new entry takes effect.
 */
7631 static int add_device_sync(struct hci_dev *hdev, void *data)
7633 return hci_update_passive_scan_sync(hdev);
/* Handler for MGMT_OP_ADD_DEVICE.  For BR/EDR, adds the address to the
 * accept list (only action 0x01, incoming connections, is supported).
 * For LE, maps action to an auto-connect policy (0x02 always, 0x01
 * direct, else report), creates/updates conn params, queues a passive
 * scan update, and emits Device Added + Device Flags Changed.
 * NOTE(review): hci_dev_lock, some braces and goto labels are elided
 * from this excerpt.
 */
7636 static int add_device(struct sock *sk, struct hci_dev *hdev,
7637 void *data, u16 len)
7639 struct mgmt_cp_add_device *cp = data;
7640 u8 auto_conn, addr_type;
7641 struct hci_conn_params *params;
7643 u32 current_flags = 0;
7644 u32 supported_flags;
7646 bt_dev_dbg(hdev, "sock %p", sk);
7648 if (!bdaddr_type_is_valid(cp->addr.type) ||
7649 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7650 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7651 MGMT_STATUS_INVALID_PARAMS,
7652 &cp->addr, sizeof(cp->addr));
7654 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7655 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7656 MGMT_STATUS_INVALID_PARAMS,
7657 &cp->addr, sizeof(cp->addr));
7661 if (cp->addr.type == BDADDR_BREDR) {
7662 /* Only incoming connections action is supported for now */
7663 if (cp->action != 0x01) {
7664 err = mgmt_cmd_complete(sk, hdev->id,
7666 MGMT_STATUS_INVALID_PARAMS,
7667 &cp->addr, sizeof(cp->addr));
7671 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
/* Refresh page scan so the accept-list change takes effect. */
7677 hci_update_scan(hdev);
/* LE path from here on. */
7682 addr_type = le_addr_type(cp->addr.type);
7684 if (cp->action == 0x02)
7685 auto_conn = HCI_AUTO_CONN_ALWAYS;
7686 else if (cp->action == 0x01)
7687 auto_conn = HCI_AUTO_CONN_DIRECT;
7689 auto_conn = HCI_AUTO_CONN_REPORT;
7691 /* Kernel internally uses conn_params with resolvable private
7692 * address, but Add Device allows only identity addresses.
7693 * Make sure it is enforced before calling
7694 * hci_conn_params_lookup.
7696 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7697 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7698 MGMT_STATUS_INVALID_PARAMS,
7699 &cp->addr, sizeof(cp->addr));
7703 /* If the connection parameters don't exist for this device,
7704 * they will be created and configured with defaults.
7706 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7708 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7709 MGMT_STATUS_FAILED, &cp->addr,
7713 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7716 current_flags = params->flags;
7719 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
/* Notify listeners of the new device and its flags. */
7724 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7725 supported_flags = hdev->conn_flags;
7726 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7727 supported_flags, current_flags);
7729 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7730 MGMT_STATUS_SUCCESS, &cp->addr,
7734 hci_dev_unlock(hdev);
/* Emit the MGMT Device Removed event to all sockets except 'sk'. */
7738 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7739 bdaddr_t *bdaddr, u8 type)
7741 struct mgmt_ev_device_removed ev;
7743 bacpy(&ev.addr.bdaddr, bdaddr);
7744 ev.addr.type = type;
7746 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* hci_cmd_sync work for Remove Device: re-sync passive scanning so the
 * removal takes effect.
 */
7749 static int remove_device_sync(struct hci_dev *hdev, void *data)
7751 return hci_update_passive_scan_sync(hdev);
/* Handler for MGMT_OP_REMOVE_DEVICE.  A specific address removes that
 * one entry (accept list for BR/EDR, conn params for LE; disabled or
 * explicit-connect params cannot be removed this way).  BDADDR_ANY with
 * type 0 removes all accept-list entries and all removable LE conn
 * params.  Ends by queueing a passive scan update.
 * NOTE(review): hci_dev_lock, some braces, continue statements and goto
 * labels are elided from this excerpt.
 */
7754 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7755 void *data, u16 len)
7757 struct mgmt_cp_remove_device *cp = data;
7760 bt_dev_dbg(hdev, "sock %p", sk);
/* Specific-address path. */
7764 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7765 struct hci_conn_params *params;
7768 if (!bdaddr_type_is_valid(cp->addr.type)) {
7769 err = mgmt_cmd_complete(sk, hdev->id,
7770 MGMT_OP_REMOVE_DEVICE,
7771 MGMT_STATUS_INVALID_PARAMS,
7772 &cp->addr, sizeof(cp->addr));
7776 if (cp->addr.type == BDADDR_BREDR) {
7777 err = hci_bdaddr_list_del(&hdev->accept_list,
7781 err = mgmt_cmd_complete(sk, hdev->id,
7782 MGMT_OP_REMOVE_DEVICE,
7783 MGMT_STATUS_INVALID_PARAMS,
/* Refresh page scan after the accept-list change. */
7789 hci_update_scan(hdev);
7791 device_removed(sk, hdev, &cp->addr.bdaddr,
7796 addr_type = le_addr_type(cp->addr.type);
7798 /* Kernel internally uses conn_params with resolvable private
7799 * address, but Remove Device allows only identity addresses.
7800 * Make sure it is enforced before calling
7801 * hci_conn_params_lookup.
7803 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7804 err = mgmt_cmd_complete(sk, hdev->id,
7805 MGMT_OP_REMOVE_DEVICE,
7806 MGMT_STATUS_INVALID_PARAMS,
7807 &cp->addr, sizeof(cp->addr));
7811 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7814 err = mgmt_cmd_complete(sk, hdev->id,
7815 MGMT_OP_REMOVE_DEVICE,
7816 MGMT_STATUS_INVALID_PARAMS,
7817 &cp->addr, sizeof(cp->addr));
/* Entries not created via Add Device cannot be removed here. */
7821 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7822 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7823 err = mgmt_cmd_complete(sk, hdev->id,
7824 MGMT_OP_REMOVE_DEVICE,
7825 MGMT_STATUS_INVALID_PARAMS,
7826 &cp->addr, sizeof(cp->addr));
7830 hci_conn_params_free(params);
7832 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY path: wipe everything removable. */
7834 struct hci_conn_params *p, *tmp;
7835 struct bdaddr_list *b, *btmp;
/* Only type 0 is valid together with BDADDR_ANY. */
7837 if (cp->addr.type) {
7838 err = mgmt_cmd_complete(sk, hdev->id,
7839 MGMT_OP_REMOVE_DEVICE,
7840 MGMT_STATUS_INVALID_PARAMS,
7841 &cp->addr, sizeof(cp->addr));
7845 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7846 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7851 hci_update_scan(hdev);
7853 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7854 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7856 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep explicit-connect entries, just demote them. */
7857 if (p->explicit_connect) {
7858 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7861 hci_conn_params_free(p);
7864 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7867 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7870 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7871 MGMT_STATUS_SUCCESS, &cp->addr,
7874 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_CONN_PARAM.  Validates the variable-length
 * payload like load_irks/load_ltks, clears previously disabled params,
 * then adds each valid (address, interval/latency/timeout) entry,
 * skipping (not failing on) invalid individual entries.
 * NOTE(review): hci_dev_lock, some locals and braces are elided here.
 * Also: the "¶m" sequences below look like mojibake for "&param"
 * (HTML-entity corruption) - the real source should read "&param";
 * verify before compiling.
 */
7878 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
/* Upper bound keeps expected_len below U16_MAX (len is u16). */
7881 struct mgmt_cp_load_conn_param *cp = data;
7882 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7883 sizeof(struct mgmt_conn_param));
7884 u16 param_count, expected_len;
7887 if (!lmp_le_capable(hdev))
7888 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7889 MGMT_STATUS_NOT_SUPPORTED);
7891 param_count = __le16_to_cpu(cp->param_count);
7892 if (param_count > max_param_count) {
7893 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7895 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7896 MGMT_STATUS_INVALID_PARAMS);
7899 expected_len = struct_size(cp, params, param_count);
7900 if (expected_len != len) {
7901 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7903 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7904 MGMT_STATUS_INVALID_PARAMS);
7907 bt_dev_dbg(hdev, "param_count %u", param_count);
/* Drop params that were only kept around while disabled. */
7911 hci_conn_params_clear_disabled(hdev);
7913 for (i = 0; i < param_count; i++) {
7914 struct mgmt_conn_param *param = &cp->params[i];
7915 struct hci_conn_params *hci_param;
7916 u16 min, max, latency, timeout;
7919 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
/* Only LE public/random identity addresses are accepted. */
7922 if (param->addr.type == BDADDR_LE_PUBLIC) {
7923 addr_type = ADDR_LE_DEV_PUBLIC;
7924 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7925 addr_type = ADDR_LE_DEV_RANDOM;
7927 bt_dev_err(hdev, "ignoring invalid connection parameters");
7931 min = le16_to_cpu(param->min_interval);
7932 max = le16_to_cpu(param->max_interval);
7933 latency = le16_to_cpu(param->latency);
7934 timeout = le16_to_cpu(param->timeout);
7936 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7937 min, max, latency, timeout);
/* Range-check against the spec before storing. */
7939 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7940 bt_dev_err(hdev, "ignoring invalid connection parameters");
7944 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
7947 bt_dev_err(hdev, "failed to add connection parameters");
7951 hci_param->conn_min_interval = min;
7952 hci_param->conn_max_interval = max;
7953 hci_param->conn_latency = latency;
7954 hci_param->supervision_timeout = timeout;
7957 hci_dev_unlock(hdev);
7959 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG.  Only valid while powered
 * off and only on controllers with the EXTERNAL_CONFIG quirk.  Toggles
 * HCI_EXT_CONFIGURED and, when the configured state flips, re-registers
 * the management index (removing/re-adding it as configured or
 * unconfigured) and kicks off power-on.
 * NOTE(review): hci_dev_lock, some braces and goto labels are elided
 * from this excerpt.
 */
7963 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7964 void *data, u16 len)
7966 struct mgmt_cp_set_external_config *cp = data;
7970 bt_dev_dbg(hdev, "sock %p", sk);
7972 if (hdev_is_powered(hdev))
7973 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7974 MGMT_STATUS_REJECTED);
7976 if (cp->config != 0x00 && cp->config != 0x01)
7977 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7978 MGMT_STATUS_INVALID_PARAMS);
7980 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7981 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7982 MGMT_STATUS_NOT_SUPPORTED);
7987 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED)
7989 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7991 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7998 err = new_options(hdev, sk);
/* If the overall configured state flipped, re-register the index. */
8000 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8001 mgmt_index_removed(hdev);
8003 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8004 hci_dev_set_flag(hdev, HCI_CONFIG);
8005 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8007 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Becoming unconfigured: run the controller raw until set up. */
8009 set_bit(HCI_RAW, &hdev->flags);
8010 mgmt_index_added(hdev);
8015 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS.  Only valid while powered off
 * and when the driver provides a set_bdaddr hook.  Stores the new
 * public address and, if the device thereby becomes configured,
 * re-registers the management index and kicks off power-on.
 * NOTE(review): hci_dev_lock, some braces and goto labels are elided
 * from this excerpt.
 */
8019 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8020 void *data, u16 len)
8022 struct mgmt_cp_set_public_address *cp = data;
8026 bt_dev_dbg(hdev, "sock %p", sk);
8028 if (hdev_is_powered(hdev))
8029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8030 MGMT_STATUS_REJECTED);
/* BDADDR_ANY is not a usable public address. */
8032 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8033 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8034 MGMT_STATUS_INVALID_PARAMS);
/* The driver must support programming the address. */
8036 if (!hdev->set_bdaddr)
8037 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8038 MGMT_STATUS_NOT_SUPPORTED);
8042 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8043 bacpy(&hdev->public_addr, &cp->bdaddr);
8045 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8052 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8053 err = new_options(hdev, sk);
/* Address made the device configured: move it to the normal index. */
8055 if (is_configured(hdev)) {
8056 mgmt_index_removed(hdev);
8058 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8060 hci_dev_set_flag(hdev, HCI_CONFIG);
8061 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8063 queue_work(hdev->req_workqueue, &hdev->power_on);
8067 hci_dev_unlock(hdev);
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_EXT_DATA (BR/EDR
 * path).  Parses the controller's OOB reply skb (legacy or SC/extended
 * variant depending on bredr_sc_enabled), builds an EIR-formatted reply
 * containing class of device plus P-192 and - when applicable - P-256
 * hash/randomizer, completes the command, and broadcasts the OOB data
 * updated event to interested sockets.
 * NOTE(review): heavily elided excerpt - skb/status guards, h192/r192/
 * h256/r256 assignments, kfree paths and braces are missing; do not
 * infer control flow from what is visible here alone.
 */
8071 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8074 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8075 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8076 u8 *h192, *r192, *h256, *r256;
8077 struct mgmt_pending_cmd *cmd = data;
8078 struct sk_buff *skb = cmd->skb;
8079 u8 status = mgmt_status(err);
/* Bail out if the command has already been taken over/cancelled. */
8082 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8087 status = MGMT_STATUS_FAILED;
8088 else if (IS_ERR(skb))
8089 status = mgmt_status(PTR_ERR(skb));
8091 status = mgmt_status(skb->data[0]);
8094 bt_dev_dbg(hdev, "status %u", status);
8096 mgmt_cp = cmd->param;
8099 status = mgmt_status(status);
/* Legacy (non-SC) controllers return only the P-192 pair. */
8106 } else if (!bredr_sc_enabled(hdev)) {
8107 struct hci_rp_read_local_oob_data *rp;
8109 if (skb->len != sizeof(*rp)) {
8110 status = MGMT_STATUS_FAILED;
8113 status = MGMT_STATUS_SUCCESS;
8114 rp = (void *)skb->data;
/* 5 bytes flags/CoD EIR + 18 bytes hash + 18 bytes randomizer. */
8116 eir_len = 5 + 18 + 18;
8123 struct hci_rp_read_local_oob_ext_data *rp;
8125 if (skb->len != sizeof(*rp)) {
8126 status = MGMT_STATUS_FAILED;
8129 status = MGMT_STATUS_SUCCESS;
8130 rp = (void *)skb->data;
/* SC-only mode exposes only the P-256 pair. */
8132 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8133 eir_len = 5 + 18 + 18;
8137 eir_len = 5 + 18 + 18 + 18 + 18;
8147 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8154 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8155 hdev->dev_class, 3);
8158 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8159 EIR_SSP_HASH_C192, h192, 16);
8160 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8161 EIR_SSP_RAND_R192, r192, 16);
8165 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8166 EIR_SSP_HASH_C256, h256, 16);
8167 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8168 EIR_SSP_RAND_R256, r256, 16);
8172 mgmt_rp->type = mgmt_cp->type;
8173 mgmt_rp->eir_len = cpu_to_le16(eir_len);
8175 err = mgmt_cmd_complete(cmd->sk, hdev->id,
8176 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8177 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8178 if (err < 0 || status)
/* Broadcast to sockets that opted in to OOB data events. */
8181 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8183 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8184 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8185 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8187 if (skb && !IS_ERR(skb))
8191 mgmt_pending_remove(cmd);
/* Queue a Read Local OOB Data request for the BR/EDR SSP path of
 * Read Local OOB Extended Data; read_local_oob_ext_data_complete()
 * finishes the mgmt command.  Removes the pending entry on queue
 * failure.
 */
8194 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8195 struct mgmt_cp_read_local_oob_ext_data *cp)
8197 struct mgmt_pending_cmd *cmd;
8200 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8205 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8206 read_local_oob_ext_data_complete);
8209 mgmt_pending_remove(cmd);
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.  For BR/EDR with SSP it
 * defers to read_local_ssp_oob_req (async controller query).  For LE it
 * synchronously builds EIR data: the local address (static or public),
 * LE role, optional SC confirmation/random values, and AD flags.
 * Privacy mode is rejected because the active RPA cannot be reported.
 * NOTE(review): heavily elided excerpt - switch scaffolding, kfree,
 * role assignments and several braces/labels are missing; verify
 * control flow against the full source.
 */
8216 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8217 void *data, u16 data_len)
8219 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8220 struct mgmt_rp_read_local_oob_ext_data *rp;
8223 u8 status, flags, role, addr[7], hash[16], rand[16];
8226 bt_dev_dbg(hdev, "sock %p", sk);
/* First pass: pick status and the EIR length for the reply buffer. */
8228 if (hdev_is_powered(hdev)) {
8230 case BIT(BDADDR_BREDR):
8231 status = mgmt_bredr_support(hdev);
8237 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8238 status = mgmt_le_support(hdev);
/* addr(9) + role(3) + conf(18) + rand(18) + flags(3). */
8242 eir_len = 9 + 3 + 18 + 18 + 3;
8245 status = MGMT_STATUS_INVALID_PARAMS;
8250 status = MGMT_STATUS_NOT_POWERED;
8254 rp_len = sizeof(*rp) + eir_len;
8255 rp = kmalloc(rp_len, GFP_ATOMIC);
8259 if (!status && !lmp_ssp_capable(hdev)) {
8260 status = MGMT_STATUS_NOT_SUPPORTED;
/* Second pass: fill the EIR payload per requested transport. */
8271 case BIT(BDADDR_BREDR):
8272 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
/* Async path: controller query; reply comes from callback. */
8273 err = read_local_ssp_oob_req(hdev, sk, cp);
8274 hci_dev_unlock(hdev);
8278 status = MGMT_STATUS_FAILED;
8281 eir_len = eir_append_data(rp->eir, eir_len,
8283 hdev->dev_class, 3);
8286 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8287 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8288 smp_generate_oob(hdev, hash, rand) < 0) {
8289 hci_dev_unlock(hdev);
8290 status = MGMT_STATUS_FAILED;
8294 /* This should return the active RPA, but since the RPA
8295 * is only programmed on demand, it is really hard to fill
8296 * this in at the moment. For now disallow retrieving
8297 * local out-of-band data when privacy is in use.
8299 * Returning the identity address will not help here since
8300 * pairing happens before the identity resolving key is
8301 * known and thus the connection establishment happens
8302 * based on the RPA and not the identity address.
8304 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8305 hci_dev_unlock(hdev);
8306 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address per forced-static / LE-only rules. */
8310 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8311 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8312 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8313 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8314 memcpy(addr, &hdev->static_addr, 6);
8317 memcpy(addr, &hdev->bdaddr, 6);
8321 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8322 addr, sizeof(addr));
/* Role: advertising devices are peripheral-capable. */
8324 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8329 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8330 &role, sizeof(role));
8332 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8333 eir_len = eir_append_data(rp->eir, eir_len,
8335 hash, sizeof(hash));
8337 eir_len = eir_append_data(rp->eir, eir_len,
8339 rand, sizeof(rand));
8342 flags = mgmt_get_adv_discov_flags(hdev);
8344 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8345 flags |= LE_AD_NO_BREDR;
8347 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8348 &flags, sizeof(flags));
8352 hci_dev_unlock(hdev);
/* Originator now receives future OOB data updated events too. */
8354 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8356 status = MGMT_STATUS_SUCCESS;
8359 rp->type = cp->type;
8360 rp->eir_len = cpu_to_le16(eir_len);
8362 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8363 status, rp, sizeof(*rp) + eir_len);
8364 if (err < 0 || status)
8367 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8368 rp, sizeof(*rp) + eir_len,
8369 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Return the bitmask of MGMT advertising flags this controller supports.
 * Baseline flags are always advertised; TX power and the secondary-PHY
 * flags depend on controller capabilities.
 */
8377 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8381 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8382 flags |= MGMT_ADV_FLAG_DISCOV;
8383 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8384 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8385 flags |= MGMT_ADV_FLAG_APPEARANCE;
8386 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8387 flags |= MGMT_ADV_PARAM_DURATION;
8388 flags |= MGMT_ADV_PARAM_TIMEOUT;
8389 flags |= MGMT_ADV_PARAM_INTERVALS;
8390 flags |= MGMT_ADV_PARAM_TX_POWER;
8391 flags |= MGMT_ADV_PARAM_SCAN_RSP;
8393 /* In extended adv TX_POWER returned from Set Adv Param
8394 * will be always valid.
8396 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8397 flags |= MGMT_ADV_FLAG_TX_POWER;
8399 if (ext_adv_capable(hdev)) {
8400 flags |= MGMT_ADV_FLAG_SEC_1M;
8401 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8402 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
/* 2M and Coded PHYs are only meaningful with extended advertising */
8404 if (le_2m_capable(hdev))
8405 flags |= MGMT_ADV_FLAG_SEC_2M;
8407 if (le_coded_capable(hdev))
8408 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT_OP_READ_ADV_FEATURES handler: reports supported advertising flags,
 * maximum adv/scan-response data lengths, and the list of currently
 * registered advertising instance IDs.
 */
8414 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8415 void *data, u16 data_len)
8417 struct mgmt_rp_read_adv_features *rp;
8420 struct adv_info *adv_instance;
8421 u32 supported_flags;
8424 bt_dev_dbg(hdev, "sock %p", sk);
8426 if (!lmp_le_capable(hdev))
8427 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8428 MGMT_STATUS_REJECTED);
/* Reply has one trailing byte per registered instance */
8432 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8433 rp = kmalloc(rp_len, GFP_ATOMIC);
8435 hci_dev_unlock(hdev);
8439 supported_flags = get_supported_adv_flags(hdev);
8441 rp->supported_flags = cpu_to_le32(supported_flags);
8442 rp->max_adv_data_len = max_adv_len(hdev);
8443 rp->max_scan_rsp_len = max_adv_len(hdev);
8444 rp->max_instances = hdev->le_num_of_adv_sets;
8445 rp->num_instances = hdev->adv_instance_cnt;
8447 instance = rp->instance;
8448 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8449 /* Only instances 1-le_num_of_adv_sets are externally visible */
8450 if (adv_instance->instance <= hdev->adv_instance_cnt) {
8451 *instance = adv_instance->instance;
/* Hidden instances are excluded from the reported count */
8454 rp->num_instances--;
8459 hci_dev_unlock(hdev);
8461 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8462 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Number of EIR bytes the local name occupies when appended, computed by
 * encoding it into a scratch buffer (short name + 3-byte TLV header max).
 */
8469 static u8 calculate_name_len(struct hci_dev *hdev)
8471 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
8473 return eir_append_local_name(hdev, buf, 0);
/* Maximum TLV payload userspace may supply for adv or scan-response data,
 * after reserving room for every field the kernel manages itself based on
 * the requested adv_flags (flags field, TX power, local name, appearance).
 */
8476 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8479 u8 max_len = max_adv_len(hdev);
8482 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8483 MGMT_ADV_FLAG_LIMITED_DISCOV |
8484 MGMT_ADV_FLAG_MANAGED_FLAGS))
8487 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8490 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8491 max_len -= calculate_name_len(hdev);
8493 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True if the kernel owns the EIR Flags field for this instance, i.e.
 * userspace must not include EIR_FLAGS in its own data.
 */
8500 static bool flags_managed(u32 adv_flags)
8502 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8503 MGMT_ADV_FLAG_LIMITED_DISCOV |
8504 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True if the kernel owns the EIR TX-power field for this instance. */
8507 static bool tx_power_managed(u32 adv_flags)
8509 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True if the kernel owns the local-name EIR field for this instance. */
8512 static bool name_managed(u32 adv_flags)
8514 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True if the kernel owns the appearance EIR field for this instance. */
8517 static bool appearance_managed(u32 adv_flags)
8519 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate userspace-supplied adv/scan-rsp TLV data: it must fit within
 * the space left after kernel-managed fields, be well-formed length-type
 * TLVs, and must not contain fields the kernel manages itself.
 */
8522 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8523 u8 len, bool is_adv_data)
8528 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8533 /* Make sure that the data is correctly formatted. */
8534 for (i = 0; i < len; i += (cur_len + 1)) {
/* data[i] is the field length, data[i + 1] the EIR type */
8540 if (data[i + 1] == EIR_FLAGS &&
8541 (!is_adv_data || flags_managed(adv_flags)))
8544 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8547 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8550 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8553 if (data[i + 1] == EIR_APPEARANCE &&
8554 appearance_managed(adv_flags))
8557 /* If the current field length would exceed the total data
8558 * length, then it's invalid.
8560 if (i + cur_len >= len)
/* Reject adv flags the controller does not support, and reject requests
 * that set more than one secondary-PHY (SEC_*) flag at once.
 */
8567 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8569 u32 supported_flags, phy_flags;
8571 /* The current implementation only supports a subset of the specified
8572 * flags. Also need to check mutual exclusiveness of sec flags.
8574 supported_flags = get_supported_adv_flags(hdev);
8575 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
/* phy_flags & -phy_flags isolates the lowest set bit; the XOR is
 * non-zero exactly when more than one SEC bit is set.
 */
8576 if (adv_flags & ~supported_flags ||
8577 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
/* Advertising config must not change while a Set LE command is pending. */
8583 static bool adv_busy(struct hci_dev *hdev)
8585 return pending_find(MGMT_OP_SET_LE, hdev);
/* Common completion for Add Advertising / Add Ext Adv Data: on failure,
 * tear down any still-pending (never fully registered) instances and
 * notify userspace that they were removed.
 */
8588 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8591 struct adv_info *adv, *n;
8593 bt_dev_dbg(hdev, "err %d", err);
8597 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8604 adv->pending = false;
8608 instance = adv->instance;
/* Stop the rotation timer if the removed instance is current */
8610 if (hdev->cur_adv_instance == instance)
8611 cancel_adv_timeout(hdev);
8613 hci_remove_adv_instance(hdev, instance);
8614 mgmt_advertising_removed(sk, hdev, instance);
8617 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_ADD_ADVERTISING: send status or a
 * full reply to the requester, run the common cleanup, free the cmd.
 */
8620 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8622 struct mgmt_pending_cmd *cmd = data;
8623 struct mgmt_cp_add_advertising *cp = cmd->param;
8624 struct mgmt_rp_add_advertising rp;
8626 memset(&rp, 0, sizeof(rp));
8628 rp.instance = cp->instance;
/* On error only a status is sent; on success the instance is echoed */
8631 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8634 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8635 mgmt_status(err), &rp, sizeof(rp));
8637 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8639 mgmt_pending_free(cmd);
/* hci_cmd_sync work: schedule the requested instance, forcing it to
 * become the active one (third argument true).
 */
8642 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8644 struct mgmt_pending_cmd *cmd = data;
8645 struct mgmt_cp_add_advertising *cp = cmd->param;
8647 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* MGMT_OP_ADD_ADVERTISING handler: validate the request, register (or
 * replace) an advertising instance, and queue HCI work to start it when
 * necessary.  Completes immediately when no HCI traffic is needed.
 */
8650 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8651 void *data, u16 data_len)
8653 struct mgmt_cp_add_advertising *cp = data;
8654 struct mgmt_rp_add_advertising rp;
8657 u16 timeout, duration;
8658 unsigned int prev_instance_cnt;
8659 u8 schedule_instance = 0;
8660 struct adv_info *adv, *next_instance;
8662 struct mgmt_pending_cmd *cmd;
8664 bt_dev_dbg(hdev, "sock %p", sk);
8666 status = mgmt_le_support(hdev);
8668 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Instance IDs are 1-based and bounded by the controller's adv sets */
8671 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8672 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8673 MGMT_STATUS_INVALID_PARAMS);
/* Total length must match the two variable-length payloads exactly */
8675 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8676 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8677 MGMT_STATUS_INVALID_PARAMS);
8679 flags = __le32_to_cpu(cp->flags);
8680 timeout = __le16_to_cpu(cp->timeout);
8681 duration = __le16_to_cpu(cp->duration);
8683 if (!requested_adv_flags_are_valid(hdev, flags))
8684 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8685 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running controller to be honoured */
8689 if (timeout && !hdev_is_powered(hdev)) {
8690 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8691 MGMT_STATUS_REJECTED);
8695 if (adv_busy(hdev)) {
8696 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8701 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8702 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8703 cp->scan_rsp_len, false)) {
8704 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8705 MGMT_STATUS_INVALID_PARAMS);
8709 prev_instance_cnt = hdev->adv_instance_cnt;
8711 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8712 cp->adv_data_len, cp->data,
8714 cp->data + cp->adv_data_len,
8716 HCI_ADV_TX_POWER_NO_PREFERENCE,
8717 hdev->le_adv_min_interval,
8718 hdev->le_adv_max_interval, 0);
8720 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8721 MGMT_STATUS_FAILED);
8725 /* Only trigger an advertising added event if a new instance was
8728 if (hdev->adv_instance_cnt > prev_instance_cnt)
8729 mgmt_advertising_added(sk, hdev, cp->instance);
8731 if (hdev->cur_adv_instance == cp->instance) {
8732 /* If the currently advertised instance is being changed then
8733 * cancel the current advertising and schedule the next
8734 * instance. If there is only one instance then the overridden
8735 * advertising data will be visible right away.
8737 cancel_adv_timeout(hdev);
8739 next_instance = hci_get_next_instance(hdev, cp->instance);
8741 schedule_instance = next_instance->instance;
8742 } else if (!hdev->adv_instance_timeout) {
8743 /* Immediately advertise the new instance if no other
8744 * instance is currently being advertised.
8746 schedule_instance = cp->instance;
8749 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8750 * there is no instance to be advertised then we have no HCI
8751 * communication to make. Simply return.
8753 if (!hdev_is_powered(hdev) ||
8754 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8755 !schedule_instance) {
8756 rp.instance = cp->instance;
8757 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8758 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8762 /* We're good to go, update advertising data, parameters, and start
8765 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
/* Queue work against the instance chosen above, not the raw request */
8772 cp->instance = schedule_instance;
8774 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8775 add_advertising_complete);
8777 mgmt_pending_free(cmd);
8780 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_ADD_EXT_ADV_PARAMS: report the
 * selected TX power and remaining data capacity on success; on failure
 * remove the half-configured instance and tell userspace.
 */
8785 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8788 struct mgmt_pending_cmd *cmd = data;
8789 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8790 struct mgmt_rp_add_ext_adv_params rp;
8791 struct adv_info *adv;
8794 BT_DBG("%s", hdev->name);
8798 adv = hci_find_adv_instance(hdev, cp->instance);
8802 rp.instance = cp->instance;
8803 rp.tx_power = adv->tx_power;
8805 /* While we're at it, inform userspace of the available space for this
8806 * advertisement, given the flags that will be used.
8808 flags = __le32_to_cpu(cp->flags);
8809 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8810 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8813 /* If this advertisement was previously advertising and we
8814 * failed to update it, we signal that it has been removed and
8815 * delete its structure
8818 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8820 hci_remove_adv_instance(hdev, cp->instance);
8822 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8825 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8826 mgmt_status(err), &rp, sizeof(rp));
8831 mgmt_pending_free(cmd);
8833 hci_dev_unlock(hdev);
/* hci_cmd_sync work: push the extended advertising parameters for the
 * instance to the controller.
 */
8836 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8838 struct mgmt_pending_cmd *cmd = data;
8839 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8841 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: first half of the two-call extended
 * advertising API.  Registers an instance with parameters only (no data);
 * data is supplied later via MGMT_OP_ADD_EXT_ADV_DATA.
 */
8844 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8845 void *data, u16 data_len)
8847 struct mgmt_cp_add_ext_adv_params *cp = data;
8848 struct mgmt_rp_add_ext_adv_params rp;
8849 struct mgmt_pending_cmd *cmd = NULL;
8850 struct adv_info *adv;
8851 u32 flags, min_interval, max_interval;
8852 u16 timeout, duration;
8857 BT_DBG("%s", hdev->name);
8859 status = mgmt_le_support(hdev);
8861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8864 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8865 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8866 MGMT_STATUS_INVALID_PARAMS);
8868 /* The purpose of breaking add_advertising into two separate MGMT calls
8869 * for params and data is to allow more parameters to be added to this
8870 * structure in the future. For this reason, we verify that we have the
8871 * bare minimum structure we know of when the interface was defined. Any
8872 * extra parameters we don't know about will be ignored in this request.
8874 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8875 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8876 MGMT_STATUS_INVALID_PARAMS);
8878 flags = __le32_to_cpu(cp->flags);
8880 if (!requested_adv_flags_are_valid(hdev, flags))
8881 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8882 MGMT_STATUS_INVALID_PARAMS);
8886 /* In new interface, we require that we are powered to register */
8887 if (!hdev_is_powered(hdev)) {
8888 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8889 MGMT_STATUS_REJECTED);
8893 if (adv_busy(hdev)) {
8894 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8899 /* Parse defined parameters from request, use defaults otherwise */
8900 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8901 __le16_to_cpu(cp->timeout) : 0;
8903 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8904 __le16_to_cpu(cp->duration) :
8905 hdev->def_multi_adv_rotation_duration;
8907 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8908 __le32_to_cpu(cp->min_interval) :
8909 hdev->le_adv_min_interval;
8911 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8912 __le32_to_cpu(cp->max_interval) :
8913 hdev->le_adv_max_interval;
8915 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8917 HCI_ADV_TX_POWER_NO_PREFERENCE;
8919 /* Create advertising instance with no advertising or response data */
8920 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8921 timeout, duration, tx_power, min_interval,
8925 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8926 MGMT_STATUS_FAILED);
8930 /* Submit request for advertising params if ext adv available */
8931 if (ext_adv_capable(hdev)) {
8932 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
/* Allocation failure: drop the instance registered above */
8936 hci_remove_adv_instance(hdev, cp->instance);
8940 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8941 add_ext_adv_params_complete);
8943 mgmt_pending_free(cmd);
/* Legacy controllers: no HCI params command exists, so reply now
 * with default TX power and the computed capacity limits.
 */
8945 rp.instance = cp->instance;
8946 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8947 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8948 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8949 err = mgmt_cmd_complete(sk, hdev->id,
8950 MGMT_OP_ADD_EXT_ADV_PARAMS,
8951 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8955 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_ADD_EXT_ADV_DATA: run the shared
 * failure cleanup, then answer the requester.
 */
8960 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8962 struct mgmt_pending_cmd *cmd = data;
8963 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8964 struct mgmt_rp_add_advertising rp;
8966 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8968 memset(&rp, 0, sizeof(rp));
8970 rp.instance = cp->instance;
8973 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8976 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8977 mgmt_status(err), &rp, sizeof(rp));
8979 mgmt_pending_free(cmd);
/* hci_cmd_sync work for Add Ext Adv Data: on extended-advertising
 * controllers push adv data, scan-response data, then enable the set;
 * on legacy controllers fall back to the software scheduler.
 */
8982 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8984 struct mgmt_pending_cmd *cmd = data;
8985 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8988 if (ext_adv_capable(hdev)) {
8989 err = hci_update_adv_data_sync(hdev, cp->instance);
8993 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8997 return hci_enable_ext_advertising_sync(hdev, cp->instance);
9000 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* MGMT_OP_ADD_EXT_ADV_DATA handler: second half of the extended adv API.
 * Validates and installs adv/scan-rsp data on an instance previously
 * registered by add_ext_adv_params, then schedules it if needed.
 */
9003 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9006 struct mgmt_cp_add_ext_adv_data *cp = data;
9007 struct mgmt_rp_add_ext_adv_data rp;
9008 u8 schedule_instance = 0;
9009 struct adv_info *next_instance;
9010 struct adv_info *adv_instance;
9012 struct mgmt_pending_cmd *cmd;
9014 BT_DBG("%s", hdev->name);
/* The instance must already exist (created by add_ext_adv_params) */
9018 adv_instance = hci_find_adv_instance(hdev, cp->instance);
9020 if (!adv_instance) {
9021 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9022 MGMT_STATUS_INVALID_PARAMS);
9026 /* In new interface, we require that we are powered to register */
9027 if (!hdev_is_powered(hdev)) {
9028 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9029 MGMT_STATUS_REJECTED);
9030 goto clear_new_instance;
9033 if (adv_busy(hdev)) {
9034 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9036 goto clear_new_instance;
9039 /* Validate new data */
9040 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9041 cp->adv_data_len, true) ||
9042 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9043 cp->adv_data_len, cp->scan_rsp_len, false)) {
9044 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9045 MGMT_STATUS_INVALID_PARAMS);
9046 goto clear_new_instance;
9049 /* Set the data in the advertising instance */
9050 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9051 cp->data, cp->scan_rsp_len,
9052 cp->data + cp->adv_data_len);
9054 /* If using software rotation, determine next instance to use */
9055 if (hdev->cur_adv_instance == cp->instance) {
9056 /* If the currently advertised instance is being changed
9057 * then cancel the current advertising and schedule the
9058 * next instance. If there is only one instance then the
9059 * overridden advertising data will be visible right
9062 cancel_adv_timeout(hdev);
9064 next_instance = hci_get_next_instance(hdev, cp->instance);
9066 schedule_instance = next_instance->instance;
9067 } else if (!hdev->adv_instance_timeout) {
9068 /* Immediately advertise the new instance if no other
9069 * instance is currently being advertised.
9071 schedule_instance = cp->instance;
9074 /* If the HCI_ADVERTISING flag is set or there is no instance to
9075 * be advertised then we have no HCI communication to make.
9078 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9079 if (adv_instance->pending) {
9080 mgmt_advertising_added(sk, hdev, cp->instance);
9081 adv_instance->pending = false;
9083 rp.instance = cp->instance;
9084 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9085 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9089 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9093 goto clear_new_instance;
9096 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9097 add_ext_adv_data_complete);
9099 mgmt_pending_free(cmd);
9100 goto clear_new_instance;
9103 /* We were successful in updating data, so trigger advertising_added
9104 * event if this is an instance that wasn't previously advertising. If
9105 * a failure occurs in the requests we initiated, we will remove the
9106 * instance again in add_advertising_complete
9108 if (adv_instance->pending)
9109 mgmt_advertising_added(sk, hdev, cp->instance);
/* clear_new_instance error path: drop the instance on failure */
9114 hci_remove_adv_instance(hdev, cp->instance);
9117 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_REMOVE_ADVERTISING: reply with
 * the removed instance ID (or a status on error) and free the cmd.
 */
9122 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9125 struct mgmt_pending_cmd *cmd = data;
9126 struct mgmt_cp_remove_advertising *cp = cmd->param;
9127 struct mgmt_rp_remove_advertising rp;
9129 bt_dev_dbg(hdev, "err %d", err);
9131 memset(&rp, 0, sizeof(rp));
9132 rp.instance = cp->instance;
9135 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9138 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9139 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9141 mgmt_pending_free(cmd);
/* hci_cmd_sync work: remove the instance (0 = all, per mgmt API) and
 * disable advertising entirely when no instances remain.
 */
9144 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9146 struct mgmt_pending_cmd *cmd = data;
9147 struct mgmt_cp_remove_advertising *cp = cmd->param;
9150 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9154 if (list_empty(&hdev->adv_instances))
9155 err = hci_disable_advertising_sync(hdev);
/* MGMT_OP_REMOVE_ADVERTISING handler: validate the instance (non-zero
 * IDs must exist; zero means "remove all") and queue the removal work.
 */
9160 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9161 void *data, u16 data_len)
9163 struct mgmt_cp_remove_advertising *cp = data;
9164 struct mgmt_pending_cmd *cmd;
9167 bt_dev_dbg(hdev, "sock %p", sk);
9171 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9172 err = mgmt_cmd_status(sk, hdev->id,
9173 MGMT_OP_REMOVE_ADVERTISING,
9174 MGMT_STATUS_INVALID_PARAMS);
9178 if (pending_find(MGMT_OP_SET_LE, hdev)) {
9179 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
/* Nothing registered at all is also an invalid request */
9184 if (list_empty(&hdev->adv_instances)) {
9185 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9186 MGMT_STATUS_INVALID_PARAMS);
9190 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9197 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9198 remove_advertising_complete);
9200 mgmt_pending_free(cmd);
9203 hci_dev_unlock(hdev);
/* MGMT_OP_GET_ADV_SIZE_INFO handler: for a prospective instance and flag
 * set, report how many TLV bytes remain for adv and scan-response data.
 * Purely computational — no instance is created.
 */
9208 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9209 void *data, u16 data_len)
9211 struct mgmt_cp_get_adv_size_info *cp = data;
9212 struct mgmt_rp_get_adv_size_info rp;
9213 u32 flags, supported_flags;
9215 bt_dev_dbg(hdev, "sock %p", sk);
9217 if (!lmp_le_capable(hdev))
9218 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9219 MGMT_STATUS_REJECTED);
9221 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9222 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9223 MGMT_STATUS_INVALID_PARAMS);
9225 flags = __le32_to_cpu(cp->flags);
9227 /* The current implementation only supports a subset of the specified
9230 supported_flags = get_supported_adv_flags(hdev);
9231 if (flags & ~supported_flags)
9232 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9233 MGMT_STATUS_INVALID_PARAMS);
9235 rp.instance = cp->instance;
9236 rp.flags = cp->flags;
9237 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9238 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9240 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9241 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table: one entry per mgmt opcode, in opcode order (index ==
 * opcode).  Each entry gives the handler, the minimum/fixed request size,
 * and flags (HCI_MGMT_UNTRUSTED: callable without CAP_NET_ADMIN;
 * HCI_MGMT_UNCONFIGURED: allowed on unconfigured controllers;
 * HCI_MGMT_HDEV_OPTIONAL: may target no specific controller).
 */
9244 static const struct hci_mgmt_handler mgmt_handlers[] = {
9245 { NULL }, /* 0x0000 (no command) */
9246 { read_version, MGMT_READ_VERSION_SIZE,
9248 HCI_MGMT_UNTRUSTED },
9249 { read_commands, MGMT_READ_COMMANDS_SIZE,
9251 HCI_MGMT_UNTRUSTED },
9252 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
9254 HCI_MGMT_UNTRUSTED },
9255 { read_controller_info, MGMT_READ_INFO_SIZE,
9256 HCI_MGMT_UNTRUSTED },
9257 { set_powered, MGMT_SETTING_SIZE },
9258 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
9259 { set_connectable, MGMT_SETTING_SIZE },
9260 { set_fast_connectable, MGMT_SETTING_SIZE },
9261 { set_bondable, MGMT_SETTING_SIZE },
9262 { set_link_security, MGMT_SETTING_SIZE },
9263 { set_ssp, MGMT_SETTING_SIZE },
9264 { set_hs, MGMT_SETTING_SIZE },
9265 { set_le, MGMT_SETTING_SIZE },
9266 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
9267 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
9268 { add_uuid, MGMT_ADD_UUID_SIZE },
9269 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
9270 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
9272 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9274 { disconnect, MGMT_DISCONNECT_SIZE },
9275 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
9276 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
9277 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
9278 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
9279 { pair_device, MGMT_PAIR_DEVICE_SIZE },
9280 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
9281 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
9282 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
9283 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9284 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
9285 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9286 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
9287 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9289 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9290 { start_discovery, MGMT_START_DISCOVERY_SIZE },
9291 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
9292 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
9293 { block_device, MGMT_BLOCK_DEVICE_SIZE },
9294 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
9295 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
9296 { set_advertising, MGMT_SETTING_SIZE },
9297 { set_bredr, MGMT_SETTING_SIZE },
9298 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
9299 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
9300 { set_secure_conn, MGMT_SETTING_SIZE },
9301 { set_debug_keys, MGMT_SETTING_SIZE },
9302 { set_privacy, MGMT_SET_PRIVACY_SIZE },
9303 { load_irks, MGMT_LOAD_IRKS_SIZE,
9305 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
9306 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
9307 { add_device, MGMT_ADD_DEVICE_SIZE },
9308 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
9309 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
9311 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9313 HCI_MGMT_UNTRUSTED },
9314 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
9315 HCI_MGMT_UNCONFIGURED |
9316 HCI_MGMT_UNTRUSTED },
9317 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
9318 HCI_MGMT_UNCONFIGURED },
9319 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
9320 HCI_MGMT_UNCONFIGURED },
9321 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9323 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9324 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
9326 HCI_MGMT_UNTRUSTED },
9327 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
9328 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
9330 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
9331 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
9332 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9333 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
9334 HCI_MGMT_UNTRUSTED },
9335 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
9336 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
9337 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
9338 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9340 { set_wideband_speech, MGMT_SETTING_SIZE },
9341 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
9342 HCI_MGMT_UNTRUSTED },
9343 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
9344 HCI_MGMT_UNTRUSTED |
9345 HCI_MGMT_HDEV_OPTIONAL },
9346 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
9348 HCI_MGMT_HDEV_OPTIONAL },
9349 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9350 HCI_MGMT_UNTRUSTED },
9351 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9353 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9354 HCI_MGMT_UNTRUSTED },
9355 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9357 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
9358 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
9359 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9360 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9362 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
9363 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9365 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
9367 { add_adv_patterns_monitor_rssi,
9368 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9370 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
9372 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
9373 { mesh_send, MGMT_MESH_SEND_SIZE,
9375 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
/* Broadcast that a controller index appeared: legacy INDEX_ADDED or
 * UNCONF_INDEX_ADDED depending on configuration state, plus the extended
 * EXT_INDEX_ADDED event.  Raw (userchannel-style) devices are skipped.
 */
9378 void mgmt_index_added(struct hci_dev *hdev)
9380 struct mgmt_ev_ext_index ev;
9382 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9385 switch (hdev->dev_type) {
9387 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9388 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9389 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9392 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9393 HCI_MGMT_INDEX_EVENTS);
9406 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9407 HCI_MGMT_EXT_INDEX_EVENTS);
/* Broadcast that a controller index disappeared; fail all pending mgmt
 * commands with INVALID_INDEX and cancel the controller's delayed work.
 */
9410 void mgmt_index_removed(struct hci_dev *hdev)
9412 struct mgmt_ev_ext_index ev;
9413 u8 status = MGMT_STATUS_INVALID_INDEX;
9415 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9418 switch (hdev->dev_type) {
/* Opcode 0 == iterate every pending command */
9420 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9422 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9423 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9424 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9427 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9428 HCI_MGMT_INDEX_EVENTS);
9441 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9442 HCI_MGMT_EXT_INDEX_EVENTS);
9444 /* Cancel any remaining timed work */
9445 if (!hci_dev_test_flag(hdev, HCI_MGMT))
9447 cancel_delayed_work_sync(&hdev->discov_off);
9448 cancel_delayed_work_sync(&hdev->service_cache);
9449 cancel_delayed_work_sync(&hdev->rpa_expired);
/* Called once power-on finishes: restore LE auto-connect actions and
 * passive scanning, answer pending Set Powered commands, and emit
 * New Settings.
 */
9452 void mgmt_power_on(struct hci_dev *hdev, int err)
9454 struct cmd_lookup match = { NULL, hdev };
9456 bt_dev_dbg(hdev, "err %d", err);
9461 restart_le_actions(hdev);
9462 hci_update_passive_scan(hdev);
9465 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9467 new_settings(hdev, match.sk);
9472 hci_dev_unlock(hdev);
/* Called when the controller goes down: fail all pending commands with
 * an appropriate status, announce a zeroed class of device if one was
 * set, and emit New Settings.
 */
9475 void __mgmt_power_off(struct hci_dev *hdev)
9477 struct cmd_lookup match = { NULL, hdev };
9478 u8 status, zero_cod[] = { 0, 0, 0 };
9480 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9482 /* If the power off is because of hdev unregistration let
9483 * use the appropriate INVALID_INDEX status. Otherwise use
9484 * NOT_POWERED. We cover both scenarios here since later in
9485 * mgmt_index_removed() any hci_conn callbacks will have already
9486 * been triggered, potentially causing misleading DISCONNECTED
9489 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9490 status = MGMT_STATUS_INVALID_INDEX;
9492 status = MGMT_STATUS_NOT_POWERED;
9494 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9496 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9497 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9498 zero_cod, sizeof(zero_cod),
9499 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9500 ext_info_changed(hdev, NULL);
9503 new_settings(hdev, match.sk);
/* Report failure of a pending Set Powered command, mapping -ERFKILL to
 * the dedicated RFKILLED status.
 */
9509 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9511 struct mgmt_pending_cmd *cmd;
9514 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9518 if (err == -ERFKILL)
9519 status = MGMT_STATUS_RFKILLED;
9521 status = MGMT_STATUS_FAILED;
9523 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9525 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY so userspace can persist a newly created
 * BR/EDR link key; store_hint tells it whether the key should be stored.
 */
9528 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9531 struct mgmt_ev_new_link_key ev;
9533 memset(&ev, 0, sizeof(ev));
9535 ev.store_hint = persistent;
9536 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9537 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9538 ev.key.type = key->type;
9539 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9540 ev.key.pin_len = key->pin_len;
9542 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK's internal type/authentication level to the MGMT_LTK_*
 * value exposed over the management interface.
 */
9545 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9547 switch (ltk->type) {
9549 case SMP_LTK_RESPONDER:
9550 if (ltk->authenticated)
9551 return MGMT_LTK_AUTHENTICATED;
9552 return MGMT_LTK_UNAUTHENTICATED;
9554 if (ltk->authenticated)
9555 return MGMT_LTK_P256_AUTH;
9556 return MGMT_LTK_P256_UNAUTH;
9557 case SMP_LTK_P256_DEBUG:
9558 return MGMT_LTK_P256_DEBUG;
/* Fallback for unknown types */
9561 return MGMT_LTK_UNAUTHENTICATED;
9564 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9566 struct mgmt_ev_new_long_term_key ev;
9568 memset(&ev, 0, sizeof(ev));
9570 /* Devices using resolvable or non-resolvable random addresses
9571 * without providing an identity resolving key don't require
9572 * to store long term keys. Their addresses will change the
9575 * Only when a remote device provides an identity address
9576 * make sure the long term key is stored. If the remote
9577 * identity is known, the long term keys are internally
9578 * mapped to the identity address. So allow static random
9579 * and public addresses here.
9581 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9582 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9583 ev.store_hint = 0x00;
9585 ev.store_hint = persistent;
9587 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9588 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9589 ev.key.type = mgmt_ltk_type(key);
9590 ev.key.enc_size = key->enc_size;
9591 ev.key.ediv = key->ediv;
9592 ev.key.rand = key->rand;
9594 if (key->type == SMP_LTK)
9595 ev.key.initiator = 1;
9597 /* Make sure we copy only the significant bytes based on the
9598 * encryption key size, and set the rest of the value to zeroes.
9600 memcpy(ev.key.val, key->val, key->enc_size);
9601 memset(ev.key.val + key->enc_size, 0,
9602 sizeof(ev.key.val) - key->enc_size);
9604 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9607 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9609 struct mgmt_ev_new_irk ev;
9611 memset(&ev, 0, sizeof(ev));
9613 ev.store_hint = persistent;
9615 bacpy(&ev.rpa, &irk->rpa);
9616 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9617 ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
9618 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9620 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9623 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9626 struct mgmt_ev_new_csrk ev;
9628 memset(&ev, 0, sizeof(ev));
9630 /* Devices using resolvable or non-resolvable random addresses
9631 * without providing an identity resolving key don't require
9632 * to store signature resolving keys. Their addresses will change
9633 * the next time around.
9635 * Only when a remote device provides an identity address
9636 * make sure the signature resolving key is stored. So allow
9637 * static random and public addresses here.
9639 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9640 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9641 ev.store_hint = 0x00;
9643 ev.store_hint = persistent;
9645 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9646 ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
9647 ev.key.type = csrk->type;
9648 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9650 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9653 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9654 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9655 u16 max_interval, u16 latency, u16 timeout)
9657 struct mgmt_ev_new_conn_param ev;
9659 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9662 memset(&ev, 0, sizeof(ev));
9663 bacpy(&ev.addr.bdaddr, bdaddr);
9664 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9665 ev.store_hint = store_hint;
9666 ev.min_interval = cpu_to_le16(min_interval);
9667 ev.max_interval = cpu_to_le16(max_interval);
9668 ev.latency = cpu_to_le16(latency);
9669 ev.timeout = cpu_to_le16(timeout);
9671 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9674 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9675 u8 *name, u8 name_len)
9677 struct sk_buff *skb;
9678 struct mgmt_ev_device_connected *ev;
9682 /* allocate buff for LE or BR/EDR adv */
9683 if (conn->le_adv_data_len > 0)
9684 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9685 sizeof(*ev) + conn->le_adv_data_len);
9687 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9688 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9689 eir_precalc_len(sizeof(conn->dev_class)));
9691 ev = skb_put(skb, sizeof(*ev));
9692 bacpy(&ev->addr.bdaddr, &conn->dst);
9693 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9696 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9698 ev->flags = __cpu_to_le32(flags);
9700 /* We must ensure that the EIR Data fields are ordered and
9701 * unique. Keep it simple for now and avoid the problem by not
9702 * adding any BR/EDR data to the LE adv.
9704 if (conn->le_adv_data_len > 0) {
9705 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9706 eir_len = conn->le_adv_data_len;
9709 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9711 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9712 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9713 conn->dev_class, sizeof(conn->dev_class));
9716 ev->eir_len = cpu_to_le16(eir_len);
9718 mgmt_event_skb(skb, NULL);
9721 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9723 struct sock **sk = data;
9725 cmd->cmd_complete(cmd, 0);
9730 mgmt_pending_remove(cmd);
9733 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9735 struct hci_dev *hdev = data;
9736 struct mgmt_cp_unpair_device *cp = cmd->param;
9738 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9740 cmd->cmd_complete(cmd, 0);
9741 mgmt_pending_remove(cmd);
9744 bool mgmt_powering_down(struct hci_dev *hdev)
9746 struct mgmt_pending_cmd *cmd;
9747 struct mgmt_mode *cp;
9749 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9760 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9761 u8 link_type, u8 addr_type, u8 reason,
9762 bool mgmt_connected)
9764 struct mgmt_ev_device_disconnected ev;
9765 struct sock *sk = NULL;
9767 /* The connection is still in hci_conn_hash so test for 1
9768 * instead of 0 to know if this is the last one.
9770 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9771 cancel_delayed_work(&hdev->power_off);
9772 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9775 if (!mgmt_connected)
9778 if (link_type != ACL_LINK && link_type != LE_LINK)
9781 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9783 bacpy(&ev.addr.bdaddr, bdaddr);
9784 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9787 /* Report disconnects due to suspend */
9788 if (hdev->suspended)
9789 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9791 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9796 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9800 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9801 u8 link_type, u8 addr_type, u8 status)
9803 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9804 struct mgmt_cp_disconnect *cp;
9805 struct mgmt_pending_cmd *cmd;
9807 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9810 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9816 if (bacmp(bdaddr, &cp->addr.bdaddr))
9819 if (cp->addr.type != bdaddr_type)
9822 cmd->cmd_complete(cmd, mgmt_status(status));
9823 mgmt_pending_remove(cmd);
9826 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9827 u8 addr_type, u8 status)
9829 struct mgmt_ev_connect_failed ev;
9831 /* The connection is still in hci_conn_hash so test for 1
9832 * instead of 0 to know if this is the last one.
9834 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9835 cancel_delayed_work(&hdev->power_off);
9836 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9839 bacpy(&ev.addr.bdaddr, bdaddr);
9840 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9841 ev.status = mgmt_status(status);
9843 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9846 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9848 struct mgmt_ev_pin_code_request ev;
9850 bacpy(&ev.addr.bdaddr, bdaddr);
9851 ev.addr.type = BDADDR_BREDR;
9854 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9857 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9860 struct mgmt_pending_cmd *cmd;
9862 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9866 cmd->cmd_complete(cmd, mgmt_status(status));
9867 mgmt_pending_remove(cmd);
9870 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9873 struct mgmt_pending_cmd *cmd;
9875 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9879 cmd->cmd_complete(cmd, mgmt_status(status));
9880 mgmt_pending_remove(cmd);
9883 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9884 u8 link_type, u8 addr_type, u32 value,
9887 struct mgmt_ev_user_confirm_request ev;
9889 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9891 bacpy(&ev.addr.bdaddr, bdaddr);
9892 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9893 ev.confirm_hint = confirm_hint;
9894 ev.value = cpu_to_le32(value);
9896 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9900 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9901 u8 link_type, u8 addr_type)
9903 struct mgmt_ev_user_passkey_request ev;
9905 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9907 bacpy(&ev.addr.bdaddr, bdaddr);
9908 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9910 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9914 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9915 u8 link_type, u8 addr_type, u8 status,
9918 struct mgmt_pending_cmd *cmd;
9920 cmd = pending_find(opcode, hdev);
9924 cmd->cmd_complete(cmd, mgmt_status(status));
9925 mgmt_pending_remove(cmd);
9930 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9931 u8 link_type, u8 addr_type, u8 status)
9933 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9934 status, MGMT_OP_USER_CONFIRM_REPLY);
9937 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9938 u8 link_type, u8 addr_type, u8 status)
9940 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9942 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9945 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9946 u8 link_type, u8 addr_type, u8 status)
9948 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9949 status, MGMT_OP_USER_PASSKEY_REPLY);
9952 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9953 u8 link_type, u8 addr_type, u8 status)
9955 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9957 MGMT_OP_USER_PASSKEY_NEG_REPLY);
9960 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9961 u8 link_type, u8 addr_type, u32 passkey,
9964 struct mgmt_ev_passkey_notify ev;
9966 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9968 bacpy(&ev.addr.bdaddr, bdaddr);
9969 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9970 ev.passkey = __cpu_to_le32(passkey);
9971 ev.entered = entered;
9973 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9976 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9978 struct mgmt_ev_auth_failed ev;
9979 struct mgmt_pending_cmd *cmd;
9980 u8 status = mgmt_status(hci_status);
9982 bacpy(&ev.addr.bdaddr, &conn->dst);
9983 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9986 cmd = find_pairing(conn);
9988 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9989 cmd ? cmd->sk : NULL);
9992 cmd->cmd_complete(cmd, status);
9993 mgmt_pending_remove(cmd);
9997 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9999 struct cmd_lookup match = { NULL, hdev };
10003 u8 mgmt_err = mgmt_status(status);
10004 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
10005 cmd_status_rsp, &mgmt_err);
10009 if (test_bit(HCI_AUTH, &hdev->flags))
10010 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
10012 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
10014 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
10018 new_settings(hdev, match.sk);
10021 sock_put(match.sk);
10024 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10026 struct cmd_lookup *match = data;
10028 if (match->sk == NULL) {
10029 match->sk = cmd->sk;
10030 sock_hold(match->sk);
10034 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
10037 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
10039 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
10040 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
10041 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
10044 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
10045 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10046 ext_info_changed(hdev, NULL);
10050 sock_put(match.sk);
10053 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
10055 struct mgmt_cp_set_local_name ev;
10056 struct mgmt_pending_cmd *cmd;
10061 memset(&ev, 0, sizeof(ev));
10062 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
10063 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
10065 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
10067 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
10069 /* If this is a HCI command related to powering on the
10070 * HCI dev don't send any mgmt signals.
10072 if (pending_find(MGMT_OP_SET_POWERED, hdev))
10076 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10077 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10078 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
10081 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10085 for (i = 0; i < uuid_count; i++) {
10086 if (!memcmp(uuid, uuids[i], 16))
10093 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10097 while (parsed < eir_len) {
10098 u8 field_len = eir[0];
10102 if (field_len == 0)
10105 if (eir_len - parsed < field_len + 1)
10109 case EIR_UUID16_ALL:
10110 case EIR_UUID16_SOME:
10111 for (i = 0; i + 3 <= field_len; i += 2) {
10112 memcpy(uuid, bluetooth_base_uuid, 16);
10113 uuid[13] = eir[i + 3];
10114 uuid[12] = eir[i + 2];
10115 if (has_uuid(uuid, uuid_count, uuids))
10119 case EIR_UUID32_ALL:
10120 case EIR_UUID32_SOME:
10121 for (i = 0; i + 5 <= field_len; i += 4) {
10122 memcpy(uuid, bluetooth_base_uuid, 16);
10123 uuid[15] = eir[i + 5];
10124 uuid[14] = eir[i + 4];
10125 uuid[13] = eir[i + 3];
10126 uuid[12] = eir[i + 2];
10127 if (has_uuid(uuid, uuid_count, uuids))
10131 case EIR_UUID128_ALL:
10132 case EIR_UUID128_SOME:
10133 for (i = 0; i + 17 <= field_len; i += 16) {
10134 memcpy(uuid, eir + i + 2, 16);
10135 if (has_uuid(uuid, uuid_count, uuids))
10141 parsed += field_len + 1;
10142 eir += field_len + 1;
10148 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10149 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10151 /* If a RSSI threshold has been specified, and
10152 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10153 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
10154 * is set, let it through for further processing, as we might need to
10155 * restart the scan.
10157 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10158 * the results are also dropped.
10160 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10161 (rssi == HCI_RSSI_INVALID ||
10162 (rssi < hdev->discovery.rssi &&
10163 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10166 if (hdev->discovery.uuid_count != 0) {
10167 /* If a list of UUIDs is provided in filter, results with no
10168 * matching UUID should be dropped.
10170 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10171 hdev->discovery.uuids) &&
10172 !eir_has_uuids(scan_rsp, scan_rsp_len,
10173 hdev->discovery.uuid_count,
10174 hdev->discovery.uuids))
10178 /* If duplicate filtering does not report RSSI changes, then restart
10179 * scanning to ensure updated result with updated RSSI values.
10181 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10182 /* Validate RSSI value against the RSSI threshold once more. */
10183 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10184 rssi < hdev->discovery.rssi)
10191 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10192 bdaddr_t *bdaddr, u8 addr_type)
10194 struct mgmt_ev_adv_monitor_device_lost ev;
10196 ev.monitor_handle = cpu_to_le16(handle);
10197 bacpy(&ev.addr.bdaddr, bdaddr);
10198 ev.addr.type = addr_type;
10200 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10204 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10205 struct sk_buff *skb,
10206 struct sock *skip_sk,
10209 struct sk_buff *advmon_skb;
10210 size_t advmon_skb_len;
10211 __le16 *monitor_handle;
10216 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10217 sizeof(struct mgmt_ev_device_found)) + skb->len;
10218 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10223 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
10224 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
10225 * store monitor_handle of the matched monitor.
10227 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10228 *monitor_handle = cpu_to_le16(handle);
10229 skb_put_data(advmon_skb, skb->data, skb->len);
10231 mgmt_event_skb(advmon_skb, skip_sk);
10234 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
10235 bdaddr_t *bdaddr, bool report_device,
10236 struct sk_buff *skb,
10237 struct sock *skip_sk)
10239 struct monitored_device *dev, *tmp;
10240 bool matched = false;
10241 bool notified = false;
10243 /* We have received the Advertisement Report because:
10244 * 1. the kernel has initiated active discovery
10245 * 2. if not, we have pend_le_reports > 0 in which case we are doing
10247 * 3. if none of the above is true, we have one or more active
10248 * Advertisement Monitor
10250 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
10251 * and report ONLY one advertisement per device for the matched Monitor
10252 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10254 * For case 3, since we are not active scanning and all advertisements
10255 * received are due to a matched Advertisement Monitor, report all
10256 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10258 if (report_device && !hdev->advmon_pend_notify) {
10259 mgmt_event_skb(skb, skip_sk);
10263 hdev->advmon_pend_notify = false;
10265 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
10266 if (!bacmp(&dev->bdaddr, bdaddr)) {
10269 if (!dev->notified) {
10270 mgmt_send_adv_monitor_device_found(hdev, skb,
10274 dev->notified = true;
10278 if (!dev->notified)
10279 hdev->advmon_pend_notify = true;
10282 if (!report_device &&
10283 ((matched && !notified) || !msft_monitor_supported(hdev))) {
10284 /* Handle 0 indicates that we are not active scanning and this
10285 * is a subsequent advertisement report for an already matched
10286 * Advertisement Monitor or the controller offloading support
10287 * is not available.
10289 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
10293 mgmt_event_skb(skb, skip_sk);
10298 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
10299 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
10300 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10303 struct sk_buff *skb;
10304 struct mgmt_ev_mesh_device_found *ev;
10307 if (!hdev->mesh_ad_types[0])
10310 /* Scan for requested AD types */
10312 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
10313 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10314 if (!hdev->mesh_ad_types[j])
10317 if (hdev->mesh_ad_types[j] == eir[i + 1])
10323 if (scan_rsp_len > 0) {
10324 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
10325 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10326 if (!hdev->mesh_ad_types[j])
10329 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
10338 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
10339 sizeof(*ev) + eir_len + scan_rsp_len);
10343 ev = skb_put(skb, sizeof(*ev));
10345 bacpy(&ev->addr.bdaddr, bdaddr);
10346 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
10348 ev->flags = cpu_to_le32(flags);
10349 ev->instant = cpu_to_le64(instant);
10352 /* Copy EIR or advertising data into event */
10353 skb_put_data(skb, eir, eir_len);
10355 if (scan_rsp_len > 0)
10356 /* Append scan response data to event */
10357 skb_put_data(skb, scan_rsp, scan_rsp_len);
10359 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10361 mgmt_event_skb(skb, NULL);
10364 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10365 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10366 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10369 struct sk_buff *skb;
10370 struct mgmt_ev_device_found *ev;
10371 bool report_device = hci_discovery_active(hdev);
10373 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
10374 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
10375 eir, eir_len, scan_rsp, scan_rsp_len,
10378 /* Don't send events for a non-kernel initiated discovery. With
10379 * LE one exception is if we have pend_le_reports > 0 in which
10380 * case we're doing passive scanning and want these events.
10382 if (!hci_discovery_active(hdev)) {
10383 if (link_type == ACL_LINK)
10385 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
10386 report_device = true;
10387 else if (!hci_is_adv_monitoring(hdev))
10391 if (hdev->discovery.result_filtering) {
10392 /* We are using service discovery */
10393 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10398 if (hdev->discovery.limited) {
10399 /* Check for limited discoverable bit */
10401 if (!(dev_class[1] & 0x20))
10404 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10405 if (!flags || !(flags[0] & LE_AD_LIMITED))
10410 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
10411 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10412 sizeof(*ev) + eir_len + scan_rsp_len + 5);
10416 ev = skb_put(skb, sizeof(*ev));
10418 /* In case of device discovery with BR/EDR devices (pre 1.2), the
10419 * RSSI value was reported as 0 when not available. This behavior
10420 * is kept when using device discovery. This is required for full
10421 * backwards compatibility with the API.
10423 * However when using service discovery, the value 127 will be
10424 * returned when the RSSI is not available.
10426 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10427 link_type == ACL_LINK)
10430 bacpy(&ev->addr.bdaddr, bdaddr);
10431 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10433 ev->flags = cpu_to_le32(flags);
10436 /* Copy EIR or advertising data into event */
10437 skb_put_data(skb, eir, eir_len);
10439 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
10442 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
10444 skb_put_data(skb, eir_cod, sizeof(eir_cod));
10447 if (scan_rsp_len > 0)
10448 /* Append scan response data to event */
10449 skb_put_data(skb, scan_rsp, scan_rsp_len);
10451 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10453 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
10456 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10457 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10459 struct sk_buff *skb;
10460 struct mgmt_ev_device_found *ev;
10464 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10465 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10467 ev = skb_put(skb, sizeof(*ev));
10468 bacpy(&ev->addr.bdaddr, bdaddr);
10469 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10473 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10475 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10477 ev->eir_len = cpu_to_le16(eir_len);
10478 ev->flags = cpu_to_le32(flags);
10480 mgmt_event_skb(skb, NULL);
10483 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10485 struct mgmt_ev_discovering ev;
10487 bt_dev_dbg(hdev, "discovering %u", discovering);
10489 memset(&ev, 0, sizeof(ev));
10490 ev.type = hdev->discovery.type;
10491 ev.discovering = discovering;
10493 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10496 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10498 struct mgmt_ev_controller_suspend ev;
10500 ev.suspend_state = state;
10501 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10504 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10507 struct mgmt_ev_controller_resume ev;
10509 ev.wake_reason = reason;
10511 bacpy(&ev.addr.bdaddr, bdaddr);
10512 ev.addr.type = addr_type;
10514 memset(&ev.addr, 0, sizeof(ev.addr));
10517 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10520 static struct hci_mgmt_chan chan = {
10521 .channel = HCI_CHANNEL_CONTROL,
10522 .handler_count = ARRAY_SIZE(mgmt_handlers),
10523 .handlers = mgmt_handlers,
10524 .hdev_init = mgmt_init_hdev,
10527 int mgmt_init(void)
10529 return hci_mgmt_chan_register(&chan);
10532 void mgmt_exit(void)
10534 hci_mgmt_chan_unregister(&chan);
10537 void mgmt_cleanup(struct sock *sk)
10539 struct mgmt_mesh_tx *mesh_tx;
10540 struct hci_dev *hdev;
10542 read_lock(&hci_dev_list_lock);
10544 list_for_each_entry(hdev, &hci_dev_list, list) {
10546 mesh_tx = mgmt_mesh_next(hdev, sk);
10549 mesh_send_complete(hdev, mesh_tx, true);
10553 read_unlock(&hci_dev_list_lock);