2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2014 Intel Corporation
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
24 #include <linux/sched/signal.h>
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
31 #include "hci_request.h"
33 #define HCI_REQ_DONE 0
34 #define HCI_REQ_PEND 1
35 #define HCI_REQ_CANCELED 2
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
39 skb_queue_head_init(&req->cmd_q);
44 void hci_req_purge(struct hci_request *req)
46 skb_queue_purge(&req->cmd_q);
49 bool hci_req_status_pend(struct hci_dev *hdev)
51 return hdev->req_status == HCI_REQ_PEND;
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
57 struct hci_dev *hdev = req->hdev;
61 bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
67 skb_queue_purge(&req->cmd_q);
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
75 skb = skb_peek_tail(&req->cmd_q);
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
87 queue_work(hdev->workqueue, &hdev->cmd_work);
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
94 return req_run(req, complete, NULL);
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
99 return req_run(req, NULL, complete);
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
105 bt_dev_dbg(hdev, "result 0x%2.2x", result);
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
111 hdev->req_skb = skb_get(skb);
112 wake_up_interruptible(&hdev->req_wait_q);
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
118 bt_dev_dbg(hdev, "err 0x%2.2x", err);
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 const void *param, u8 event, u32 timeout)
130 struct hci_request req;
134 bt_dev_dbg(hdev, "");
136 hci_req_init(&req, hdev);
138 hci_req_add_ev(&req, opcode, plen, param, event);
140 hdev->req_status = HCI_REQ_PEND;
142 err = hci_req_run_skb(&req, hci_req_sync_complete);
146 err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 hdev->req_status != HCI_REQ_PEND, timeout);
149 if (err == -ERESTARTSYS)
150 return ERR_PTR(-EINTR);
152 switch (hdev->req_status) {
154 err = -bt_to_errno(hdev->req_result);
157 case HCI_REQ_CANCELED:
158 err = -hdev->req_result;
166 hdev->req_status = hdev->req_result = 0;
168 hdev->req_skb = NULL;
170 bt_dev_dbg(hdev, "end: err %d", err);
178 return ERR_PTR(-ENODATA);
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 const void *param, u32 timeout)
187 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
189 EXPORT_SYMBOL(__hci_cmd_sync);
191 /* Execute request and wait for completion. */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
194 unsigned long opt, u32 timeout, u8 *hci_status)
196 struct hci_request req;
199 bt_dev_dbg(hdev, "start");
201 hci_req_init(&req, hdev);
203 hdev->req_status = HCI_REQ_PEND;
205 err = func(&req, opt);
208 *hci_status = HCI_ERROR_UNSPECIFIED;
212 err = hci_req_run_skb(&req, hci_req_sync_complete);
214 hdev->req_status = 0;
216 /* ENODATA means the HCI request command queue is empty.
217 * This can happen when a request with conditionals doesn't
218 * trigger any commands to be sent. This is normal behavior
219 * and should not trigger an error return.
221 if (err == -ENODATA) {
228 *hci_status = HCI_ERROR_UNSPECIFIED;
233 err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 hdev->req_status != HCI_REQ_PEND, timeout);
236 if (err == -ERESTARTSYS)
239 switch (hdev->req_status) {
241 err = -bt_to_errno(hdev->req_result);
243 *hci_status = hdev->req_result;
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
249 *hci_status = HCI_ERROR_UNSPECIFIED;
255 *hci_status = HCI_ERROR_UNSPECIFIED;
259 kfree_skb(hdev->req_skb);
260 hdev->req_skb = NULL;
261 hdev->req_status = hdev->req_result = 0;
263 bt_dev_dbg(hdev, "end: err %d", err);
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
270 unsigned long opt, u32 timeout, u8 *hci_status)
274 if (!test_bit(HCI_UP, &hdev->flags))
277 /* Serialize all requests */
278 hci_req_sync_lock(hdev);
279 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
280 hci_req_sync_unlock(hdev);
285 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
288 int len = HCI_COMMAND_HDR_SIZE + plen;
289 struct hci_command_hdr *hdr;
292 skb = bt_skb_alloc(len, GFP_ATOMIC);
296 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
297 hdr->opcode = cpu_to_le16(opcode);
301 skb_put_data(skb, param, plen);
303 bt_dev_dbg(hdev, "skb len %d", skb->len);
305 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
306 hci_skb_opcode(skb) = opcode;
311 /* Queue a command to an asynchronous HCI request */
312 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
313 const void *param, u8 event)
315 struct hci_dev *hdev = req->hdev;
318 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
320 /* If an error occurred during request building, there is no point in
321 * queueing the HCI command. We can simply return.
326 skb = hci_prepare_cmd(hdev, opcode, plen, param);
328 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
334 if (skb_queue_empty(&req->cmd_q))
335 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
337 bt_cb(skb)->hci.req_event = event;
339 skb_queue_tail(&req->cmd_q, skb);
342 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
345 hci_req_add_ev(req, opcode, plen, param, 0);
348 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
350 struct hci_dev *hdev = req->hdev;
351 struct hci_cp_write_page_scan_activity acp;
354 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
357 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
361 type = PAGE_SCAN_TYPE_INTERLACED;
363 /* 160 msec page scan interval */
364 acp.interval = cpu_to_le16(0x0100);
366 type = hdev->def_page_scan_type;
367 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
370 acp.window = cpu_to_le16(hdev->def_page_scan_window);
372 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
373 __cpu_to_le16(hdev->page_scan_window) != acp.window)
374 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
377 if (hdev->page_scan_type != type)
378 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
381 static void start_interleave_scan(struct hci_dev *hdev)
383 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
384 queue_delayed_work(hdev->req_workqueue,
385 &hdev->interleave_scan, 0);
388 static bool is_interleave_scanning(struct hci_dev *hdev)
390 return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
393 static void cancel_interleave_scan(struct hci_dev *hdev)
395 bt_dev_dbg(hdev, "cancelling interleave scan");
397 cancel_delayed_work_sync(&hdev->interleave_scan);
399 hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
402 /* Return true if interleave_scan wasn't started until exiting this function,
403 * otherwise, return false
405 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
407 /* If there is at least one ADV monitors and one pending LE connection
408 * or one device to be scanned for, we should alternate between
409 * allowlist scan and one without any filters to save power.
411 bool use_interleaving = hci_is_adv_monitoring(hdev) &&
412 !(list_empty(&hdev->pend_le_conns) &&
413 list_empty(&hdev->pend_le_reports));
414 bool is_interleaving = is_interleave_scanning(hdev);
416 if (use_interleaving && !is_interleaving) {
417 start_interleave_scan(hdev);
418 bt_dev_dbg(hdev, "starting interleave scan");
422 if (!use_interleaving && is_interleaving)
423 cancel_interleave_scan(hdev);
428 /* This function controls the background scanning based on hdev->pend_le_conns
429 * list. If there are pending LE connection we start the background scanning,
430 * otherwise we stop it.
432 * This function requires the caller holds hdev->lock.
434 static void __hci_update_background_scan(struct hci_request *req)
436 struct hci_dev *hdev = req->hdev;
438 if (!test_bit(HCI_UP, &hdev->flags) ||
439 test_bit(HCI_INIT, &hdev->flags) ||
440 hci_dev_test_flag(hdev, HCI_SETUP) ||
441 hci_dev_test_flag(hdev, HCI_CONFIG) ||
442 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
443 hci_dev_test_flag(hdev, HCI_UNREGISTER))
446 /* No point in doing scanning if LE support hasn't been enabled */
447 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
450 /* If discovery is active don't interfere with it */
451 if (hdev->discovery.state != DISCOVERY_STOPPED)
454 /* Reset RSSI and UUID filters when starting background scanning
455 * since these filters are meant for service discovery only.
457 * The Start Discovery and Start Service Discovery operations
458 * ensure to set proper values for RSSI threshold and UUID
459 * filter list. So it is safe to just reset them here.
461 hci_discovery_filter_clear(hdev);
463 bt_dev_dbg(hdev, "ADV monitoring is %s",
464 hci_is_adv_monitoring(hdev) ? "on" : "off");
466 if (list_empty(&hdev->pend_le_conns) &&
467 list_empty(&hdev->pend_le_reports) &&
468 !hci_is_adv_monitoring(hdev)) {
469 /* If there is no pending LE connections or devices
470 * to be scanned for or no ADV monitors, we should stop the
471 * background scanning.
474 /* If controller is not scanning we are done. */
475 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
478 hci_req_add_le_scan_disable(req, false);
480 bt_dev_dbg(hdev, "stopping background scanning");
482 /* If there is at least one pending LE connection, we should
483 * keep the background scan running.
486 /* If controller is connecting, we should not start scanning
487 * since some controllers are not able to scan and connect at
490 if (hci_lookup_le_connect(hdev))
493 /* If controller is currently scanning, we stop it to ensure we
494 * don't miss any advertising (due to duplicates filter).
496 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
497 hci_req_add_le_scan_disable(req, false);
499 hci_req_add_le_passive_scan(req);
500 bt_dev_dbg(hdev, "starting background scanning");
504 void __hci_req_update_name(struct hci_request *req)
506 struct hci_dev *hdev = req->hdev;
507 struct hci_cp_write_local_name cp;
509 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
511 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
514 #define PNP_INFO_SVCLASS_ID 0x1200
516 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
518 u8 *ptr = data, *uuids_start = NULL;
519 struct bt_uuid *uuid;
524 list_for_each_entry(uuid, &hdev->uuids, list) {
527 if (uuid->size != 16)
530 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
534 if (uuid16 == PNP_INFO_SVCLASS_ID)
540 uuids_start[1] = EIR_UUID16_ALL;
544 /* Stop if not enough space to put next UUID */
545 if ((ptr - data) + sizeof(u16) > len) {
546 uuids_start[1] = EIR_UUID16_SOME;
550 *ptr++ = (uuid16 & 0x00ff);
551 *ptr++ = (uuid16 & 0xff00) >> 8;
552 uuids_start[0] += sizeof(uuid16);
558 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
560 u8 *ptr = data, *uuids_start = NULL;
561 struct bt_uuid *uuid;
566 list_for_each_entry(uuid, &hdev->uuids, list) {
567 if (uuid->size != 32)
573 uuids_start[1] = EIR_UUID32_ALL;
577 /* Stop if not enough space to put next UUID */
578 if ((ptr - data) + sizeof(u32) > len) {
579 uuids_start[1] = EIR_UUID32_SOME;
583 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
585 uuids_start[0] += sizeof(u32);
591 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
593 u8 *ptr = data, *uuids_start = NULL;
594 struct bt_uuid *uuid;
599 list_for_each_entry(uuid, &hdev->uuids, list) {
600 if (uuid->size != 128)
606 uuids_start[1] = EIR_UUID128_ALL;
610 /* Stop if not enough space to put next UUID */
611 if ((ptr - data) + 16 > len) {
612 uuids_start[1] = EIR_UUID128_SOME;
616 memcpy(ptr, uuid->uuid, 16);
618 uuids_start[0] += 16;
624 static void create_eir(struct hci_dev *hdev, u8 *data)
629 name_len = strlen(hdev->dev_name);
635 ptr[1] = EIR_NAME_SHORT;
637 ptr[1] = EIR_NAME_COMPLETE;
639 /* EIR Data length */
640 ptr[0] = name_len + 1;
642 memcpy(ptr + 2, hdev->dev_name, name_len);
644 ptr += (name_len + 2);
647 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
649 ptr[1] = EIR_TX_POWER;
650 ptr[2] = (u8) hdev->inq_tx_power;
655 if (hdev->devid_source > 0) {
657 ptr[1] = EIR_DEVICE_ID;
659 put_unaligned_le16(hdev->devid_source, ptr + 2);
660 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
661 put_unaligned_le16(hdev->devid_product, ptr + 6);
662 put_unaligned_le16(hdev->devid_version, ptr + 8);
667 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
668 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
669 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
672 void __hci_req_update_eir(struct hci_request *req)
674 struct hci_dev *hdev = req->hdev;
675 struct hci_cp_write_eir cp;
677 if (!hdev_is_powered(hdev))
680 if (!lmp_ext_inq_capable(hdev))
683 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
686 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
689 memset(&cp, 0, sizeof(cp));
691 create_eir(hdev, cp.data);
693 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
696 memcpy(hdev->eir, cp.data, sizeof(cp.data));
698 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
701 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
703 struct hci_dev *hdev = req->hdev;
705 if (hdev->scanning_paused) {
706 bt_dev_dbg(hdev, "Scanning is paused for suspend");
711 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
713 if (use_ext_scan(hdev)) {
714 struct hci_cp_le_set_ext_scan_enable cp;
716 memset(&cp, 0, sizeof(cp));
717 cp.enable = LE_SCAN_DISABLE;
718 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
721 struct hci_cp_le_set_scan_enable cp;
723 memset(&cp, 0, sizeof(cp));
724 cp.enable = LE_SCAN_DISABLE;
725 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
728 /* Disable address resolution */
729 if (use_ll_privacy(hdev) &&
730 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
731 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
734 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
738 static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
741 struct hci_cp_le_del_from_white_list cp;
743 cp.bdaddr_type = bdaddr_type;
744 bacpy(&cp.bdaddr, bdaddr);
746 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
748 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
750 if (use_ll_privacy(req->hdev) &&
751 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
754 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
756 struct hci_cp_le_del_from_resolv_list cp;
758 cp.bdaddr_type = bdaddr_type;
759 bacpy(&cp.bdaddr, bdaddr);
761 hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
767 /* Adds connection to white list if needed. On error, returns -1. */
768 static int add_to_white_list(struct hci_request *req,
769 struct hci_conn_params *params, u8 *num_entries,
772 struct hci_cp_le_add_to_white_list cp;
773 struct hci_dev *hdev = req->hdev;
775 /* Already in white list */
776 if (hci_bdaddr_list_lookup(&hdev->le_white_list, ¶ms->addr,
780 /* Select filter policy to accept all advertising */
781 if (*num_entries >= hdev->le_white_list_size)
784 /* White list can not be used with RPAs */
786 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
787 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) {
791 /* During suspend, only wakeable devices can be in whitelist */
792 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
793 params->current_flags))
797 cp.bdaddr_type = params->addr_type;
798 bacpy(&cp.bdaddr, ¶ms->addr);
800 bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
802 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
804 if (use_ll_privacy(hdev) &&
805 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
808 irk = hci_find_irk_by_addr(hdev, ¶ms->addr,
811 struct hci_cp_le_add_to_resolv_list cp;
813 cp.bdaddr_type = params->addr_type;
814 bacpy(&cp.bdaddr, ¶ms->addr);
815 memcpy(cp.peer_irk, irk->val, 16);
817 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
818 memcpy(cp.local_irk, hdev->irk, 16);
820 memset(cp.local_irk, 0, 16);
822 hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
830 static u8 update_white_list(struct hci_request *req)
832 struct hci_dev *hdev = req->hdev;
833 struct hci_conn_params *params;
834 struct bdaddr_list *b;
836 bool pend_conn, pend_report;
837 /* We allow whitelisting even with RPAs in suspend. In the worst case,
838 * we won't be able to wake from devices that use the privacy1.2
839 * features. Additionally, once we support privacy1.2 and IRK
840 * offloading, we can update this to also check for those conditions.
842 bool allow_rpa = hdev->suspended;
844 /* Go through the current white list programmed into the
845 * controller one by one and check if that address is still
846 * in the list of pending connections or list of devices to
847 * report. If not present in either list, then queue the
848 * command to remove it from the controller.
850 list_for_each_entry(b, &hdev->le_white_list, list) {
851 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
854 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
858 /* If the device is not likely to connect or report,
859 * remove it from the whitelist.
861 if (!pend_conn && !pend_report) {
862 del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
866 /* White list can not be used with RPAs */
868 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
869 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
876 /* Since all no longer valid white list entries have been
877 * removed, walk through the list of pending connections
878 * and ensure that any new device gets programmed into
881 * If the list of the devices is larger than the list of
882 * available white list entries in the controller, then
883 * just abort and return filer policy value to not use the
886 list_for_each_entry(params, &hdev->pend_le_conns, action) {
887 if (add_to_white_list(req, params, &num_entries, allow_rpa))
891 /* After adding all new pending connections, walk through
892 * the list of pending reports and also add these to the
893 * white list if there is still space. Abort if space runs out.
895 list_for_each_entry(params, &hdev->pend_le_reports, action) {
896 if (add_to_white_list(req, params, &num_entries, allow_rpa))
900 /* Use the allowlist unless the following conditions are all true:
901 * - We are not currently suspending
902 * - There are 1 or more ADV monitors registered
903 * - Interleaved scanning is not currently using the allowlist
905 * Once the controller offloading of advertisement monitor is in place,
906 * the above condition should include the support of MSFT extension
909 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
910 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
913 /* Select filter policy to use white list */
917 static bool scan_use_rpa(struct hci_dev *hdev)
919 return hci_dev_test_flag(hdev, HCI_PRIVACY);
922 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
923 u16 window, u8 own_addr_type, u8 filter_policy,
926 struct hci_dev *hdev = req->hdev;
928 if (hdev->scanning_paused) {
929 bt_dev_dbg(hdev, "Scanning is paused for suspend");
933 if (use_ll_privacy(hdev) &&
934 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
938 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
941 /* Use ext scanning if set ext scan param and ext scan enable is
944 if (use_ext_scan(hdev)) {
945 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
946 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
947 struct hci_cp_le_scan_phy_params *phy_params;
948 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
951 ext_param_cp = (void *)data;
952 phy_params = (void *)ext_param_cp->data;
954 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
955 ext_param_cp->own_addr_type = own_addr_type;
956 ext_param_cp->filter_policy = filter_policy;
958 plen = sizeof(*ext_param_cp);
960 if (scan_1m(hdev) || scan_2m(hdev)) {
961 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
963 memset(phy_params, 0, sizeof(*phy_params));
964 phy_params->type = type;
965 phy_params->interval = cpu_to_le16(interval);
966 phy_params->window = cpu_to_le16(window);
968 plen += sizeof(*phy_params);
972 if (scan_coded(hdev)) {
973 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
975 memset(phy_params, 0, sizeof(*phy_params));
976 phy_params->type = type;
977 phy_params->interval = cpu_to_le16(interval);
978 phy_params->window = cpu_to_le16(window);
980 plen += sizeof(*phy_params);
984 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
987 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
988 ext_enable_cp.enable = LE_SCAN_ENABLE;
989 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
991 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
992 sizeof(ext_enable_cp), &ext_enable_cp);
994 struct hci_cp_le_set_scan_param param_cp;
995 struct hci_cp_le_set_scan_enable enable_cp;
997 memset(¶m_cp, 0, sizeof(param_cp));
998 param_cp.type = type;
999 param_cp.interval = cpu_to_le16(interval);
1000 param_cp.window = cpu_to_le16(window);
1001 param_cp.own_address_type = own_addr_type;
1002 param_cp.filter_policy = filter_policy;
1003 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1006 memset(&enable_cp, 0, sizeof(enable_cp));
1007 enable_cp.enable = LE_SCAN_ENABLE;
1008 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1009 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1014 /* Returns true if an le connection is in the scanning state */
1015 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1017 struct hci_conn_hash *h = &hdev->conn_hash;
1022 list_for_each_entry_rcu(c, &h->list, list) {
1023 if (c->type == LE_LINK && c->state == BT_CONNECT &&
1024 test_bit(HCI_CONN_SCANNING, &c->flags)) {
1035 /* Ensure to call hci_req_add_le_scan_disable() first to disable the
1036 * controller based address resolution to be able to reconfigure
1039 void hci_req_add_le_passive_scan(struct hci_request *req)
1041 struct hci_dev *hdev = req->hdev;
1044 u16 window, interval;
1045 /* Background scanning should run with address resolution */
1046 bool addr_resolv = true;
1048 if (hdev->scanning_paused) {
1049 bt_dev_dbg(hdev, "Scanning is paused for suspend");
1053 /* Set require_privacy to false since no SCAN_REQ are send
1054 * during passive scanning. Not using an non-resolvable address
1055 * here is important so that peer devices using direct
1056 * advertising with our address will be correctly reported
1057 * by the controller.
1059 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1063 if (hdev->enable_advmon_interleave_scan &&
1064 __hci_update_interleaved_scan(hdev))
1067 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1068 /* Adding or removing entries from the white list must
1069 * happen before enabling scanning. The controller does
1070 * not allow white list modification while scanning.
1072 filter_policy = update_white_list(req);
1074 /* When the controller is using random resolvable addresses and
1075 * with that having LE privacy enabled, then controllers with
1076 * Extended Scanner Filter Policies support can now enable support
1077 * for handling directed advertising.
1079 * So instead of using filter polices 0x00 (no whitelist)
1080 * and 0x01 (whitelist enabled) use the new filter policies
1081 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
1083 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1084 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1085 filter_policy |= 0x02;
1087 if (hdev->suspended) {
1088 window = hdev->le_scan_window_suspend;
1089 interval = hdev->le_scan_int_suspend;
1090 } else if (hci_is_le_conn_scanning(hdev)) {
1091 window = hdev->le_scan_window_connect;
1092 interval = hdev->le_scan_int_connect;
1093 } else if (hci_is_adv_monitoring(hdev)) {
1094 window = hdev->le_scan_window_adv_monitor;
1095 interval = hdev->le_scan_int_adv_monitor;
1097 window = hdev->le_scan_window;
1098 interval = hdev->le_scan_interval;
1101 bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
1102 hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1103 own_addr_type, filter_policy, addr_resolv);
1106 static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1108 struct adv_info *adv_instance;
1110 /* Instance 0x00 always set local name */
1111 if (instance == 0x00)
1114 adv_instance = hci_find_adv_instance(hdev, instance);
1118 if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1119 adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1122 return adv_instance->scan_rsp_len ? true : false;
1125 static void hci_req_clear_event_filter(struct hci_request *req)
1127 struct hci_cp_set_event_filter f;
1129 memset(&f, 0, sizeof(f));
1130 f.flt_type = HCI_FLT_CLEAR_ALL;
1131 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1133 /* Update page scan state (since we may have modified it when setting
1134 * the event filter).
1136 __hci_req_update_scan(req);
1139 static void hci_req_set_event_filter(struct hci_request *req)
1141 struct bdaddr_list_with_flags *b;
1142 struct hci_cp_set_event_filter f;
1143 struct hci_dev *hdev = req->hdev;
1144 u8 scan = SCAN_DISABLED;
1146 /* Always clear event filter when starting */
1147 hci_req_clear_event_filter(req);
1149 list_for_each_entry(b, &hdev->whitelist, list) {
1150 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1154 memset(&f, 0, sizeof(f));
1155 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1156 f.flt_type = HCI_FLT_CONN_SETUP;
1157 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1158 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1160 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1161 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1166 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1168 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1170 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1173 static void hci_req_config_le_suspend_scan(struct hci_request *req)
1175 /* Before changing params disable scan if enabled */
1176 if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
1177 hci_req_add_le_scan_disable(req, false);
1179 /* Configure params and enable scanning */
1180 hci_req_add_le_passive_scan(req);
1182 /* Block suspend notifier on response */
1183 set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
1186 static void cancel_adv_timeout(struct hci_dev *hdev)
1188 if (hdev->adv_instance_timeout) {
1189 hdev->adv_instance_timeout = 0;
1190 cancel_delayed_work(&hdev->adv_instance_expire);
1194 /* This function requires the caller holds hdev->lock */
1195 void __hci_req_pause_adv_instances(struct hci_request *req)
1197 bt_dev_dbg(req->hdev, "Pausing advertising instances");
1199 /* Call to disable any advertisements active on the controller.
1200 * This will succeed even if no advertisements are configured.
1202 __hci_req_disable_advertising(req);
1204 /* If we are using software rotation, pause the loop */
1205 if (!ext_adv_capable(req->hdev))
1206 cancel_adv_timeout(req->hdev);
1209 /* This function requires the caller holds hdev->lock */
1210 static void __hci_req_resume_adv_instances(struct hci_request *req)
1212 struct adv_info *adv;
1214 bt_dev_dbg(req->hdev, "Resuming advertising instances");
1216 if (ext_adv_capable(req->hdev)) {
1217 /* Call for each tracked instance to be re-enabled */
1218 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1219 __hci_req_enable_ext_advertising(req,
1224 /* Schedule for most recent instance to be restarted and begin
1225 * the software rotation loop
1227 __hci_req_schedule_adv_instance(req,
1228 req->hdev->cur_adv_instance,
1233 /* This function requires the caller holds hdev->lock */
1234 int hci_req_resume_adv_instances(struct hci_dev *hdev)
1236 struct hci_request req;
1238 hci_req_init(&req, hdev);
1239 __hci_req_resume_adv_instances(&req);
1241 return hci_req_run(&req, NULL);
1244 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1246 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1248 if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1249 test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1250 wake_up(&hdev->suspend_wait_q);
/*
 * NOTE(review): this excerpt elides lines (braces, blank lines and some
 * statements are missing from the listing) — verify every observation
 * below against the complete source file.
 *
 * Drive the controller through a suspend-state transition. For
 * BT_SUSPEND_DISCONNECT: pause discovery and advertising, disable page
 * scan and LE passive scan, then soft-disconnect every link. For
 * BT_SUSPEND_CONFIGURE_WAKE: program the event filter and a lower-duty
 * passive scan so paired devices can wake the host. Otherwise (resume
 * path): clear the filter, restore scanning/advertising/discovery.
 * Asynchronous work is tracked via bits in hdev->suspend_tasks; waiters
 * on hdev->suspend_wait_q are woken at the end.
 */
1254 /* Call with hci_dev_lock */
1255 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1258 struct hci_conn *conn;
1259 struct hci_request req;
1261 int disconnect_counter;
/* No-op if we are already in the requested state. */
1263 if (next == hdev->suspend_state) {
1264 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1268 hdev->suspend_state = next;
1269 hci_req_init(&req, hdev);
1271 if (next == BT_SUSPEND_DISCONNECT) {
1272 /* Mark device as suspended */
1273 hdev->suspended = true;
1275 /* Pause discovery if not already stopped */
1276 old_state = hdev->discovery.state;
1277 if (old_state != DISCOVERY_STOPPED) {
1278 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1279 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1280 queue_work(hdev->req_workqueue, &hdev->discov_update);
1283 hdev->discovery_paused = true;
1284 hdev->discovery_old_state = old_state;
1286 /* Stop directed advertising */
1287 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1289 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1290 cancel_delayed_work(&hdev->discov_off);
1291 queue_delayed_work(hdev->req_workqueue,
1292 &hdev->discov_off, 0);
1295 /* Pause other advertisements */
1296 if (hdev->adv_instance_cnt)
1297 __hci_req_pause_adv_instances(&req);
/* Remember old advertising state so the resume path can restore it. */
1299 hdev->advertising_paused = true;
1300 hdev->advertising_old_state = old_state;
1301 /* Disable page scan */
1302 page_scan = SCAN_DISABLED;
1303 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1305 /* Disable LE passive scan if enabled */
1306 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1307 cancel_interleave_scan(hdev);
1308 hci_req_add_le_scan_disable(&req, false);
1311 /* Mark task needing completion */
1312 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1314 /* Prevent disconnects from causing scanning to be re-enabled */
1315 hdev->scanning_paused = true;
1317 /* Run commands before disconnecting */
1318 hci_req_run(&req, suspend_req_complete);
1320 disconnect_counter = 0;
1321 /* Soft disconnect everything (power off) */
1322 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1323 hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1324 disconnect_counter++;
/* Disconnect completions arrive asynchronously; wait on them. */
1327 if (disconnect_counter > 0) {
1329 "Had %d disconnects. Will wait on them",
1330 disconnect_counter);
1331 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1333 } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1334 /* Unpause to take care of updating scanning params */
1335 hdev->scanning_paused = false;
1336 /* Enable event filter for paired devices */
1337 hci_req_set_event_filter(&req);
1338 /* Enable passive scan at lower duty cycle */
1339 hci_req_config_le_suspend_scan(&req);
1340 /* Pause scan changes again. */
1341 hdev->scanning_paused = true;
1342 hci_req_run(&req, suspend_req_complete);
/* Resume path: undo everything the disconnect path set up. */
1344 hdev->suspended = false;
1345 hdev->scanning_paused = false;
1347 hci_req_clear_event_filter(&req);
1348 /* Reset passive/background scanning to normal */
1349 hci_req_config_le_suspend_scan(&req);
1351 /* Unpause directed advertising */
1352 hdev->advertising_paused = false;
1353 if (hdev->advertising_old_state) {
1354 set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1355 hdev->suspend_tasks);
1356 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1357 queue_work(hdev->req_workqueue,
1358 &hdev->discoverable_update);
1359 hdev->advertising_old_state = 0;
1362 /* Resume other advertisements */
1363 if (hdev->adv_instance_cnt)
1364 __hci_req_resume_adv_instances(&req);
1366 /* Unpause discovery */
1367 hdev->discovery_paused = false;
1368 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1369 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1370 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1371 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1372 queue_work(hdev->req_workqueue, &hdev->discov_update);
1375 hci_req_run(&req, suspend_req_complete);
1378 hdev->suspend_state = next;
/* Notifier has been handled; release anyone waiting on the transition. */
1381 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1382 wake_up(&hdev->suspend_wait_q);
/* Whether the currently selected advertising instance is scannable —
 * delegates to adv_instance_is_scannable() for hdev->cur_adv_instance.
 */
1385 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1387 return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
/* Queue HCI command(s) that turn advertising off. On extended-advertising
 * capable controllers instance 0x00 disables the relevant set(s); legacy
 * controllers get LE_SET_ADV_ENABLE.
 * NOTE(review): the declaration/assignment of `enable` is elided from this
 * excerpt — presumably 0x00 on the legacy path; confirm in full source.
 */
1390 void __hci_req_disable_advertising(struct hci_request *req)
1392 if (ext_adv_capable(req->hdev)) {
1393 __hci_req_disable_ext_adv_instance(req, 0x00);
1398 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Compute the MGMT_ADV_FLAG_* set for an advertising instance. Instance
 * 0x00 derives its flags from the global HCI_ADVERTISING_CONNECTABLE /
 * discoverable device flags; other instances use the flags stored on the
 * struct adv_info. Returns 0 for an unknown instance id.
 */
1402 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1405 struct adv_info *adv_instance;
1407 if (instance == 0x00) {
1408 /* Instance 0 always manages the "Tx Power" and "Flags"
1411 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1413 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1414 * corresponds to the "connectable" instance flag.
1416 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1417 flags |= MGMT_ADV_FLAG_CONNECTABLE;
/* Limited discoverable takes precedence over general discoverable. */
1419 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1420 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1421 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1422 flags |= MGMT_ADV_FLAG_DISCOV;
1427 adv_instance = hci_find_adv_instance(hdev, instance);
1429 /* Return 0 when we got an invalid instance identifier. */
1433 return adv_instance->flags;
/* Decide whether advertising should use a Resolvable Private Address:
 * never without HCI_PRIVACY; always in basic privacy mode; in limited
 * privacy mode only when NOT simultaneously discoverable and bondable.
 */
1436 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1438 /* If privacy is not enabled don't use RPA */
1439 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1442 /* If basic privacy mode is enabled use RPA */
1443 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1446 /* If limited privacy mode is enabled don't use RPA if we're
1447 * both discoverable and bondable.
1449 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1450 hci_dev_test_flag(hdev, HCI_BONDABLE))
1453 /* We're neither bondable nor discoverable in the limited
1454 * privacy mode, therefore use RPA.
/* Check the controller's supported-LE-states bitmap (hdev->le_states) to
 * see whether (non)connectable advertising may run concurrently with the
 * existing LE connections in slave and/or master role. The bit numbers in
 * the comments index into the LE states field of the controller.
 */
1459 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1461 /* If there is no connection we are OK to advertise. */
1462 if (hci_conn_num(hdev, LE_LINK) == 0)
1465 /* Check le_states if there is any connection in slave role. */
1466 if (hdev->conn_hash.le_num_slave > 0) {
1467 /* Slave connection state and non connectable mode bit 20. */
1468 if (!connectable && !(hdev->le_states[2] & 0x10))
1471 /* Slave connection state and connectable mode bit 38
1472 * and scannable bit 21.
1474 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1475 !(hdev->le_states[2] & 0x20)))
/* Any LE links beyond the slave ones must be in master role. */
1479 /* Check le_states if there is any connection in master role. */
1480 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1481 /* Master connection state and non connectable mode bit 18. */
1482 if (!connectable && !(hdev->le_states[2] & 0x02))
1485 /* Master connection state and connectable mode bit 35 and
1488 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1489 !(hdev->le_states[2] & 0x08)))
/*
 * Queue legacy LE advertising parameter + enable commands for the current
 * advertising instance. Chooses ADV_IND / ADV_SCAN_IND / ADV_NONCONN_IND
 * from the instance flags and global connectable setting, picks intervals
 * from the instance (or hdev defaults), and updates the random address
 * when privacy requires it. Bails out silently when the controller's LE
 * states forbid advertising alongside current connections.
 * NOTE(review): several lines (braces, early returns, `else` arms) are
 * elided from this excerpt — confirm control flow in the full source.
 */
1496 void __hci_req_enable_advertising(struct hci_request *req)
1498 struct hci_dev *hdev = req->hdev;
1499 struct adv_info *adv_instance;
1500 struct hci_cp_le_set_adv_param cp;
1501 u8 own_addr_type, enable = 0x01;
1503 u16 adv_min_interval, adv_max_interval;
1506 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1507 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1509 /* If the "connectable" instance flag was not set, then choose between
1510 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1512 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1513 mgmt_get_connectable(hdev);
1515 if (!is_advertising_allowed(hdev, connectable))
/* Restart cleanly if advertising is already running. */
1518 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1519 __hci_req_disable_advertising(req);
1521 /* Clear the HCI_LE_ADV bit temporarily so that the
1522 * hci_update_random_address knows that it's safe to go ahead
1523 * and write a new random address. The flag will be set back on
1524 * as soon as the SET_ADV_ENABLE HCI command completes.
1526 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1528 /* Set require_privacy to true only when non-connectable
1529 * advertising is used. In that case it is fine to use a
1530 * non-resolvable private address.
1532 if (hci_update_random_address(req, !connectable,
1533 adv_use_rpa(hdev, flags),
1534 &own_addr_type) < 0)
1537 memset(&cp, 0, sizeof(cp));
/* Instance-specific intervals win over the device-wide defaults. */
1540 adv_min_interval = adv_instance->min_interval;
1541 adv_max_interval = adv_instance->max_interval;
1543 adv_min_interval = hdev->le_adv_min_interval;
1544 adv_max_interval = hdev->le_adv_max_interval;
1548 cp.type = LE_ADV_IND;
1550 if (adv_cur_instance_is_scannable(hdev))
1551 cp.type = LE_ADV_SCAN_IND;
1553 cp.type = LE_ADV_NONCONN_IND;
/* Fast advertising intervals while not generally discoverable. */
1555 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1556 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1557 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1558 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1562 cp.min_interval = cpu_to_le16(adv_min_interval);
1563 cp.max_interval = cpu_to_le16(adv_max_interval);
1564 cp.own_address_type = own_addr_type;
1565 cp.channel_map = hdev->le_adv_channel_map;
1567 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1569 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Append the local device name to an EIR/AD buffer at ptr+ad_len.
 * Prefers the complete name when it fits in a short-name slot, then the
 * configured short name, then a truncated copy of the complete name.
 * Returns the new total AD length (unchanged if there is no room).
 */
1572 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1575 size_t complete_len;
1577 /* no space left for name (+ NULL + type + len) */
1578 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1581 /* use complete name if present and fits */
1582 complete_len = strlen(hdev->dev_name);
1583 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
/* +1 so the terminating NUL is included in the AD element. */
1584 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1585 hdev->dev_name, complete_len + 1);
1587 /* use short name if present */
1588 short_len = strlen(hdev->short_name);
1590 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1591 hdev->short_name, short_len + 1);
1593 /* use shortened full name if present, we already know that name
1594 * is longer then HCI_MAX_SHORT_NAME_LENGTH
1597 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1599 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1600 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1602 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
/* Append the 16-bit Appearance value as an EIR_APPEARANCE AD element;
 * returns the new total AD length.
 */
1609 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1611 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
/* Build the default (instance-0) scan response: appearance (when set)
 * followed by the local name. Returns the resulting data length.
 */
1614 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1616 u8 scan_rsp_len = 0;
1618 if (hdev->appearance) {
1619 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1622 return append_local_name(hdev, ptr, scan_rsp_len);
/* Build the scan response for a specific advertising instance: optional
 * appearance and local name (when the instance flags request them) plus
 * the instance's own scan_rsp_data. Returns the resulting length.
 * NOTE(review): the not-found early return after hci_find_adv_instance()
 * is elided from this excerpt — confirm in full source.
 */
1625 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1628 struct adv_info *adv_instance;
1630 u8 scan_rsp_len = 0;
1632 adv_instance = hci_find_adv_instance(hdev, instance);
1636 instance_flags = adv_instance->flags;
1638 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1639 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1642 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1643 adv_instance->scan_rsp_len);
1645 scan_rsp_len += adv_instance->scan_rsp_len;
1647 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1648 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1650 return scan_rsp_len;
/* Queue the scan-response-data HCI command for `instance` (extended or
 * legacy variant depending on controller capability). Skips the command
 * when the newly built data equals the cached hdev->scan_rsp_data; the
 * cache is refreshed before queuing.
 * NOTE(review): the instance==0 default-data branch markers and early
 * returns are elided from this excerpt — confirm in full source.
 */
1653 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1655 struct hci_dev *hdev = req->hdev;
1658 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1661 if (ext_adv_capable(hdev)) {
1662 struct hci_cp_le_set_ext_scan_rsp_data cp;
1664 memset(&cp, 0, sizeof(cp));
1667 len = create_instance_scan_rsp_data(hdev, instance,
1670 len = create_default_scan_rsp_data(hdev, cp.data);
/* Nothing to do when the data did not change. */
1672 if (hdev->scan_rsp_data_len == len &&
1673 !memcmp(cp.data, hdev->scan_rsp_data, len))
1676 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1677 hdev->scan_rsp_data_len = len;
1679 cp.handle = instance;
1681 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1682 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1684 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1687 struct hci_cp_le_set_scan_rsp_data cp;
1689 memset(&cp, 0, sizeof(cp));
1692 len = create_instance_scan_rsp_data(hdev, instance,
1695 len = create_default_scan_rsp_data(hdev, cp.data);
1697 if (hdev->scan_rsp_data_len == len &&
1698 !memcmp(cp.data, hdev->scan_rsp_data, len))
1701 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1702 hdev->scan_rsp_data_len = len;
1706 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Build the advertising data payload for `instance`: optional Flags AD
 * element (general/limited discoverable, no-BR/EDR), the instance's own
 * adv_data, and an optional TX-power element. Returns the total length.
 * NOTE(review): several statements (early returns, flags-append call,
 * ad_len accounting for the TX power element) are elided from this
 * excerpt — confirm against the full source.
 */
1710 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1712 struct adv_info *adv_instance = NULL;
1713 u8 ad_len = 0, flags = 0;
1716 /* Return 0 when the current instance identifier is invalid. */
1718 adv_instance = hci_find_adv_instance(hdev, instance);
1723 instance_flags = get_adv_instance_flags(hdev, instance);
1725 /* If instance already has the flags set skip adding it once
1728 if (adv_instance && eir_get_data(adv_instance->adv_data,
1729 adv_instance->adv_data_len, EIR_FLAGS,
1733 /* The Add Advertising command allows userspace to set both the general
1734 * and limited discoverable flags.
1736 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1737 flags |= LE_AD_GENERAL;
1739 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1740 flags |= LE_AD_LIMITED;
1742 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1743 flags |= LE_AD_NO_BREDR;
1745 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1746 /* If a discovery flag wasn't provided, simply use the global
1750 flags |= mgmt_get_adv_discov_flags(hdev);
1752 /* If flags would still be empty, then there is no need to
1753 * include the "Flags" AD field".
/* Copy the instance's raw advertising data after the flags element. */
1767 memcpy(ptr, adv_instance->adv_data,
1768 adv_instance->adv_data_len);
1769 ad_len += adv_instance->adv_data_len;
1770 ptr += adv_instance->adv_data_len;
1773 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
/* Ext-adv controllers report per-instance TX power; legacy uses
 * the device-wide hdev->adv_tx_power. */
1776 if (ext_adv_capable(hdev)) {
1778 adv_tx_power = adv_instance->tx_power;
1780 adv_tx_power = hdev->adv_tx_power;
1782 adv_tx_power = hdev->adv_tx_power;
1785 /* Provide Tx Power only if we can provide a valid value for it */
1786 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1788 ptr[1] = EIR_TX_POWER;
1789 ptr[2] = (u8)adv_tx_power;
/* Queue the advertising-data HCI command for `instance` (extended or
 * legacy variant). Mirrors __hci_req_update_scan_rsp_data(): skip when
 * unchanged versus the hdev->adv_data cache, refresh the cache, queue.
 */
1799 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1801 struct hci_dev *hdev = req->hdev;
1804 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1807 if (ext_adv_capable(hdev)) {
1808 struct hci_cp_le_set_ext_adv_data cp;
1810 memset(&cp, 0, sizeof(cp));
1812 len = create_instance_adv_data(hdev, instance, cp.data);
1814 /* There's nothing to do if the data hasn't changed */
1815 if (hdev->adv_data_len == len &&
1816 memcmp(cp.data, hdev->adv_data, len) == 0)
1819 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1820 hdev->adv_data_len = len;
1823 cp.handle = instance;
1824 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1825 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1827 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1829 struct hci_cp_le_set_adv_data cp;
1831 memset(&cp, 0, sizeof(cp));
1833 len = create_instance_adv_data(hdev, instance, cp.data);
1835 /* There's nothing to do if the data hasn't changed */
1836 if (hdev->adv_data_len == len &&
1837 memcmp(cp.data, hdev->adv_data, len) == 0)
1840 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1841 hdev->adv_data_len = len;
1845 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Convenience wrapper: build a standalone request that updates the
 * advertising data for `instance` and run it immediately.
 */
1849 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1851 struct hci_request req;
1853 hci_req_init(&req, hdev);
1854 __hci_req_update_adv_data(&req, instance);
1856 return hci_req_run(&req, NULL);
/* Request-completion callback for the address-resolution toggle; only
 * logs the command status.
 */
1859 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1862 BT_DBG("%s status %u", hdev->name, status);
/* Queue and run a request disabling controller-side LL-privacy address
 * resolution; no-op unless the controller supports it and resolution is
 * currently on.
 * NOTE(review): the declaration/value of `enable` (presumably 0x00) is
 * elided from this excerpt — confirm in full source.
 */
1865 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1867 struct hci_request req;
1870 if (!use_ll_privacy(hdev) &&
1871 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1874 hci_req_init(&req, hdev);
1876 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1878 hci_req_run(&req, enable_addr_resolution_complete);
/* Request-completion callback used when re-enabling advertising; only
 * logs the command status.
 */
1881 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1883 bt_dev_dbg(hdev, "status %u", status);
/* Restart advertising after it was implicitly stopped (e.g. by a
 * connection). Re-schedules the current instance if one is active,
 * otherwise restarts instance 0 via the extended or legacy path.
 * No-op when neither HCI_ADVERTISING is set nor instances exist.
 */
1886 void hci_req_reenable_advertising(struct hci_dev *hdev)
1888 struct hci_request req;
1890 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1891 list_empty(&hdev->adv_instances))
1894 hci_req_init(&req, hdev);
1896 if (hdev->cur_adv_instance) {
1897 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1900 if (ext_adv_capable(hdev)) {
1901 __hci_req_start_ext_adv(&req, 0x00);
1903 __hci_req_update_adv_data(&req, 0x00);
1904 __hci_req_update_scan_rsp_data(&req, 0x00);
1905 __hci_req_enable_advertising(&req);
1909 hci_req_run(&req, adv_enable_complete);
/* Delayed-work handler for adv_instance_expire: remove the timed-out
 * current instance (possibly scheduling the next one) and disable
 * advertising entirely when no instances remain. Runs under
 * hci_dev_lock (the matching hci_dev_lock() call is elided from this
 * excerpt — confirm in full source).
 */
1912 static void adv_timeout_expire(struct work_struct *work)
1914 struct hci_dev *hdev = container_of(work, struct hci_dev,
1915 adv_instance_expire.work);
1917 struct hci_request req;
1920 bt_dev_dbg(hdev, "");
1924 hdev->adv_instance_timeout = 0;
1926 instance = hdev->cur_adv_instance;
1927 if (instance == 0x00)
1930 hci_req_init(&req, hdev);
1932 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1934 if (list_empty(&hdev->adv_instances))
1935 __hci_req_disable_advertising(&req);
1937 hci_req_run(&req, NULL);
1940 hci_dev_unlock(hdev);
/* One step of interleaved scanning: restart passive scan (disabling the
 * running one first) and flip interleave_scan_state between ALLOWLIST and
 * NO_FILTER. INTERLEAVE_SCAN_NONE is unexpected here. Locks hdev around
 * the state update (the matching hci_dev_lock() is elided from this
 * excerpt) and returns via the elided tail — confirm in full source.
 */
1943 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1946 struct hci_dev *hdev = req->hdev;
1951 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1952 hci_req_add_le_scan_disable(req, false);
1953 hci_req_add_le_passive_scan(req);
1955 switch (hdev->interleave_scan_state) {
1956 case INTERLEAVE_SCAN_ALLOWLIST:
1957 bt_dev_dbg(hdev, "next state: allowlist");
1958 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1960 case INTERLEAVE_SCAN_NO_FILTER:
1961 bt_dev_dbg(hdev, "next state: no filter");
1962 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1964 case INTERLEAVE_SCAN_NONE:
1965 BT_ERR("unexpected error");
1969 hci_dev_unlock(hdev);
/* Delayed-work handler driving interleaved scanning: run one interleave
 * step synchronously, then re-queue itself with the duration that matches
 * the current state (allowlist vs. no-filter), unless interleaving was
 * cancelled in the meantime.
 */
1974 static void interleave_scan_work(struct work_struct *work)
1976 struct hci_dev *hdev = container_of(work, struct hci_dev,
1977 interleave_scan.work);
1979 unsigned long timeout;
1981 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1982 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1983 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1984 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1986 bt_dev_err(hdev, "unexpected error");
1990 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1991 HCI_CMD_TIMEOUT, &status);
1993 /* Don't continue interleaving if it was canceled */
1994 if (is_interleave_scanning(hdev))
1995 queue_delayed_work(hdev->req_workqueue,
1996 &hdev->interleave_scan, timeout);
/*
 * Pick the own-address type (and, when random, the address itself in
 * *rand_addr) for advertising. Order of preference: an RPA when privacy
 * allows (regenerated on expiry, with the rpa_expired delayed work
 * re-armed), an NRPA when privacy is required but RPA is not usable, else
 * the public address. `adv_instance` may be NULL; when set, its own RPA
 * expiry/random_addr state is consulted instead of the device-wide one.
 * NOTE(review): this excerpt elides several branch/return lines —
 * confirm control flow in the full source.
 */
1999 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2000 bool use_rpa, struct adv_info *adv_instance,
2001 u8 *own_addr_type, bdaddr_t *rand_addr)
2005 bacpy(rand_addr, BDADDR_ANY);
2007 /* If privacy is enabled use a resolvable private address. If
2008 * current RPA has expired then generate a new one.
2013 /* If Controller supports LL Privacy use own address type is
2016 if (use_ll_privacy(hdev))
2017 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2019 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Per-instance path: keep the current RPA if still valid. */
2022 if (!adv_instance->rpa_expired &&
2023 !bacmp(&adv_instance->random_addr, &hdev->rpa))
2026 adv_instance->rpa_expired = false;
/* Device-wide path: keep the current RPA if still valid. */
2028 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2029 !bacmp(&hdev->random_addr, &hdev->rpa))
2033 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2035 bt_dev_err(hdev, "failed to generate new RPA");
2039 bacpy(rand_addr, &hdev->rpa);
/* Re-arm RPA rotation after rpa_timeout seconds. */
2041 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2043 queue_delayed_work(hdev->workqueue,
2044 &adv_instance->rpa_expired_cb, to);
2046 queue_delayed_work(hdev->workqueue,
2047 &hdev->rpa_expired, to);
2052 /* In case of required privacy without resolvable private address,
2053 * use an non-resolvable private address. This is useful for
2054 * non-connectable advertising.
2056 if (require_privacy) {
2060 /* The non-resolvable private address is generated
2061 * from random six bytes with the two most significant
2064 get_random_bytes(&nrpa, 6);
2067 /* The non-resolvable private address shall not be
2068 * equal to the public address.
2070 if (bacmp(&hdev->bdaddr, &nrpa))
2074 *own_addr_type = ADDR_LE_DEV_RANDOM;
2075 bacpy(rand_addr, &nrpa);
2080 /* No privacy so use a public address. */
2081 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Queue LE Clear Advertising Sets, removing all extended advertising sets
 * from the controller.
 */
2086 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2088 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
/*
 * Queue LE Set Extended Advertising Parameters for `instance`, choosing
 * event properties (legacy vs. extended, connectable/scannable/
 * non-connectable), intervals, TX power, PHYs and own-address type; when
 * a random address is in use and differs from the cached one, also queue
 * LE Set Advertising Set Random Address.
 * NOTE(review): several lines (early returns, `else` markers, the second
 * hci_req_add argument list) are elided from this excerpt — confirm in
 * the full source.
 */
2091 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2093 struct hci_cp_le_set_ext_adv_params cp;
2094 struct hci_dev *hdev = req->hdev;
2097 bdaddr_t random_addr;
2100 struct adv_info *adv_instance;
2104 adv_instance = hci_find_adv_instance(hdev, instance);
2108 adv_instance = NULL;
2111 flags = get_adv_instance_flags(hdev, instance);
2113 /* If the "connectable" instance flag was not set, then choose between
2114 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2116 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2117 mgmt_get_connectable(hdev);
2119 if (!is_advertising_allowed(hdev, connectable))
2122 /* Set require_privacy to true only when non-connectable
2123 * advertising is used. In that case it is fine to use a
2124 * non-resolvable private address.
2126 err = hci_get_random_address(hdev, !connectable,
2127 adv_use_rpa(hdev, flags), adv_instance,
2128 &own_addr_type, &random_addr);
2132 memset(&cp, 0, sizeof(cp));
/* Per-instance interval/TX power, else device defaults. */
2135 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2136 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2137 cp.tx_power = adv_instance->tx_power;
2139 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2140 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2141 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
/* Extended (secondary-PHY) event types only when a SEC flag is set. */
2144 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2148 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2150 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2151 } else if (adv_instance_is_scannable(hdev, instance)) {
2153 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2155 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2158 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2160 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2163 cp.own_addr_type = own_addr_type;
2164 cp.channel_map = hdev->le_adv_channel_map;
2165 cp.handle = instance;
2167 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2168 cp.primary_phy = HCI_ADV_PHY_1M;
2169 cp.secondary_phy = HCI_ADV_PHY_2M;
2170 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2171 cp.primary_phy = HCI_ADV_PHY_CODED;
2172 cp.secondary_phy = HCI_ADV_PHY_CODED;
2174 /* In all other cases use 1M */
2175 cp.primary_phy = HCI_ADV_PHY_1M;
2176 cp.secondary_phy = HCI_ADV_PHY_1M;
2179 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2181 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2182 bacmp(&random_addr, BDADDR_ANY)) {
2183 struct hci_cp_le_set_adv_set_rand_addr cp;
2185 /* Check if random address need to be updated */
2187 if (!bacmp(&random_addr, &adv_instance->random_addr))
2190 if (!bacmp(&random_addr, &hdev->random_addr))
2194 memset(&cp, 0, sizeof(cp));
2196 cp.handle = instance;
2197 bacpy(&cp.bdaddr, &random_addr);
2200 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
/* Queue LE Set Extended Advertising Enable for one set. When the instance
 * has a duration configured, it is passed to the controller (units of
 * 10 ms) so the controller enforces the timeout itself.
 * NOTE(review): the assignments initializing `cp` from `data` and the
 * enable field are elided from this excerpt; also the duration is
 * computed from adv_instance->timeout while the condition checks
 * adv_instance->duration — verify intent against the full source.
 */
2207 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2209 struct hci_dev *hdev = req->hdev;
2210 struct hci_cp_le_set_ext_adv_enable *cp;
2211 struct hci_cp_ext_adv_set *adv_set;
2212 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2213 struct adv_info *adv_instance;
2216 adv_instance = hci_find_adv_instance(hdev, instance);
2220 adv_instance = NULL;
2224 adv_set = (void *) cp->data;
2226 memset(cp, 0, sizeof(*cp));
2229 cp->num_of_sets = 0x01;
2231 memset(adv_set, 0, sizeof(*adv_set));
2233 adv_set->handle = instance;
2235 /* Set duration per instance since controller is responsible for
2238 if (adv_instance && adv_instance->duration) {
2239 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2241 /* Time = N * 10 ms */
2242 adv_set->duration = cpu_to_le16(duration / 10);
2245 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2246 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
/* Queue LE Set Extended Advertising Enable with enable=0 for `instance`;
 * instance 0x00 means "disable all sets" (num_of_sets = 0). Fails when a
 * non-zero instance does not exist.
 */
2252 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2254 struct hci_dev *hdev = req->hdev;
2255 struct hci_cp_le_set_ext_adv_enable *cp;
2256 struct hci_cp_ext_adv_set *adv_set;
2257 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2260 /* If request specifies an instance that doesn't exist, fail */
2261 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2264 memset(data, 0, sizeof(data));
2267 adv_set = (void *)cp->data;
2269 /* Instance 0x00 indicates all advertising instances will be disabled */
2270 cp->num_of_sets = !!instance;
2273 adv_set->handle = instance;
/* Command length depends on whether one set entry is included. */
2275 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2276 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
/* Queue LE Remove Advertising Set for `instance`; fails when a non-zero
 * instance does not exist.
 */
2281 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2283 struct hci_dev *hdev = req->hdev;
2285 /* If request specifies an instance that doesn't exist, fail */
2286 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2289 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
/* (Re)start extended advertising for `instance`: disable a set already
 * known to the controller, reprogram its parameters, refresh scan
 * response data and enable it again.
 */
2294 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2296 struct hci_dev *hdev = req->hdev;
2297 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2300 /* If instance isn't pending, the chip knows about it, and it's safe to
2303 if (adv_instance && !adv_instance->pending)
2304 __hci_req_disable_ext_adv_instance(req, instance);
2306 err = __hci_req_setup_ext_adv_instance(req, instance);
2310 __hci_req_update_scan_rsp_data(req, instance);
2311 __hci_req_enable_ext_advertising(req, instance);
/*
 * Activate advertising instance `instance`: compute the timeout from the
 * instance's duration vs. remaining lifetime, arm the legacy expiry work
 * when needed, and queue the commands that make the instance current.
 * With `force` false, re-scheduling the already-active instance issues no
 * commands. Refuses while software advertising (HCI_ADVERTISING) is on,
 * while another instance timeout is pending, or with no instances.
 * NOTE(review): early-return lines are elided from this excerpt —
 * confirm control flow in the full source.
 */
2316 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2319 struct hci_dev *hdev = req->hdev;
2320 struct adv_info *adv_instance = NULL;
2323 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2324 list_empty(&hdev->adv_instances))
2327 if (hdev->adv_instance_timeout)
2330 adv_instance = hci_find_adv_instance(hdev, instance)
2334 /* A zero timeout means unlimited advertising. As long as there is
2335 * only one instance, duration should be ignored. We still set a timeout
2336 * in case further instances are being added later on.
2338 * If the remaining lifetime of the instance is more than the duration
2339 * then the timeout corresponds to the duration, otherwise it will be
2340 * reduced to the remaining instance lifetime.
2342 if (adv_instance->timeout == 0 ||
2343 adv_instance->duration <= adv_instance->remaining_time)
2344 timeout = adv_instance->duration;
2346 timeout = adv_instance->remaining_time;
2348 /* The remaining time is being reduced unless the instance is being
2349 * advertised without time limit.
2351 if (adv_instance->timeout)
2352 adv_instance->remaining_time =
2353 adv_instance->remaining_time - timeout;
2355 /* Only use work for scheduling instances with legacy advertising */
2356 if (!ext_adv_capable(hdev)) {
2357 hdev->adv_instance_timeout = timeout;
2358 queue_delayed_work(hdev->req_workqueue,
2359 &hdev->adv_instance_expire,
2360 msecs_to_jiffies(timeout * 1000));
2363 /* If we're just re-scheduling the same instance again then do not
2364 * execute any HCI commands. This happens when a single instance is
2367 if (!force && hdev->cur_adv_instance == instance &&
2368 hci_dev_test_flag(hdev, HCI_LE_ADV))
2371 hdev->cur_adv_instance = instance;
2372 if (ext_adv_capable(hdev)) {
2373 __hci_req_start_ext_adv(req, instance);
2375 __hci_req_update_adv_data(req, instance);
2376 __hci_req_update_scan_rsp_data(req, instance);
2377 __hci_req_enable_advertising(req);
2383 /* For a single instance:
2384 * - force == true: The instance will be removed even when its remaining
2385 * lifetime is not zero.
2386 * - force == false: the instance will be deactivated but kept stored unless
2387 * the remaining lifetime is zero.
2389 * For instance == 0x00:
2390 * - force == true: All instances will be removed regardless of their timeout
2392 * - force == false: Only instances that have a timeout will be removed.
/* Remove one or all advertising instances, notify mgmt listeners via
 * `sk`, and — when `req` is given, the device is powered and software
 * advertising is off — schedule the next instance (legacy path only).
 */
2394 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2395 struct hci_request *req, u8 instance,
2398 struct adv_info *adv_instance, *n, *next_instance = NULL;
2402 /* Cancel any timeout concerning the removed instance(s). */
2403 if (!instance || hdev->cur_adv_instance == instance)
2404 cancel_adv_timeout(hdev);
2406 /* Get the next instance to advertise BEFORE we remove
2407 * the current one. This can be the same instance again
2408 * if there is only one instance.
2410 if (instance && hdev->cur_adv_instance == instance)
2411 next_instance = hci_get_next_instance(hdev, instance);
2413 if (instance == 0x00) {
2414 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2416 if (!(force || adv_instance->timeout))
2419 rem_inst = adv_instance->instance;
2420 err = hci_remove_adv_instance(hdev, rem_inst);
2422 mgmt_advertising_removed(sk, hdev, rem_inst);
/* Single-instance path. */
2425 adv_instance = hci_find_adv_instance(hdev, instance);
2427 if (force || (adv_instance && adv_instance->timeout &&
2428 !adv_instance->remaining_time)) {
2429 /* Don't advertise a removed instance. */
2430 if (next_instance &&
2431 next_instance->instance == instance)
2432 next_instance = NULL;
2434 err = hci_remove_adv_instance(hdev, instance);
2436 mgmt_advertising_removed(sk, hdev, instance);
2440 if (!req || !hdev_is_powered(hdev) ||
2441 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2444 if (next_instance && !ext_adv_capable(hdev))
2445 __hci_req_schedule_adv_instance(req, next_instance->instance,
/* Queue LE Set Random Address, unless advertising or an LE connection
 * attempt is in progress — then mark the RPA expired so the update
 * happens on the next cycle instead.
 */
2449 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2451 struct hci_dev *hdev = req->hdev;
2453 /* If we're advertising or initiating an LE connection we can't
2454 * go ahead and change the random address at this time. This is
2455 * because the eventual initiator address used for the
2456 * subsequently created connection will be undefined (some
2457 * controllers use the new address and others the one we had
2458 * when the operation started).
2460 * In this kind of scenario skip the update and let the random
2461 * address be updated at the next cycle.
2463 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2464 hci_lookup_le_connect(hdev)) {
2465 bt_dev_dbg(hdev, "Deferring random address update");
2466 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2470 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/*
 * Select the own-address type for scanning/initiating and queue any
 * random-address update needed: RPA when privacy allows (regenerating on
 * expiry and re-arming the rpa_expired work), NRPA when privacy is
 * required without RPA, static address when forced / no public address /
 * BR/EDR disabled with a static address configured, otherwise public.
 * NOTE(review): this excerpt elides several branch/return lines —
 * confirm control flow in the full source.
 */
2473 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2474 bool use_rpa, u8 *own_addr_type)
2476 struct hci_dev *hdev = req->hdev;
2479 /* If privacy is enabled use a resolvable private address. If
2480 * current RPA has expired or there is something else than
2481 * the current RPA in use, then generate a new one.
2486 /* If Controller supports LL Privacy use own address type is
2489 if (use_ll_privacy(hdev))
2490 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2492 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Keep the current RPA when still valid and already programmed. */
2494 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2495 !bacmp(&hdev->random_addr, &hdev->rpa))
2498 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2500 bt_dev_err(hdev, "failed to generate new RPA");
2504 set_random_addr(req, &hdev->rpa);
/* Re-arm RPA rotation after rpa_timeout seconds. */
2506 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2507 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2512 /* In case of required privacy without resolvable private address,
2513 * use an non-resolvable private address. This is useful for active
2514 * scanning and non-connectable advertising.
2516 if (require_privacy) {
2520 /* The non-resolvable private address is generated
2521 * from random six bytes with the two most significant
2524 get_random_bytes(&nrpa, 6);
2527 /* The non-resolvable private address shall not be
2528 * equal to the public address.
2530 if (bacmp(&hdev->bdaddr, &nrpa))
2534 *own_addr_type = ADDR_LE_DEV_RANDOM;
2535 set_random_addr(req, &nrpa);
2539 /* If forcing static address is in use or there is no public
2540 * address use the static address as random address (but skip
2541 * the HCI command if the current random address is already the
2544 * In case BR/EDR has been disabled on a dual-mode controller
2545 * and a static address has been configured, then use that
2546 * address instead of the public BR/EDR address.
2548 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2549 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2550 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2551 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2552 *own_addr_type = ADDR_LE_DEV_RANDOM;
2553 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2554 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2555 &hdev->static_addr);
2559 /* Neither privacy nor static address is being used so use a
2562 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* True when any whitelist entry has no active (connected/config) ACL
 * link — i.e. page scan must stay on so those devices can reconnect.
 */
2567 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2569 struct bdaddr_list *b;
2571 list_for_each_entry(b, &hdev->whitelist, list) {
2572 struct hci_conn *conn;
2574 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2578 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
/* Queue Write Scan Enable with page/inquiry scan derived from the
 * connectable and discoverable flags (plus whitelist reconnect needs).
 * Skips the command when the requested mode matches the current
 * HCI_PSCAN/HCI_ISCAN state, and bails out while powered down, powering
 * down, scanning paused, or BR/EDR disabled.
 */
2585 void __hci_req_update_scan(struct hci_request *req)
2587 struct hci_dev *hdev = req->hdev;
2590 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2593 if (!hdev_is_powered(hdev))
2596 if (mgmt_powering_down(hdev))
2599 if (hdev->scanning_paused)
2602 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2603 disconnected_whitelist_entries(hdev))
/* else: not connectable and nothing to reconnect — disable. */
2606 scan = SCAN_DISABLED;
2608 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2609 scan |= SCAN_INQUIRY;
2611 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2612 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2615 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* hci_req_sync callback: update scan mode under hci_dev_lock. */
2618 static int update_scan(struct hci_request *req, unsigned long opt)
2620 hci_dev_lock(req->hdev);
2621 __hci_req_update_scan(req);
2622 hci_dev_unlock(req->hdev);
/* Deferred work handler for hdev->scan_update: run the scan-mode
 * update synchronously on the request workqueue.
 */
2626 static void scan_update_work(struct work_struct *work)
2628 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2630 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
/* hci_req_sync() callback run when the connectable setting changes:
 * refreshes scan mode, advertising data/parameters and the background
 * scan. 'opt' is unused.
 * NOTE(review): hci_dev_lock() pairing with the unlock below is not
 * visible in this extraction — presumably taken near the top; confirm
 * against the full source.
 */
2633 static int connectable_update(struct hci_request *req, unsigned long opt)
2635 struct hci_dev *hdev = req->hdev;
2639 __hci_req_update_scan(req);
2641 /* If BR/EDR is not enabled and we disable advertising as a
2642 * by-product of disabling connectable, we need to update the
2643 * advertising flags.
2645 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2646 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2648 /* Update the advertising parameters if necessary */
2649 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2650 !list_empty(&hdev->adv_instances)) {
/* Prefer the extended-advertising path when the controller supports it. */
2651 if (ext_adv_capable(hdev))
2652 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2654 __hci_req_enable_advertising(req);
2657 __hci_update_background_scan(req);
2659 hci_dev_unlock(hdev);
/* Deferred work handler for hdev->connectable_update: run the
 * connectable-state update synchronously and report the resulting
 * status to the management interface.
 */
2664 static void connectable_update_work(struct work_struct *work)
2666 struct hci_dev *hdev = container_of(work, struct hci_dev,
2667 connectable_update);
2670 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2671 mgmt_set_connectable_complete(hdev, status);
/* OR together the service-class hint bits of all registered UUIDs;
 * used as the service-class octet of the Class of Device.
 */
2674 static u8 get_service_classes(struct hci_dev *hdev)
2676 struct bt_uuid *uuid;
2679 list_for_each_entry(uuid, &hdev->uuids, list)
2680 val |= uuid->svc_hint;
/* Queue a Write Class of Device command built from minor/major class
 * and the aggregated service-class hints. Skipped when powered off,
 * BR/EDR is disabled, the service cache is active, or the computed
 * class already matches the controller's current one.
 */
2685 void __hci_req_update_class(struct hci_request *req)
2687 struct hci_dev *hdev = req->hdev;
2690 bt_dev_dbg(hdev, "");
2692 if (!hdev_is_powered(hdev))
2695 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2698 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
/* CoD layout: cod[0]=minor, cod[1]=major, cod[2]=service classes. */
2701 cod[0] = hdev->minor_class;
2702 cod[1] = hdev->major_class;
2703 cod[2] = get_service_classes(hdev);
/* Limited-discoverable sets an extra bit — handling not visible in
 * this extraction; presumably cod[1] |= 0x20. TODO confirm.
 */
2705 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2708 if (memcmp(cod, hdev->dev_class, 3) == 0)
2711 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Queue a Write Current IAC LAP command selecting the inquiry access
 * codes: LIAC+GIAC in limited discoverable mode, GIAC alone otherwise.
 * No-op when the device is not discoverable. Each LAP is 3 octets,
 * hence the (num_iac * 3) + 1 parameter length.
 */
2714 static void write_iac(struct hci_request *req)
2716 struct hci_dev *hdev = req->hdev;
2717 struct hci_cp_write_current_iac_lap cp;
2719 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2722 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2723 /* Limited discoverable mode */
/* Cap at two IACs even if the controller reports support for more. */
2724 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2725 cp.iac_lap[0] = 0x00; /* LIAC */
2726 cp.iac_lap[1] = 0x8b;
2727 cp.iac_lap[2] = 0x9e;
2728 cp.iac_lap[3] = 0x33; /* GIAC */
2729 cp.iac_lap[4] = 0x8b;
2730 cp.iac_lap[5] = 0x9e;
2732 /* General discoverable mode */
2734 cp.iac_lap[0] = 0x33; /* GIAC */
2735 cp.iac_lap[1] = 0x8b;
2736 cp.iac_lap[2] = 0x9e;
2739 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2740 (cp.num_iac * 3) + 1, &cp);
/* hci_req_sync() callback run when the discoverable setting changes:
 * updates IAC, scan mode, device class, and — when Set Advertising is
 * in use — the advertising data and (for limited privacy) advertising
 * enable/parameters. 'opt' is unused.
 * NOTE(review): the write_iac() call and the matching hci_dev_lock()
 * are not visible in this extraction; confirm against the full source.
 */
2743 static int discoverable_update(struct hci_request *req, unsigned long opt)
2745 struct hci_dev *hdev = req->hdev;
2749 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2751 __hci_req_update_scan(req);
2752 __hci_req_update_class(req);
2755 /* Advertising instances don't use the global discoverable setting, so
2756 * only update AD if advertising was enabled using Set Advertising.
2758 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2759 __hci_req_update_adv_data(req, 0x00);
2761 /* Discoverable mode affects the local advertising
2762 * address in limited privacy mode.
2764 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2765 if (ext_adv_capable(hdev))
2766 __hci_req_start_ext_adv(req, 0x00);
2768 __hci_req_enable_advertising(req);
2772 hci_dev_unlock(hdev);
/* Deferred work handler for hdev->discoverable_update: run the
 * discoverable-state update synchronously and report the status to
 * the management interface.
 */
2777 static void discoverable_update_work(struct work_struct *work)
2779 struct hci_dev *hdev = container_of(work, struct hci_dev,
2780 discoverable_update);
2783 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2784 mgmt_set_discoverable_complete(hdev, status);
/* Queue the HCI command(s) needed to abort a connection, chosen by
 * the connection's current state:
 *  - established links: Disconnect (or Disconnect Physical Link for
 *    AMP), then mark the connection BT_DISCONN;
 *  - outgoing connects still in progress: LE/ACL Create Connection
 *    Cancel as appropriate;
 *  - incoming connect requests: Reject Connection Request (with a
 *    fixed reason for SCO/eSCO);
 *  - otherwise: simply mark the connection BT_CLOSED.
 * NOTE(review): the case labels of the switch and some assignments
 * (e.g. dc.reason) are not visible in this extraction; code kept
 * byte-identical to the original view.
 */
2787 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2790 switch (conn->state) {
2793 if (conn->type == AMP_LINK) {
2794 struct hci_cp_disconn_phy_link cp;
2796 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2798 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2801 struct hci_cp_disconnect dc;
2803 dc.handle = cpu_to_le16(conn->handle);
2805 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2808 conn->state = BT_DISCONN;
/* Outgoing connection attempt: cancel it at the controller. */
2812 if (conn->type == LE_LINK) {
2813 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2815 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
/* Create Connection Cancel only exists from Bluetooth 1.2 on. */
2817 } else if (conn->type == ACL_LINK) {
2818 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2820 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming connection request: reject it. */
2825 if (conn->type == ACL_LINK) {
2826 struct hci_cp_reject_conn_req rej;
2828 bacpy(&rej.bdaddr, &conn->dst);
2829 rej.reason = reason;
2831 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2833 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2834 struct hci_cp_reject_sync_conn_req rej;
2836 bacpy(&rej.bdaddr, &conn->dst);
2838 /* SCO rejection has its own limited set of
2839 * allowed error values (0x0D-0x0F) which isn't
2840 * compatible with most values passed to this
2841 * function. To be safe hard-code one of the
2842 * values that's suitable for SCO.
2844 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2846 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2851 conn->state = BT_CLOSED;
/* Request-complete callback for hci_abort_conn(): just log a failure
 * status. NOTE(review): the status check guarding this log line is not
 * visible in this extraction.
 */
2856 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2859 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
/* Build and run an HCI request that aborts @conn with @reason.
 * -ENODATA from hci_req_run (empty request) is treated as success;
 * any other error is logged. Returns the hci_req_run result
 * (presumably — return statement not visible in this extraction).
 */
2862 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2864 struct hci_request req;
2867 hci_req_init(&req, conn->hdev);
2869 __hci_abort_conn(&req, conn, reason);
2871 err = hci_req_run(&req, abort_conn_complete);
2872 if (err && err != -ENODATA) {
2873 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
/* hci_req_sync() callback: refresh background (passive) scanning
 * under hdev lock. 'opt' is unused.
 */
2880 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2882 hci_dev_lock(req->hdev);
2883 __hci_update_background_scan(req);
2884 hci_dev_unlock(req->hdev);
/* Deferred work handler for hdev->bg_scan_update: run the background
 * scan update; on failure, fail any LE connection still in BT_CONNECT
 * so it doesn't hang waiting for a scan that never started.
 */
2888 static void bg_scan_update(struct work_struct *work)
2890 struct hci_dev *hdev = container_of(work, struct hci_dev,
2892 struct hci_conn *conn;
2896 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2902 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2904 hci_le_conn_failed(conn, status);
2906 hci_dev_unlock(hdev);
/* hci_req_sync() callback: queue the command(s) to disable LE
 * scanning (rpa_le_conn = false). 'opt' is unused.
 */
2909 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2911 hci_req_add_le_scan_disable(req, false);
/* hci_req_sync() callback: flush the inquiry cache and start a BR/EDR
 * inquiry using the LIAC (limited discovery) or GIAC LAP. 'opt'
 * presumably carries the inquiry length — the assignment to cp.length
 * is not visible in this extraction; confirm against the full source.
 */
2915 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2918 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2919 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2920 struct hci_cp_inquiry cp;
2922 bt_dev_dbg(req->hdev, "");
/* Discard stale cache entries before starting a fresh inquiry. */
2924 hci_dev_lock(req->hdev);
2925 hci_inquiry_cache_flush(req->hdev);
2926 hci_dev_unlock(req->hdev);
2928 memset(&cp, 0, sizeof(cp));
2930 if (req->hdev->discovery.limited)
2931 memcpy(&cp.lap, liac, sizeof(cp.lap));
2933 memcpy(&cp.lap, giac, sizeof(cp.lap));
2937 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Delayed-work handler for hdev->le_scan_disable: stop LE scanning
 * when the discovery timeout fires, then either stop discovery or —
 * for interleaved discovery without controller-side simultaneous
 * support — kick off the BR/EDR inquiry phase.
 */
2942 static void le_scan_disable_work(struct work_struct *work)
2944 struct hci_dev *hdev = container_of(work, struct hci_dev,
2945 le_scan_disable.work);
2948 bt_dev_dbg(hdev, "");
2950 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
/* A pending restart would re-enable the scan we are about to stop. */
2953 cancel_delayed_work(&hdev->le_scan_restart);
2955 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2957 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2962 hdev->discovery.scan_start = 0;
2964 /* If we were running LE only scan, change discovery state. If
2965 * we were running both LE and BR/EDR inquiry simultaneously,
2966 * and BR/EDR inquiry is already finished, stop discovery,
2967 * otherwise BR/EDR inquiry will stop discovery when finished.
2968 * If we will resolve remote device name, do not change
2972 if (hdev->discovery.type == DISCOV_TYPE_LE)
2973 goto discov_stopped;
2975 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2978 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2979 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2980 hdev->discovery.state != DISCOVERY_RESOLVING)
2981 goto discov_stopped;
/* No controller-side simultaneous discovery: run the BR/EDR
 * inquiry phase ourselves now that LE scanning has stopped.
 */
2986 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2987 HCI_CMD_TIMEOUT, &status);
2989 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2990 goto discov_stopped;
2997 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2998 hci_dev_unlock(hdev);
/* hci_req_sync() callback: restart LE scanning to flush controller-side
 * duplicate filtering (needed on controllers with a strict duplicate
 * filter). Disables the scan and immediately re-enables it, using the
 * extended scan commands when the controller supports them. No-op when
 * not scanning or when scanning is paused for suspend.
 */
3001 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3003 struct hci_dev *hdev = req->hdev;
3005 /* If controller is not scanning we are done. */
3006 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3009 if (hdev->scanning_paused) {
3010 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3014 hci_req_add_le_scan_disable(req, false);
3016 if (use_ext_scan(hdev)) {
3017 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3019 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3020 ext_enable_cp.enable = LE_SCAN_ENABLE;
3021 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3023 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3024 sizeof(ext_enable_cp), &ext_enable_cp);
3026 struct hci_cp_le_set_scan_enable cp;
3028 memset(&cp, 0, sizeof(cp));
3029 cp.enable = LE_SCAN_ENABLE;
3030 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3031 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Delayed-work handler for hdev->le_scan_restart: restart the LE scan
 * and then re-queue the le_scan_disable work with the remaining
 * timeout so the overall scan duration stays bounded, accounting for
 * possible jiffies wrap-around when computing elapsed time.
 */
3037 static void le_scan_restart_work(struct work_struct *work)
3039 struct hci_dev *hdev = container_of(work, struct hci_dev,
3040 le_scan_restart.work);
3041 unsigned long timeout, duration, scan_start, now;
3044 bt_dev_dbg(hdev, "");
3046 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3048 bt_dev_err(hdev, "failed to restart LE scan: status %d",
/* Only strict-duplicate-filter controllers with an active scan
 * window need the disable work re-queued.
 */
3055 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3056 !hdev->discovery.scan_start)
3059 /* When the scan was started, hdev->le_scan_disable has been queued
3060 * after duration from scan_start. During scan restart this job
3061 * has been canceled, and we need to queue it again after proper
3062 * timeout, to make sure that scan does not run indefinitely.
3064 duration = hdev->discovery.scan_duration;
3065 scan_start = hdev->discovery.scan_start;
3067 if (now - scan_start <= duration) {
3070 if (now >= scan_start)
3071 elapsed = now - scan_start;
/* jiffies wrapped since scan_start; compute elapsed across the wrap. */
3073 elapsed = ULONG_MAX - scan_start + now;
3075 timeout = duration - elapsed;
3080 queue_delayed_work(hdev->req_workqueue,
3081 &hdev->le_scan_disable, timeout);
3084 hci_dev_unlock(hdev);
/* hci_req_sync() callback: start an active LE scan for discovery.
 * 'opt' carries the scan interval. Temporarily stops any running
 * (background) scan first, sets an appropriate own-address type via
 * hci_update_random_address() (falling back to the public address on
 * error — NOTE(review): the error check guarding that fallback is not
 * visible in this extraction), then starts the active scan with no
 * filter policy and no controller address resolution.
 */
3087 static int active_scan(struct hci_request *req, unsigned long opt)
3089 uint16_t interval = opt;
3090 struct hci_dev *hdev = req->hdev;
3092 /* White list is not used for discovery */
3093 u8 filter_policy = 0x00;
3094 /* Discovery doesn't require controller address resolution */
3095 bool addr_resolv = false;
3098 bt_dev_dbg(hdev, "");
3100 /* If controller is scanning, it means the background scanning is
3101 * running. Thus, we should temporarily stop it in order to set the
3102 * discovery scanning parameters.
3104 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3105 hci_req_add_le_scan_disable(req, false);
3106 cancel_interleave_scan(hdev);
3109 /* All active scans will be done with either a resolvable private
3110 * address (when privacy feature has been enabled) or non-resolvable
3113 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3116 own_addr_type = ADDR_LE_DEV_PUBLIC;
3118 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3119 hdev->le_scan_window_discovery, own_addr_type,
3120 filter_policy, addr_resolv);
/* hci_req_sync() callback for simultaneous discovery: start the active
 * LE scan (passing 'opt' through as the interval) and, if that
 * succeeds, start the BR/EDR inquiry in the same request.
 */
3124 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3128 bt_dev_dbg(req->hdev, "");
3130 err = active_scan(req, opt);
3134 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
/* Dispatch the configured discovery type:
 *  - BREDR: plain inquiry (unless one is already running);
 *  - INTERLEAVED: simultaneous LE+inquiry when the controller
 *    supports it (doubled LE interval), otherwise sequential LE scan
 *    followed later by inquiry;
 *  - LE: active LE scan only.
 * Afterwards schedule the le_scan_disable work after 'timeout', and
 * record scan start/duration when service discovery is used on a
 * controller with a strict duplicate filter (needed for restarts).
 * 'status' receives the result of the underlying sync request.
 */
3137 static void start_discovery(struct hci_dev *hdev, u8 *status)
3139 unsigned long timeout;
3141 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3143 switch (hdev->discovery.type) {
3144 case DISCOV_TYPE_BREDR:
3145 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3146 hci_req_sync(hdev, bredr_inquiry,
3147 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3150 case DISCOV_TYPE_INTERLEAVED:
3151 /* When running simultaneous discovery, the LE scanning time
3152 * should occupy the whole discovery time sine BR/EDR inquiry
3153 * and LE scanning are scheduled by the controller.
3155 * For interleaving discovery in comparison, BR/EDR inquiry
3156 * and LE scanning are done sequentially with separate
3159 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3161 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3162 /* During simultaneous discovery, we double LE scan
3163 * interval. We must leave some time for the controller
3164 * to do BR/EDR inquiry.
3166 hci_req_sync(hdev, interleaved_discov,
3167 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
/* Sequential interleaving: LE scan now, inquiry later from
 * le_scan_disable_work.
 */
3172 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3173 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3174 HCI_CMD_TIMEOUT, status);
3176 case DISCOV_TYPE_LE:
3177 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3178 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3179 HCI_CMD_TIMEOUT, status);
/* Unknown discovery type. */
3182 *status = HCI_ERROR_UNSPECIFIED;
3189 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3191 /* When service discovery is used and the controller has a
3192 * strict duplicate filter, it is important to remember the
3193 * start and duration of the scan. This is required for
3194 * restarting scanning during the discovery phase.
3196 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3197 hdev->discovery.result_filtering) {
3198 hdev->discovery.scan_start = jiffies;
3199 hdev->discovery.scan_duration = timeout;
3202 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
/* Queue the commands needed to stop the current discovery: inquiry
 * cancel and/or LE scan disable while finding/stopping, plain LE scan
 * disable for passive scanning, and a Remote Name Request Cancel when
 * a name resolution is in flight (skipped for LE-only discovery).
 * Returns whether any command was queued (presumably via a local
 * 'ret' — the return statements are not visible in this extraction).
 */
3206 bool hci_req_stop_discovery(struct hci_request *req)
3208 struct hci_dev *hdev = req->hdev;
3209 struct discovery_state *d = &hdev->discovery;
3210 struct hci_cp_remote_name_req_cancel cp;
3211 struct inquiry_entry *e;
3214 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3216 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3217 if (test_bit(HCI_INQUIRY, &hdev->flags))
3218 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3220 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3221 cancel_delayed_work(&hdev->le_scan_disable);
3222 hci_req_add_le_scan_disable(req, false);
3227 /* Passive scanning */
3228 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3229 hci_req_add_le_scan_disable(req, false);
3234 /* No further actions needed for LE-only discovery */
3235 if (d->type == DISCOV_TYPE_LE)
3238 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3239 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3244 bacpy(&cp.bdaddr, &e->data.bdaddr);
3245 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
/* hci_req_sync() callback: stop discovery under hdev lock.
 * 'opt' is unused.
 */
3253 static int stop_discovery(struct hci_request *req, unsigned long opt)
3255 hci_dev_lock(req->hdev);
3256 hci_req_stop_discovery(req);
3257 hci_dev_unlock(req->hdev);
/* Deferred work handler for hdev->discov_update: drive the discovery
 * state machine — start discovery when STARTING (moving to FINDING on
 * success, STOPPED on failure), stop it when STOPPING, and do nothing
 * when already STOPPED.
 */
3262 static void discov_update(struct work_struct *work)
3264 struct hci_dev *hdev = container_of(work, struct hci_dev,
3268 switch (hdev->discovery.state) {
3269 case DISCOVERY_STARTING:
3270 start_discovery(hdev, &status);
3271 mgmt_start_discovery_complete(hdev, status);
3273 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3275 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3277 case DISCOVERY_STOPPING:
3278 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3279 mgmt_stop_discovery_complete(hdev, status);
3281 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3283 case DISCOVERY_STOPPED:
/* Delayed-work handler for hdev->discov_off: the discoverable timeout
 * expired — clear both discoverable flags, reset the timeout, sync the
 * controller state and notify userspace of the new settings.
 */
3289 static void discov_off(struct work_struct *work)
3291 struct hci_dev *hdev = container_of(work, struct hci_dev,
3294 bt_dev_dbg(hdev, "");
3298 /* When discoverable timeout triggers, then just make sure
3299 * the limited discoverable flag is cleared. Even in the case
3300 * of a timeout triggered from general discoverable, it is
3301 * safe to unconditionally clear the flag.
3303 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3304 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3305 hdev->discov_timeout = 0;
3307 hci_dev_unlock(hdev);
3309 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3310 mgmt_new_settings(hdev);
/* hci_req_sync() callback run on power-on: bring the controller's
 * state in line with the host configuration — SSP and Secure
 * Connections host support, LE host support, default advertising
 * data / scan response (and enabling advertising or scheduling the
 * first instance), authentication enable, fast-connectable mode,
 * scan mode, device class, local name and EIR. 'opt' is unused.
 * NOTE(review): several guard/else lines are not visible in this
 * extraction; code kept byte-identical to the original view.
 */
3313 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3315 struct hci_dev *hdev = req->hdev;
/* Enable SSP on the controller if the host wants it but the
 * controller doesn't have it enabled yet.
 */
3320 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3321 !lmp_host_ssp_capable(hdev)) {
3324 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3326 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3329 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3330 sizeof(support), &support);
3334 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3335 lmp_bredr_capable(hdev)) {
3336 struct hci_cp_write_le_host_supported cp;
3341 /* Check first if we already have the right
3342 * host state (host features set)
3344 if (cp.le != lmp_host_le_capable(hdev) ||
3345 cp.simul != lmp_host_le_br_capable(hdev))
3346 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3350 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3351 /* Make sure the controller has a good default for
3352 * advertising data. This also applies to the case
3353 * where BR/EDR was toggled during the AUTO_OFF phase.
3355 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3356 list_empty(&hdev->adv_instances)) {
3359 if (ext_adv_capable(hdev)) {
3360 err = __hci_req_setup_ext_adv_instance(req,
3363 __hci_req_update_scan_rsp_data(req,
3367 __hci_req_update_adv_data(req, 0x00);
3368 __hci_req_update_scan_rsp_data(req, 0x00);
3371 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3372 if (!ext_adv_capable(hdev))
3373 __hci_req_enable_advertising(req);
3375 __hci_req_enable_ext_advertising(req,
/* Advertising instances configured: schedule the first one. */
3378 } else if (!list_empty(&hdev->adv_instances)) {
3379 struct adv_info *adv_instance;
3381 adv_instance = list_first_entry(&hdev->adv_instances,
3382 struct adv_info, list);
3383 __hci_req_schedule_adv_instance(req,
3384 adv_instance->instance,
/* Sync authentication enable with the link-security setting. */
3389 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3390 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3391 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3392 sizeof(link_sec), &link_sec);
3394 if (lmp_bredr_capable(hdev)) {
3395 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3396 __hci_req_write_fast_connectable(req, true);
3398 __hci_req_write_fast_connectable(req, false);
3399 __hci_req_update_scan(req);
3400 __hci_req_update_class(req);
3401 __hci_req_update_name(req);
3402 __hci_req_update_eir(req);
3405 hci_dev_unlock(hdev);
/* Run the power-on synchronization request (powered_update_hci)
 * synchronously; returns its result.
 */
3409 int __hci_req_hci_power_on(struct hci_dev *hdev)
3411 /* Register the available SMP channels (BR/EDR and LE) only when
3412 * successfully powering on the controller. This late
3413 * registration is required so that LE SMP can clearly decide if
3414 * the public address or static address is used.
3418 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
/* Initialize all deferred work items used by the request machinery.
 * Called once during hci_dev setup, before the device is powered on.
 */
3422 void hci_request_setup(struct hci_dev *hdev)
3424 INIT_WORK(&hdev->discov_update, discov_update);
3425 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3426 INIT_WORK(&hdev->scan_update, scan_update_work);
3427 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3428 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3429 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3430 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3431 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3432 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3433 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
/* Cancel any in-flight synchronous request (with ENODEV) and flush all
 * pending work items; the adv-instance expiry work is only cancelled
 * when a timeout is actually armed, and its timeout is reset.
 */
3436 void hci_request_cancel_all(struct hci_dev *hdev)
3438 hci_req_sync_cancel(hdev, ENODEV);
3440 cancel_work_sync(&hdev->discov_update);
3441 cancel_work_sync(&hdev->bg_scan_update);
3442 cancel_work_sync(&hdev->scan_update);
3443 cancel_work_sync(&hdev->connectable_update);
3444 cancel_work_sync(&hdev->discoverable_update);
3445 cancel_delayed_work_sync(&hdev->discov_off);
3446 cancel_delayed_work_sync(&hdev->le_scan_disable);
3447 cancel_delayed_work_sync(&hdev->le_scan_restart);
3449 if (hdev->adv_instance_timeout) {
3450 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3451 hdev->adv_instance_timeout = 0;
3454 cancel_interleave_scan(hdev);