/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2
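/* hdev->req_status tracks the state of the one outstanding synchronous
 * request: it is set to HCI_REQ_PEND when the request is issued, moved to
 * HCI_REQ_DONE by hci_req_sync_complete() once the controller has answered,
 * or to HCI_REQ_CANCELED by hci_req_sync_cancel(); waiters on
 * hdev->req_wait_q are woken in either case.
 */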
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "");

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
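/* Illustrative sketch (not part of the original file): a typical synchronous
 * caller sends one command and blocks until the matching completion, e.g.
 * reading the local version information:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 * The caller then parses the response from skb->data and releases it with
 * kfree_skb().
 */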
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect the HCI_UP
	 * flag against any races from hci_dev_do_close() when the controller
	 * is being removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
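/* Illustrative sketch (not part of the original file): an asynchronous caller
 * batches several commands into one request and submits them together, for
 * example (names such as scan and my_complete_cb are placeholders):
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	__hci_req_update_name(&req);
 *	err = hci_req_run(&req, my_complete_cb);
 *
 * The callback, if given, runs once the last queued command completes.
 */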
353 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
355 struct hci_dev *hdev = req->hdev;
356 struct hci_cp_write_page_scan_activity acp;
359 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
362 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
366 type = PAGE_SCAN_TYPE_INTERLACED;
368 /* 160 msec page scan interval */
369 acp.interval = cpu_to_le16(0x0100);
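		/* Page scan activity values are in 0.625 ms slots, so
		 * 0x0100 = 256 slots * 0.625 ms = 160 ms.
		 */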
371 type = hdev->def_page_scan_type;
372 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
375 acp.window = cpu_to_le16(hdev->def_page_scan_window);
377 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
378 __cpu_to_le16(hdev->page_scan_window) != acp.window)
379 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
382 if (hdev->page_scan_type != type)
383 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}
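/* Descriptive note: once started, interleave scanning alternates between
 * INTERLEAVE_SCAN_ALLOWLIST and INTERLEAVE_SCAN_NO_FILTER (see
 * interleave_scan_work() below), spending hdev->advmon_allowlist_duration
 * and hdev->advmon_no_filter_duration milliseconds in each phase;
 * INTERLEAVE_SCAN_NONE means the rotation is not running.
 */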
/* Return true if an interleave scan was started by this function;
 * otherwise return false.
 */
410 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
412 /* Do interleaved scan only if all of the following are true:
413 * - There is at least one ADV monitor
414 * - At least one pending LE connection or one device to be scanned for
415 * - Monitor offloading is not supported
416 * If so, we should alternate between allowlist scan and one without
417 * any filters to save power.
419 bool use_interleaving = hci_is_adv_monitoring(hdev) &&
420 !(list_empty(&hdev->pend_le_conns) &&
421 list_empty(&hdev->pend_le_reports)) &&
422 hci_get_adv_monitor_offload_ext(hdev) ==
423 HCI_ADV_MONITOR_EXT_NONE;
424 bool is_interleaving = is_interleave_scanning(hdev);
426 if (use_interleaving && !is_interleaving) {
427 start_interleave_scan(hdev);
428 bt_dev_dbg(hdev, "starting interleave scan");
432 if (!use_interleaving && is_interleaving)
433 cancel_interleave_scan(hdev);
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
444 static void __hci_update_background_scan(struct hci_request *req)
446 struct hci_dev *hdev = req->hdev;
448 if (!test_bit(HCI_UP, &hdev->flags) ||
449 test_bit(HCI_INIT, &hdev->flags) ||
450 hci_dev_test_flag(hdev, HCI_SETUP) ||
451 hci_dev_test_flag(hdev, HCI_CONFIG) ||
452 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
453 hci_dev_test_flag(hdev, HCI_UNREGISTER))
456 /* No point in doing scanning if LE support hasn't been enabled */
457 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
460 /* If discovery is active don't interfere with it */
461 if (hdev->discovery.state != DISCOVERY_STOPPED)
464 /* Reset RSSI and UUID filters when starting background scanning
465 * since these filters are meant for service discovery only.
467 * The Start Discovery and Start Service Discovery operations
468 * ensure to set proper values for RSSI threshold and UUID
469 * filter list. So it is safe to just reset them here.
471 hci_discovery_filter_clear(hdev);
473 bt_dev_dbg(hdev, "ADV monitoring is %s",
474 hci_is_adv_monitoring(hdev) ? "on" : "off");
476 if (list_empty(&hdev->pend_le_conns) &&
477 list_empty(&hdev->pend_le_reports) &&
478 !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices
		 * to be scanned for and no ADV monitors, we should stop
		 * the background scanning.
		 */
484 /* If controller is not scanning we are done. */
485 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
488 hci_req_add_le_scan_disable(req, false);
490 bt_dev_dbg(hdev, "stopping background scanning");
492 /* If there is at least one pending LE connection, we should
493 * keep the background scan running.
496 /* If controller is connecting, we should not start scanning
497 * since some controllers are not able to scan and connect at
500 if (hci_lookup_le_connect(hdev))
503 /* If controller is currently scanning, we stop it to ensure we
504 * don't miss any advertising (due to duplicates filter).
506 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
507 hci_req_add_le_scan_disable(req, false);
509 hci_req_add_le_passive_scan(req);
510 bt_dev_dbg(hdev, "starting background scanning");
514 void __hci_req_update_name(struct hci_request *req)
516 struct hci_dev *hdev = req->hdev;
517 struct hci_cp_write_local_name cp;
519 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
521 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
524 #define PNP_INFO_SVCLASS_ID 0x1200
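/* Reminder on the encoding used by the helpers below (standard Bluetooth
 * EIR/AD format, not specific to this file): each field is laid out as
 * [length][type][data], where length counts the type octet plus the data.
 * Worked example for illustration: a complete list of 16-bit UUIDs holding
 * only the Serial Port UUID 0x1101 would be encoded as the four octets
 * 03 03 01 11 (length 3, type EIR_UUID16_ALL, UUID in little-endian order).
 */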
526 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
528 u8 *ptr = data, *uuids_start = NULL;
529 struct bt_uuid *uuid;
534 list_for_each_entry(uuid, &hdev->uuids, list) {
537 if (uuid->size != 16)
540 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
544 if (uuid16 == PNP_INFO_SVCLASS_ID)
550 uuids_start[1] = EIR_UUID16_ALL;
554 /* Stop if not enough space to put next UUID */
555 if ((ptr - data) + sizeof(u16) > len) {
556 uuids_start[1] = EIR_UUID16_SOME;
560 *ptr++ = (uuid16 & 0x00ff);
561 *ptr++ = (uuid16 & 0xff00) >> 8;
562 uuids_start[0] += sizeof(uuid16);
568 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
570 u8 *ptr = data, *uuids_start = NULL;
571 struct bt_uuid *uuid;
576 list_for_each_entry(uuid, &hdev->uuids, list) {
577 if (uuid->size != 32)
583 uuids_start[1] = EIR_UUID32_ALL;
587 /* Stop if not enough space to put next UUID */
588 if ((ptr - data) + sizeof(u32) > len) {
589 uuids_start[1] = EIR_UUID32_SOME;
593 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
595 uuids_start[0] += sizeof(u32);
601 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
603 u8 *ptr = data, *uuids_start = NULL;
604 struct bt_uuid *uuid;
609 list_for_each_entry(uuid, &hdev->uuids, list) {
610 if (uuid->size != 128)
616 uuids_start[1] = EIR_UUID128_ALL;
620 /* Stop if not enough space to put next UUID */
621 if ((ptr - data) + 16 > len) {
622 uuids_start[1] = EIR_UUID128_SOME;
626 memcpy(ptr, uuid->uuid, 16);
628 uuids_start[0] += 16;
634 static void create_eir(struct hci_dev *hdev, u8 *data)
639 name_len = strlen(hdev->dev_name);
645 ptr[1] = EIR_NAME_SHORT;
647 ptr[1] = EIR_NAME_COMPLETE;
649 /* EIR Data length */
650 ptr[0] = name_len + 1;
652 memcpy(ptr + 2, hdev->dev_name, name_len);
654 ptr += (name_len + 2);
657 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
659 ptr[1] = EIR_TX_POWER;
660 ptr[2] = (u8) hdev->inq_tx_power;
665 if (hdev->devid_source > 0) {
667 ptr[1] = EIR_DEVICE_ID;
669 put_unaligned_le16(hdev->devid_source, ptr + 2);
670 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
671 put_unaligned_le16(hdev->devid_product, ptr + 6);
672 put_unaligned_le16(hdev->devid_version, ptr + 8);
677 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
678 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
679 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
682 void __hci_req_update_eir(struct hci_request *req)
684 struct hci_dev *hdev = req->hdev;
685 struct hci_cp_write_eir cp;
687 if (!hdev_is_powered(hdev))
690 if (!lmp_ext_inq_capable(hdev))
693 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
696 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
699 memset(&cp, 0, sizeof(cp));
701 create_eir(hdev, cp.data);
703 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
706 memcpy(hdev->eir, cp.data, sizeof(cp.data));
708 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
711 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
713 struct hci_dev *hdev = req->hdev;
715 if (hdev->scanning_paused) {
716 bt_dev_dbg(hdev, "Scanning is paused for suspend");
721 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
723 if (use_ext_scan(hdev)) {
724 struct hci_cp_le_set_ext_scan_enable cp;
726 memset(&cp, 0, sizeof(cp));
727 cp.enable = LE_SCAN_DISABLE;
728 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
731 struct hci_cp_le_set_scan_enable cp;
733 memset(&cp, 0, sizeof(cp));
734 cp.enable = LE_SCAN_DISABLE;
735 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
738 /* Disable address resolution */
739 if (use_ll_privacy(hdev) &&
740 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
741 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
744 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
748 static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
751 struct hci_cp_le_del_from_accept_list cp;
753 cp.bdaddr_type = bdaddr_type;
754 bacpy(&cp.bdaddr, bdaddr);
756 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
758 hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
760 if (use_ll_privacy(req->hdev) &&
761 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
764 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
766 struct hci_cp_le_del_from_resolv_list cp;
768 cp.bdaddr_type = bdaddr_type;
769 bacpy(&cp.bdaddr, bdaddr);
771 hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
777 /* Adds connection to accept list if needed. On error, returns -1. */
778 static int add_to_accept_list(struct hci_request *req,
779 struct hci_conn_params *params, u8 *num_entries,
782 struct hci_cp_le_add_to_accept_list cp;
783 struct hci_dev *hdev = req->hdev;
785 /* Already in accept list */
786 if (hci_bdaddr_list_lookup(&hdev->le_accept_list, ¶ms->addr,
790 /* Select filter policy to accept all advertising */
791 if (*num_entries >= hdev->le_accept_list_size)
794 /* Accept list can not be used with RPAs */
796 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
797 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) {
801 /* During suspend, only wakeable devices can be in accept list */
802 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
803 params->current_flags))
807 cp.bdaddr_type = params->addr_type;
808 bacpy(&cp.bdaddr, ¶ms->addr);
810 bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
812 hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
814 if (use_ll_privacy(hdev) &&
815 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
818 irk = hci_find_irk_by_addr(hdev, ¶ms->addr,
821 struct hci_cp_le_add_to_resolv_list cp;
823 cp.bdaddr_type = params->addr_type;
824 bacpy(&cp.bdaddr, ¶ms->addr);
825 memcpy(cp.peer_irk, irk->val, 16);
827 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
828 memcpy(cp.local_irk, hdev->irk, 16);
830 memset(cp.local_irk, 0, 16);
832 hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
840 static u8 update_accept_list(struct hci_request *req)
842 struct hci_dev *hdev = req->hdev;
843 struct hci_conn_params *params;
844 struct bdaddr_list *b;
846 bool pend_conn, pend_report;
	/* We allow usage of the accept list even with RPAs in suspend. In the
	 * worst case, we won't be able to wake from devices that use the
	 * privacy 1.2 features. Additionally, once we support privacy 1.2 and
	 * IRK offloading, we can update this to also check for those
	 * conditions.
	 */
	bool allow_rpa = hdev->suspended;
854 if (use_ll_privacy(hdev) &&
855 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
858 /* Go through the current accept list programmed into the
859 * controller one by one and check if that address is still
860 * in the list of pending connections or list of devices to
861 * report. If not present in either list, then queue the
862 * command to remove it from the controller.
864 list_for_each_entry(b, &hdev->le_accept_list, list) {
865 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
868 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
872 /* If the device is not likely to connect or report,
873 * remove it from the accept list.
875 if (!pend_conn && !pend_report) {
876 del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
880 /* Accept list can not be used with RPAs */
882 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
883 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return a filter policy value to not use the
	 * accept list.
	 */
900 list_for_each_entry(params, &hdev->pend_le_conns, action) {
901 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
905 /* After adding all new pending connections, walk through
906 * the list of pending reports and also add these to the
907 * accept list if there is still space. Abort if space runs out.
909 list_for_each_entry(params, &hdev->pend_le_reports, action) {
910 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
914 /* Use the allowlist unless the following conditions are all true:
915 * - We are not currently suspending
916 * - There are 1 or more ADV monitors registered and it's not offloaded
917 * - Interleaved scanning is not currently using the allowlist
919 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
920 hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
921 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
924 /* Select filter policy to use accept list */
928 static bool scan_use_rpa(struct hci_dev *hdev)
930 return hci_dev_test_flag(hdev, HCI_PRIVACY);
933 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
934 u16 window, u8 own_addr_type, u8 filter_policy,
935 bool filter_dup, bool addr_resolv)
937 struct hci_dev *hdev = req->hdev;
939 if (hdev->scanning_paused) {
940 bt_dev_dbg(hdev, "Scanning is paused for suspend");
944 if (use_ll_privacy(hdev) &&
945 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
949 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
952 /* Use ext scanning if set ext scan param and ext scan enable is
955 if (use_ext_scan(hdev)) {
956 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
957 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
958 struct hci_cp_le_scan_phy_params *phy_params;
959 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
962 ext_param_cp = (void *)data;
963 phy_params = (void *)ext_param_cp->data;
965 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
966 ext_param_cp->own_addr_type = own_addr_type;
967 ext_param_cp->filter_policy = filter_policy;
969 plen = sizeof(*ext_param_cp);
971 if (scan_1m(hdev) || scan_2m(hdev)) {
972 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
974 memset(phy_params, 0, sizeof(*phy_params));
975 phy_params->type = type;
976 phy_params->interval = cpu_to_le16(interval);
977 phy_params->window = cpu_to_le16(window);
979 plen += sizeof(*phy_params);
983 if (scan_coded(hdev)) {
984 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
986 memset(phy_params, 0, sizeof(*phy_params));
987 phy_params->type = type;
988 phy_params->interval = cpu_to_le16(interval);
989 phy_params->window = cpu_to_le16(window);
991 plen += sizeof(*phy_params);
995 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
998 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
999 ext_enable_cp.enable = LE_SCAN_ENABLE;
1000 ext_enable_cp.filter_dup = filter_dup;
1002 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
1003 sizeof(ext_enable_cp), &ext_enable_cp);
1005 struct hci_cp_le_set_scan_param param_cp;
1006 struct hci_cp_le_set_scan_enable enable_cp;
1008 memset(¶m_cp, 0, sizeof(param_cp));
1009 param_cp.type = type;
1010 param_cp.interval = cpu_to_le16(interval);
1011 param_cp.window = cpu_to_le16(window);
1012 param_cp.own_address_type = own_addr_type;
1013 param_cp.filter_policy = filter_policy;
1014 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1017 memset(&enable_cp, 0, sizeof(enable_cp));
1018 enable_cp.enable = LE_SCAN_ENABLE;
1019 enable_cp.filter_dup = filter_dup;
1020 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Returns true if an LE connection is in the scanning state */
1026 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1028 struct hci_conn_hash *h = &hdev->conn_hash;
1033 list_for_each_entry_rcu(c, &h->list, list) {
1034 if (c->type == LE_LINK && c->state == BT_CONNECT &&
1035 test_bit(HCI_CONN_SCANNING, &c->flags)) {
1046 /* Ensure to call hci_req_add_le_scan_disable() first to disable the
1047 * controller based address resolution to be able to reconfigure
1050 void hci_req_add_le_passive_scan(struct hci_request *req)
1052 struct hci_dev *hdev = req->hdev;
1055 u16 window, interval;
1056 /* Default is to enable duplicates filter */
1057 u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1058 /* Background scanning should run with address resolution */
1059 bool addr_resolv = true;
1061 if (hdev->scanning_paused) {
1062 bt_dev_dbg(hdev, "Scanning is paused for suspend");
	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
1072 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1076 if (hdev->enable_advmon_interleave_scan &&
1077 __hci_update_interleaved_scan(hdev))
1080 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1081 /* Adding or removing entries from the accept list must
1082 * happen before enabling scanning. The controller does
1083 * not allow accept list modification while scanning.
1085 filter_policy = update_accept_list(req);
1087 /* When the controller is using random resolvable addresses and
1088 * with that having LE privacy enabled, then controllers with
1089 * Extended Scanner Filter Policies support can now enable support
1090 * for handling directed advertising.
1092 * So instead of using filter polices 0x00 (no accept list)
1093 * and 0x01 (accept list enabled) use the new filter policies
1094 * 0x02 (no accept list) and 0x03 (accept list enabled).
1096 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1097 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1098 filter_policy |= 0x02;
1100 if (hdev->suspended) {
1101 window = hdev->le_scan_window_suspend;
1102 interval = hdev->le_scan_int_suspend;
1104 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1105 } else if (hci_is_le_conn_scanning(hdev)) {
1106 window = hdev->le_scan_window_connect;
1107 interval = hdev->le_scan_int_connect;
1108 } else if (hci_is_adv_monitoring(hdev)) {
1109 window = hdev->le_scan_window_adv_monitor;
1110 interval = hdev->le_scan_int_adv_monitor;
1112 /* Disable duplicates filter when scanning for advertisement
1113 * monitor for the following reasons.
1115 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
1116 * controllers ignore RSSI_Sampling_Period when the duplicates
1117 * filter is enabled.
1119 * For SW pattern filtering, when we're not doing interleaved
1120 * scanning, it is necessary to disable duplicates filter,
1121 * otherwise hosts can only receive one advertisement and it's
1122 * impossible to know if a peer is still in range.
1124 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
1126 window = hdev->le_scan_window;
1127 interval = hdev->le_scan_interval;
	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}
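/* Note (general LE scan parameter semantics, not specific to this file):
 * the interval and window values chosen above are passed to the controller
 * in units of 0.625 ms, so e.g. 0x0060 corresponds to 96 * 0.625 ms = 60 ms;
 * the window must never exceed the interval.
 */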
1137 static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1139 struct adv_info *adv_instance;
1141 /* Instance 0x00 always set local name */
1142 if (instance == 0x00)
1145 adv_instance = hci_find_adv_instance(hdev, instance);
1149 if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1150 adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1153 return adv_instance->scan_rsp_len ? true : false;
1156 static void hci_req_clear_event_filter(struct hci_request *req)
1158 struct hci_cp_set_event_filter f;
1160 if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
1163 if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
1164 memset(&f, 0, sizeof(f));
1165 f.flt_type = HCI_FLT_CLEAR_ALL;
1166 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1170 static void hci_req_set_event_filter(struct hci_request *req)
1172 struct bdaddr_list_with_flags *b;
1173 struct hci_cp_set_event_filter f;
1174 struct hci_dev *hdev = req->hdev;
1175 u8 scan = SCAN_DISABLED;
1176 bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
1178 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1181 /* Always clear event filter when starting */
1182 hci_req_clear_event_filter(req);
1184 list_for_each_entry(b, &hdev->accept_list, list) {
1185 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1189 memset(&f, 0, sizeof(f));
1190 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1191 f.flt_type = HCI_FLT_CONN_SETUP;
1192 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1193 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1195 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1196 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1200 if (scan && !scanning) {
1201 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1202 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1203 } else if (!scan && scanning) {
1204 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1205 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1209 static void cancel_adv_timeout(struct hci_dev *hdev)
1211 if (hdev->adv_instance_timeout) {
1212 hdev->adv_instance_timeout = 0;
1213 cancel_delayed_work(&hdev->adv_instance_expire);
1217 /* This function requires the caller holds hdev->lock */
1218 void __hci_req_pause_adv_instances(struct hci_request *req)
1220 bt_dev_dbg(req->hdev, "Pausing advertising instances");
1222 /* Call to disable any advertisements active on the controller.
1223 * This will succeed even if no advertisements are configured.
1225 __hci_req_disable_advertising(req);
1227 /* If we are using software rotation, pause the loop */
1228 if (!ext_adv_capable(req->hdev))
1229 cancel_adv_timeout(req->hdev);
1232 /* This function requires the caller holds hdev->lock */
1233 static void __hci_req_resume_adv_instances(struct hci_request *req)
1235 struct adv_info *adv;
1237 bt_dev_dbg(req->hdev, "Resuming advertising instances");
1239 if (ext_adv_capable(req->hdev)) {
1240 /* Call for each tracked instance to be re-enabled */
1241 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1242 __hci_req_enable_ext_advertising(req,
1247 /* Schedule for most recent instance to be restarted and begin
1248 * the software rotation loop
1250 __hci_req_schedule_adv_instance(req,
1251 req->hdev->cur_adv_instance,
1256 /* This function requires the caller holds hdev->lock */
1257 int hci_req_resume_adv_instances(struct hci_dev *hdev)
1259 struct hci_request req;
1261 hci_req_init(&req, hdev);
1262 __hci_req_resume_adv_instances(&req);
1264 return hci_req_run(&req, NULL);
1267 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1269 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1271 if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1272 test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1273 clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1274 clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1275 wake_up(&hdev->suspend_wait_q);
1278 if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
1279 clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1280 wake_up(&hdev->suspend_wait_q);
1284 static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
1287 struct hci_dev *hdev = req->hdev;
1289 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1290 case HCI_ADV_MONITOR_EXT_MSFT:
1291 msft_req_add_set_filter_enable(req, enable);
1297 /* No need to block when enabling since it's on resume path */
1298 if (hdev->suspended && !enable)
1299 set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
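/* Descriptive note: hci_req_prepare_suspend() below walks the controller
 * through three phases. BT_SUSPEND_DISCONNECT pauses discovery and
 * advertising, disables scanning and soft-disconnects all links;
 * BT_SUSPEND_CONFIGURE_WAKE programs event filters and a low duty cycle
 * passive scan so that wakeable devices can reach the host; any other state
 * restores the pre-suspend configuration.
 */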
1302 /* Call with hci_dev_lock */
1303 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1306 struct hci_conn *conn;
1307 struct hci_request req;
1309 int disconnect_counter;
1311 if (next == hdev->suspend_state) {
1312 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1316 hdev->suspend_state = next;
1317 hci_req_init(&req, hdev);
1319 if (next == BT_SUSPEND_DISCONNECT) {
1320 /* Mark device as suspended */
1321 hdev->suspended = true;
1323 /* Pause discovery if not already stopped */
1324 old_state = hdev->discovery.state;
1325 if (old_state != DISCOVERY_STOPPED) {
1326 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1327 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1328 queue_work(hdev->req_workqueue, &hdev->discov_update);
1331 hdev->discovery_paused = true;
1332 hdev->discovery_old_state = old_state;
1334 /* Stop directed advertising */
1335 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1337 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1338 cancel_delayed_work(&hdev->discov_off);
1339 queue_delayed_work(hdev->req_workqueue,
1340 &hdev->discov_off, 0);
1343 /* Pause other advertisements */
1344 if (hdev->adv_instance_cnt)
1345 __hci_req_pause_adv_instances(&req);
1347 hdev->advertising_paused = true;
1348 hdev->advertising_old_state = old_state;
1350 /* Disable page scan if enabled */
1351 if (test_bit(HCI_PSCAN, &hdev->flags)) {
1352 page_scan = SCAN_DISABLED;
1353 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
1355 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1358 /* Disable LE passive scan if enabled */
1359 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1360 cancel_interleave_scan(hdev);
1361 hci_req_add_le_scan_disable(&req, false);
1364 /* Disable advertisement filters */
1365 hci_req_add_set_adv_filter_enable(&req, false);
1367 /* Prevent disconnects from causing scanning to be re-enabled */
1368 hdev->scanning_paused = true;
1370 /* Run commands before disconnecting */
1371 hci_req_run(&req, suspend_req_complete);
1373 disconnect_counter = 0;
1374 /* Soft disconnect everything (power off) */
1375 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1376 hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1377 disconnect_counter++;
1380 if (disconnect_counter > 0) {
1382 "Had %d disconnects. Will wait on them",
1383 disconnect_counter);
1384 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1386 } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1387 /* Unpause to take care of updating scanning params */
1388 hdev->scanning_paused = false;
1389 /* Enable event filter for paired devices */
1390 hci_req_set_event_filter(&req);
1391 /* Enable passive scan at lower duty cycle */
1392 __hci_update_background_scan(&req);
1393 /* Pause scan changes again. */
1394 hdev->scanning_paused = true;
1395 hci_req_run(&req, suspend_req_complete);
1397 hdev->suspended = false;
1398 hdev->scanning_paused = false;
1400 /* Clear any event filters and restore scan state */
1401 hci_req_clear_event_filter(&req);
1402 __hci_req_update_scan(&req);
1404 /* Reset passive/background scanning to normal */
1405 __hci_update_background_scan(&req);
1406 /* Enable all of the advertisement filters */
1407 hci_req_add_set_adv_filter_enable(&req, true);
1409 /* Unpause directed advertising */
1410 hdev->advertising_paused = false;
1411 if (hdev->advertising_old_state) {
1412 set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1413 hdev->suspend_tasks);
1414 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1415 queue_work(hdev->req_workqueue,
1416 &hdev->discoverable_update);
1417 hdev->advertising_old_state = 0;
1420 /* Resume other advertisements */
1421 if (hdev->adv_instance_cnt)
1422 __hci_req_resume_adv_instances(&req);
1424 /* Unpause discovery */
1425 hdev->discovery_paused = false;
1426 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1427 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1428 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1429 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1430 queue_work(hdev->req_workqueue, &hdev->discov_update);
1433 hci_req_run(&req, suspend_req_complete);
1436 hdev->suspend_state = next;
1439 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1440 wake_up(&hdev->suspend_wait_q);
1443 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1445 return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
1448 void __hci_req_disable_advertising(struct hci_request *req)
1450 if (ext_adv_capable(req->hdev)) {
1451 __hci_req_disable_ext_adv_instance(req, 0x00);
1456 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1460 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1463 struct adv_info *adv_instance;
1465 if (instance == 0x00) {
1466 /* Instance 0 always manages the "Tx Power" and "Flags"
1469 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1471 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1472 * corresponds to the "connectable" instance flag.
1474 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1475 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1477 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1478 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1479 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1480 flags |= MGMT_ADV_FLAG_DISCOV;
1485 adv_instance = hci_find_adv_instance(hdev, instance);
1487 /* Return 0 when we got an invalid instance identifier. */
1491 return adv_instance->flags;
1494 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1496 /* If privacy is not enabled don't use RPA */
1497 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1500 /* If basic privacy mode is enabled use RPA */
1501 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1504 /* If limited privacy mode is enabled don't use RPA if we're
1505 * both discoverable and bondable.
1507 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1508 hci_dev_test_flag(hdev, HCI_BONDABLE))
1511 /* We're neither bondable nor discoverable in the limited
1512 * privacy mode, therefore use RPA.
1517 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1519 /* If there is no connection we are OK to advertise. */
1520 if (hci_conn_num(hdev, LE_LINK) == 0)
1523 /* Check le_states if there is any connection in peripheral role. */
1524 if (hdev->conn_hash.le_num_peripheral > 0) {
1525 /* Peripheral connection state and non connectable mode bit 20.
1527 if (!connectable && !(hdev->le_states[2] & 0x10))
1530 /* Peripheral connection state and connectable mode bit 38
1531 * and scannable bit 21.
1533 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1534 !(hdev->le_states[2] & 0x20)))
1538 /* Check le_states if there is any connection in central role. */
1539 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
1540 /* Central connection state and non connectable mode bit 18. */
1541 if (!connectable && !(hdev->le_states[2] & 0x02))
1544 /* Central connection state and connectable mode bit 35 and
1547 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1548 !(hdev->le_states[2] & 0x08)))
1555 void __hci_req_enable_advertising(struct hci_request *req)
1557 struct hci_dev *hdev = req->hdev;
1558 struct adv_info *adv_instance;
1559 struct hci_cp_le_set_adv_param cp;
1560 u8 own_addr_type, enable = 0x01;
1562 u16 adv_min_interval, adv_max_interval;
1565 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1566 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1568 /* If the "connectable" instance flag was not set, then choose between
1569 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1571 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1572 mgmt_get_connectable(hdev);
1574 if (!is_advertising_allowed(hdev, connectable))
1577 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1578 __hci_req_disable_advertising(req);
1580 /* Clear the HCI_LE_ADV bit temporarily so that the
1581 * hci_update_random_address knows that it's safe to go ahead
1582 * and write a new random address. The flag will be set back on
1583 * as soon as the SET_ADV_ENABLE HCI command completes.
1585 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1587 /* Set require_privacy to true only when non-connectable
1588 * advertising is used. In that case it is fine to use a
1589 * non-resolvable private address.
1591 if (hci_update_random_address(req, !connectable,
1592 adv_use_rpa(hdev, flags),
1593 &own_addr_type) < 0)
1596 memset(&cp, 0, sizeof(cp));
1599 adv_min_interval = adv_instance->min_interval;
1600 adv_max_interval = adv_instance->max_interval;
1602 adv_min_interval = hdev->le_adv_min_interval;
1603 adv_max_interval = hdev->le_adv_max_interval;
1607 cp.type = LE_ADV_IND;
1609 if (adv_cur_instance_is_scannable(hdev))
1610 cp.type = LE_ADV_SCAN_IND;
1612 cp.type = LE_ADV_NONCONN_IND;
1614 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1615 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1616 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1617 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1621 cp.min_interval = cpu_to_le16(adv_min_interval);
1622 cp.max_interval = cpu_to_le16(adv_max_interval);
1623 cp.own_address_type = own_addr_type;
1624 cp.channel_map = hdev->le_adv_channel_map;
1626 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1628 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1631 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1634 size_t complete_len;
1636 /* no space left for name (+ NULL + type + len) */
1637 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1640 /* use complete name if present and fits */
1641 complete_len = strlen(hdev->dev_name);
1642 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1643 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1644 hdev->dev_name, complete_len + 1);
1646 /* use short name if present */
1647 short_len = strlen(hdev->short_name);
1649 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1650 hdev->short_name, short_len + 1);
	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
1656 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1658 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1659 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1661 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1668 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1670 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1673 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1675 u8 scan_rsp_len = 0;
1677 if (hdev->appearance)
1678 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1680 return append_local_name(hdev, ptr, scan_rsp_len);
1683 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1686 struct adv_info *adv_instance;
1688 u8 scan_rsp_len = 0;
1690 adv_instance = hci_find_adv_instance(hdev, instance);
1694 instance_flags = adv_instance->flags;
1696 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
1697 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1699 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1700 adv_instance->scan_rsp_len);
1702 scan_rsp_len += adv_instance->scan_rsp_len;
1704 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1705 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1707 return scan_rsp_len;
1710 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1712 struct hci_dev *hdev = req->hdev;
1715 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1718 if (ext_adv_capable(hdev)) {
1720 struct hci_cp_le_set_ext_scan_rsp_data cp;
1721 u8 data[HCI_MAX_EXT_AD_LENGTH];
1724 memset(&pdu, 0, sizeof(pdu));
1727 len = create_instance_scan_rsp_data(hdev, instance,
1730 len = create_default_scan_rsp_data(hdev, pdu.data);
1732 if (hdev->scan_rsp_data_len == len &&
1733 !memcmp(pdu.data, hdev->scan_rsp_data, len))
1736 memcpy(hdev->scan_rsp_data, pdu.data, len);
1737 hdev->scan_rsp_data_len = len;
1739 pdu.cp.handle = instance;
1740 pdu.cp.length = len;
1741 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1742 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1744 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1745 sizeof(pdu.cp) + len, &pdu.cp);
1747 struct hci_cp_le_set_scan_rsp_data cp;
1749 memset(&cp, 0, sizeof(cp));
1752 len = create_instance_scan_rsp_data(hdev, instance,
1755 len = create_default_scan_rsp_data(hdev, cp.data);
1757 if (hdev->scan_rsp_data_len == len &&
1758 !memcmp(cp.data, hdev->scan_rsp_data, len))
1761 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1762 hdev->scan_rsp_data_len = len;
1766 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1770 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1772 struct adv_info *adv_instance = NULL;
1773 u8 ad_len = 0, flags = 0;
1776 /* Return 0 when the current instance identifier is invalid. */
1778 adv_instance = hci_find_adv_instance(hdev, instance);
1783 instance_flags = get_adv_instance_flags(hdev, instance);
1785 /* If instance already has the flags set skip adding it once
1788 if (adv_instance && eir_get_data(adv_instance->adv_data,
1789 adv_instance->adv_data_len, EIR_FLAGS,
1793 /* The Add Advertising command allows userspace to set both the general
1794 * and limited discoverable flags.
1796 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1797 flags |= LE_AD_GENERAL;
1799 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1800 flags |= LE_AD_LIMITED;
1802 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1803 flags |= LE_AD_NO_BREDR;
1805 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1806 /* If a discovery flag wasn't provided, simply use the global
1810 flags |= mgmt_get_adv_discov_flags(hdev);
1812 /* If flags would still be empty, then there is no need to
1813 * include the "Flags" AD field".
1827 memcpy(ptr, adv_instance->adv_data,
1828 adv_instance->adv_data_len);
1829 ad_len += adv_instance->adv_data_len;
1830 ptr += adv_instance->adv_data_len;
1833 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1836 if (ext_adv_capable(hdev)) {
1838 adv_tx_power = adv_instance->tx_power;
1840 adv_tx_power = hdev->adv_tx_power;
1842 adv_tx_power = hdev->adv_tx_power;
1845 /* Provide Tx Power only if we can provide a valid value for it */
1846 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1848 ptr[1] = EIR_TX_POWER;
1849 ptr[2] = (u8)adv_tx_power;
1859 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1861 struct hci_dev *hdev = req->hdev;
1864 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1867 if (ext_adv_capable(hdev)) {
1869 struct hci_cp_le_set_ext_adv_data cp;
1870 u8 data[HCI_MAX_EXT_AD_LENGTH];
1873 memset(&pdu, 0, sizeof(pdu));
1875 len = create_instance_adv_data(hdev, instance, pdu.data);
1877 /* There's nothing to do if the data hasn't changed */
1878 if (hdev->adv_data_len == len &&
1879 memcmp(pdu.data, hdev->adv_data, len) == 0)
1882 memcpy(hdev->adv_data, pdu.data, len);
1883 hdev->adv_data_len = len;
1885 pdu.cp.length = len;
1886 pdu.cp.handle = instance;
1887 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1888 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1890 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1891 sizeof(pdu.cp) + len, &pdu.cp);
1893 struct hci_cp_le_set_adv_data cp;
1895 memset(&cp, 0, sizeof(cp));
1897 len = create_instance_adv_data(hdev, instance, cp.data);
1899 /* There's nothing to do if the data hasn't changed */
1900 if (hdev->adv_data_len == len &&
1901 memcmp(cp.data, hdev->adv_data, len) == 0)
1904 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1905 hdev->adv_data_len = len;
1909 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1913 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1915 struct hci_request req;
1917 hci_req_init(&req, hdev);
1918 __hci_req_update_adv_data(&req, instance);
1920 return hci_req_run(&req, NULL);
1923 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1926 BT_DBG("%s status %u", hdev->name, status);
1929 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1931 struct hci_request req;
1934 if (!use_ll_privacy(hdev) &&
1935 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1938 hci_req_init(&req, hdev);
1940 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1942 hci_req_run(&req, enable_addr_resolution_complete);
1945 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1947 bt_dev_dbg(hdev, "status %u", status);
1950 void hci_req_reenable_advertising(struct hci_dev *hdev)
1952 struct hci_request req;
1954 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1955 list_empty(&hdev->adv_instances))
1958 hci_req_init(&req, hdev);
1960 if (hdev->cur_adv_instance) {
1961 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1964 if (ext_adv_capable(hdev)) {
1965 __hci_req_start_ext_adv(&req, 0x00);
1967 __hci_req_update_adv_data(&req, 0x00);
1968 __hci_req_update_scan_rsp_data(&req, 0x00);
1969 __hci_req_enable_advertising(&req);
1973 hci_req_run(&req, adv_enable_complete);
1976 static void adv_timeout_expire(struct work_struct *work)
1978 struct hci_dev *hdev = container_of(work, struct hci_dev,
1979 adv_instance_expire.work);
1981 struct hci_request req;
1984 bt_dev_dbg(hdev, "");
1988 hdev->adv_instance_timeout = 0;
1990 instance = hdev->cur_adv_instance;
1991 if (instance == 0x00)
1994 hci_req_init(&req, hdev);
1996 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1998 if (list_empty(&hdev->adv_instances))
1999 __hci_req_disable_advertising(&req);
2001 hci_req_run(&req, NULL);
2004 hci_dev_unlock(hdev);
2007 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
2010 struct hci_dev *hdev = req->hdev;
2015 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2016 hci_req_add_le_scan_disable(req, false);
2017 hci_req_add_le_passive_scan(req);
2019 switch (hdev->interleave_scan_state) {
2020 case INTERLEAVE_SCAN_ALLOWLIST:
2021 bt_dev_dbg(hdev, "next state: allowlist");
2022 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2024 case INTERLEAVE_SCAN_NO_FILTER:
2025 bt_dev_dbg(hdev, "next state: no filter");
2026 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
2028 case INTERLEAVE_SCAN_NONE:
2029 BT_ERR("unexpected error");
2033 hci_dev_unlock(hdev);
2038 static void interleave_scan_work(struct work_struct *work)
2040 struct hci_dev *hdev = container_of(work, struct hci_dev,
2041 interleave_scan.work);
2043 unsigned long timeout;
2045 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2046 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2047 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2048 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2050 bt_dev_err(hdev, "unexpected error");
2054 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2055 HCI_CMD_TIMEOUT, &status);
2057 /* Don't continue interleaving if it was canceled */
2058 if (is_interleave_scanning(hdev))
2059 queue_delayed_work(hdev->req_workqueue,
2060 &hdev->interleave_scan, timeout);
2063 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2064 bool use_rpa, struct adv_info *adv_instance,
2065 u8 *own_addr_type, bdaddr_t *rand_addr)
2069 bacpy(rand_addr, BDADDR_ANY);
2071 /* If privacy is enabled use a resolvable private address. If
2072 * current RPA has expired then generate a new one.
2077 /* If Controller supports LL Privacy use own address type is
2080 if (use_ll_privacy(hdev) &&
2081 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2082 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2084 *own_addr_type = ADDR_LE_DEV_RANDOM;
2087 if (!adv_instance->rpa_expired &&
2088 !bacmp(&adv_instance->random_addr, &hdev->rpa))
2091 adv_instance->rpa_expired = false;
2093 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2094 !bacmp(&hdev->random_addr, &hdev->rpa))
2098 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2100 bt_dev_err(hdev, "failed to generate new RPA");
2104 bacpy(rand_addr, &hdev->rpa);
2106 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2108 queue_delayed_work(hdev->workqueue,
2109 &adv_instance->rpa_expired_cb, to);
2111 queue_delayed_work(hdev->workqueue,
2112 &hdev->rpa_expired, to);
	/* In case of required privacy without a resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;
2132 /* The non-resolvable private address shall not be
2133 * equal to the public address.
2135 if (bacmp(&hdev->bdaddr, &nrpa))
2139 *own_addr_type = ADDR_LE_DEV_RANDOM;
2140 bacpy(rand_addr, &nrpa);
2145 /* No privacy so use a public address. */
2146 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2151 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2153 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2156 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2158 struct hci_cp_le_set_ext_adv_params cp;
2159 struct hci_dev *hdev = req->hdev;
2162 bdaddr_t random_addr;
2165 struct adv_info *adv_instance;
2169 adv_instance = hci_find_adv_instance(hdev, instance);
2173 adv_instance = NULL;
2176 flags = get_adv_instance_flags(hdev, instance);
2178 /* If the "connectable" instance flag was not set, then choose between
2179 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2181 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2182 mgmt_get_connectable(hdev);
2184 if (!is_advertising_allowed(hdev, connectable))
2187 /* Set require_privacy to true only when non-connectable
2188 * advertising is used. In that case it is fine to use a
2189 * non-resolvable private address.
2191 err = hci_get_random_address(hdev, !connectable,
2192 adv_use_rpa(hdev, flags), adv_instance,
2193 &own_addr_type, &random_addr);
2197 memset(&cp, 0, sizeof(cp));
2200 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2201 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2202 cp.tx_power = adv_instance->tx_power;
2204 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2205 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2206 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2209 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2213 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2215 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2216 } else if (adv_instance_is_scannable(hdev, instance) ||
2217 (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
2219 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2221 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2224 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2226 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2229 cp.own_addr_type = own_addr_type;
2230 cp.channel_map = hdev->le_adv_channel_map;
2231 cp.handle = instance;
2233 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2234 cp.primary_phy = HCI_ADV_PHY_1M;
2235 cp.secondary_phy = HCI_ADV_PHY_2M;
2236 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2237 cp.primary_phy = HCI_ADV_PHY_CODED;
2238 cp.secondary_phy = HCI_ADV_PHY_CODED;
2240 /* In all other cases use 1M */
2241 cp.primary_phy = HCI_ADV_PHY_1M;
2242 cp.secondary_phy = HCI_ADV_PHY_1M;
2245 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2247 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2248 bacmp(&random_addr, BDADDR_ANY)) {
2249 struct hci_cp_le_set_adv_set_rand_addr cp;
2251 /* Check if random address need to be updated */
2253 if (!bacmp(&random_addr, &adv_instance->random_addr))
2256 if (!bacmp(&random_addr, &hdev->random_addr))
2260 memset(&cp, 0, sizeof(cp));
2262 cp.handle = instance;
2263 bacpy(&cp.bdaddr, &random_addr);
2266 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2273 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2275 struct hci_dev *hdev = req->hdev;
2276 struct hci_cp_le_set_ext_adv_enable *cp;
2277 struct hci_cp_ext_adv_set *adv_set;
2278 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2279 struct adv_info *adv_instance;
2282 adv_instance = hci_find_adv_instance(hdev, instance);
2286 adv_instance = NULL;
2290 adv_set = (void *) cp->data;
2292 memset(cp, 0, sizeof(*cp));
2295 cp->num_of_sets = 0x01;
2297 memset(adv_set, 0, sizeof(*adv_set));
2299 adv_set->handle = instance;
	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
2304 if (adv_instance && adv_instance->duration) {
2305 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2307 /* Time = N * 10 ms */
2308 adv_set->duration = cpu_to_le16(duration / 10);
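		/* Worked example for illustration: a 5 second instance
		 * timeout gives duration = 5000 ms, i.e. N = 500
		 * (500 * 10 ms = 5 s).
		 */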
2311 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2312 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2318 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2320 struct hci_dev *hdev = req->hdev;
2321 struct hci_cp_le_set_ext_adv_enable *cp;
2322 struct hci_cp_ext_adv_set *adv_set;
2323 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2326 /* If request specifies an instance that doesn't exist, fail */
2327 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2330 memset(data, 0, sizeof(data));
cp = (void *)data;
adv_set = (void *)cp->data;
2335 /* Instance 0x00 indicates all advertising instances will be disabled */
2336 cp->num_of_sets = !!instance;
2339 adv_set->handle = instance;
2341 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2342 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
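
/* Remove the advertising set identified by @instance from the controller
 * using the LE Remove Advertising Set command.
 */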
2347 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2349 struct hci_dev *hdev = req->hdev;
/* If the request specifies an instance that doesn't exist, fail */
if (instance > 0 && !hci_find_adv_instance(hdev, instance))
	return -EINVAL;
2355 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
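
/* (Re)start extended advertising for @instance: disable the set first if
 * the controller already knows about it, then program the advertising
 * parameters, refresh the scan response data and enable it again.
 */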
2360 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2362 struct hci_dev *hdev = req->hdev;
2363 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
/* If the instance isn't pending, the controller already knows about
 * it, and it's safe to disable it before setting it up again.
 */
2369 if (adv_instance && !adv_instance->pending)
2370 __hci_req_disable_ext_adv_instance(req, instance);
err = __hci_req_setup_ext_adv_instance(req, instance);
if (err < 0)
	return err;
2376 __hci_req_update_scan_rsp_data(req, instance);
2377 __hci_req_enable_ext_advertising(req, instance);
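
/* Schedule advertising instance @instance: derive the timeout from the
 * instance duration and remaining lifetime, arm the expiry work for
 * legacy advertising, and program the controller unless the very same
 * instance is already being advertised (and @force is false).
 */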
2382 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2385 struct hci_dev *hdev = req->hdev;
2386 struct adv_info *adv_instance = NULL;
2389 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
    list_empty(&hdev->adv_instances))
	return -EPERM;

if (hdev->adv_instance_timeout)
	return -EBUSY;

adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
	return -ENOENT;
2400 /* A zero timeout means unlimited advertising. As long as there is
2401 * only one instance, duration should be ignored. We still set a timeout
2402 * in case further instances are being added later on.
2404 * If the remaining lifetime of the instance is more than the duration
2405 * then the timeout corresponds to the duration, otherwise it will be
2406 * reduced to the remaining instance lifetime.
2408 if (adv_instance->timeout == 0 ||
2409 adv_instance->duration <= adv_instance->remaining_time)
2410 timeout = adv_instance->duration;
2412 timeout = adv_instance->remaining_time;
2414 /* The remaining time is being reduced unless the instance is being
 * advertised without a time limit.
 */
2417 if (adv_instance->timeout)
2418 adv_instance->remaining_time =
2419 adv_instance->remaining_time - timeout;
/* Only use the expire work for scheduling instances with legacy
 * advertising; with extended advertising the controller enforces the
 * duration itself (see __hci_req_enable_ext_advertising).
 */
2422 if (!ext_adv_capable(hdev)) {
2423 hdev->adv_instance_timeout = timeout;
2424 queue_delayed_work(hdev->req_workqueue,
2425 &hdev->adv_instance_expire,
2426 msecs_to_jiffies(timeout * 1000));
2429 /* If we're just re-scheduling the same instance again then do not
 * execute any HCI commands. This happens when a single instance is
 * being advertised.
 */
2433 if (!force && hdev->cur_adv_instance == instance &&
2434 hci_dev_test_flag(hdev, HCI_LE_ADV))
2437 hdev->cur_adv_instance = instance;
2438 if (ext_adv_capable(hdev)) {
2439 __hci_req_start_ext_adv(req, instance);
2441 __hci_req_update_adv_data(req, instance);
2442 __hci_req_update_scan_rsp_data(req, instance);
2443 __hci_req_enable_advertising(req);
2449 /* For a single instance:
2450 * - force == true: The instance will be removed even when its remaining
2451 * lifetime is not zero.
2452 * - force == false: the instance will be deactivated but kept stored unless
2453 * the remaining lifetime is zero.
2455 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their
 *   timeout setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
2460 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2461 struct hci_request *req, u8 instance,
2464 struct adv_info *adv_instance, *n, *next_instance = NULL;
2468 /* Cancel any timeout concerning the removed instance(s). */
2469 if (!instance || hdev->cur_adv_instance == instance)
2470 cancel_adv_timeout(hdev);
2472 /* Get the next instance to advertise BEFORE we remove
2473 * the current one. This can be the same instance again
2474 * if there is only one instance.
2476 if (instance && hdev->cur_adv_instance == instance)
2477 next_instance = hci_get_next_instance(hdev, instance);
2479 if (instance == 0x00) {
2480 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
if (!(force || adv_instance->timeout))
	continue;
2485 rem_inst = adv_instance->instance;
2486 err = hci_remove_adv_instance(hdev, rem_inst);
2488 mgmt_advertising_removed(sk, hdev, rem_inst);
2491 adv_instance = hci_find_adv_instance(hdev, instance);
2493 if (force || (adv_instance && adv_instance->timeout &&
2494 !adv_instance->remaining_time)) {
2495 /* Don't advertise a removed instance. */
2496 if (next_instance &&
2497 next_instance->instance == instance)
2498 next_instance = NULL;
2500 err = hci_remove_adv_instance(hdev, instance);
2502 mgmt_advertising_removed(sk, hdev, instance);
2506 if (!req || !hdev_is_powered(hdev) ||
2507 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2510 if (next_instance && !ext_adv_capable(hdev))
2511 __hci_req_schedule_adv_instance(req, next_instance->instance,
2515 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2517 struct hci_dev *hdev = req->hdev;
2519 /* If we're advertising or initiating an LE connection we can't
2520 * go ahead and change the random address at this time. This is
2521 * because the eventual initiator address used for the
2522 * subsequently created connection will be undefined (some
2523 * controllers use the new address and others the one we had
2524 * when the operation started).
2526 * In this kind of scenario skip the update and let the random
2527 * address be updated at the next cycle.
2529 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2530 hci_lookup_le_connect(hdev)) {
2531 bt_dev_dbg(hdev, "Deferring random address update");
2532 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2536 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
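
/* Select the own address type for the next LE operation and, when a new
 * random address is required, queue the HCI command that programs it.
 * Returns 0 on success with *own_addr_type set accordingly.
 */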
2539 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2540 bool use_rpa, u8 *own_addr_type)
2542 struct hci_dev *hdev = req->hdev;
2545 /* If privacy is enabled use a resolvable private address. If
 * current RPA has expired or there is something other than
2547 * the current RPA in use, then generate a new one.
/* If the controller supports LL Privacy, use own address type 0x03
 * (resolvable private address, resolved by the controller).
 */
2555 if (use_ll_privacy(hdev) &&
2556 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2557 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2559 *own_addr_type = ADDR_LE_DEV_RANDOM;
2561 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2562 !bacmp(&hdev->random_addr, &hdev->rpa))
2565 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2567 bt_dev_err(hdev, "failed to generate new RPA");
2571 set_random_addr(req, &hdev->rpa);
2573 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2574 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2579 /* In case of required privacy without resolvable private address,
 * use a non-resolvable private address. This is useful for active
2581 * scanning and non-connectable advertising.
2583 if (require_privacy) {
/* The non-resolvable private address is generated
 * from six random bytes with the two most
 * significant bits cleared.
 */
get_random_bytes(&nrpa, 6);
nrpa.b[5] &= 0x3f;
2594 /* The non-resolvable private address shall not be
2595 * equal to the public address.
2597 if (bacmp(&hdev->bdaddr, &nrpa))
2601 *own_addr_type = ADDR_LE_DEV_RANDOM;
2602 set_random_addr(req, &nrpa);
2606 /* If forcing static address is in use or there is no public
2607 * address use the static address as random address (but skip
 * the HCI command if the current random address is already the
 * static one).
2611 * In case BR/EDR has been disabled on a dual-mode controller
2612 * and a static address has been configured, then use that
2613 * address instead of the public BR/EDR address.
2615 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2616 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2617 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2618 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2619 *own_addr_type = ADDR_LE_DEV_RANDOM;
2620 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2621 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2622 &hdev->static_addr);
/* Neither privacy nor static address is being used so use a
 * public address.
 */
2629 *own_addr_type = ADDR_LE_DEV_PUBLIC;
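
/* Return true if the accept list contains at least one BR/EDR device
 * that is not currently connected (or completing its configuration).
 */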
2634 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2636 struct bdaddr_list *b;
2638 list_for_each_entry(b, &hdev->accept_list, list) {
2639 struct hci_conn *conn;
2641 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2645 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2652 void __hci_req_update_scan(struct hci_request *req)
2654 struct hci_dev *hdev = req->hdev;
2657 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2660 if (!hdev_is_powered(hdev))
2663 if (mgmt_powering_down(hdev))
2666 if (hdev->scanning_paused)
2669 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
    disconnected_accept_list_entries(hdev))
	scan = SCAN_PAGE;
else
	scan = SCAN_DISABLED;
2675 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2676 scan |= SCAN_INQUIRY;
2678 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
	return;
2682 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
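
/* Run __hci_req_update_scan() from the hdev->scan_update work item as a
 * synchronous HCI request.
 */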
2685 static int update_scan(struct hci_request *req, unsigned long opt)
2687 hci_dev_lock(req->hdev);
2688 __hci_req_update_scan(req);
2689 hci_dev_unlock(req->hdev);
2693 static void scan_update_work(struct work_struct *work)
2695 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2697 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2700 static int connectable_update(struct hci_request *req, unsigned long opt)
2702 struct hci_dev *hdev = req->hdev;
2706 __hci_req_update_scan(req);
2708 /* If BR/EDR is not enabled and we disable advertising as a
2709 * by-product of disabling connectable, we need to update the
2710 * advertising flags.
2712 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2713 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2715 /* Update the advertising parameters if necessary */
2716 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2717 !list_empty(&hdev->adv_instances)) {
2718 if (ext_adv_capable(hdev))
2719 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2721 __hci_req_enable_advertising(req);
2724 __hci_update_background_scan(req);
2726 hci_dev_unlock(hdev);
2731 static void connectable_update_work(struct work_struct *work)
2733 struct hci_dev *hdev = container_of(work, struct hci_dev,
2734 connectable_update);
2737 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2738 mgmt_set_connectable_complete(hdev, status);
2741 static u8 get_service_classes(struct hci_dev *hdev)
2743 struct bt_uuid *uuid;
2746 list_for_each_entry(uuid, &hdev->uuids, list)
2747 val |= uuid->svc_hint;
2752 void __hci_req_update_class(struct hci_request *req)
2754 struct hci_dev *hdev = req->hdev;
2757 bt_dev_dbg(hdev, "");
2759 if (!hdev_is_powered(hdev))
2762 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2765 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2768 cod[0] = hdev->minor_class;
2769 cod[1] = hdev->major_class;
2770 cod[2] = get_service_classes(hdev);
if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
	cod[1] |= 0x20; /* Limited Discoverable bit */
2775 if (memcmp(cod, hdev->dev_class, 3) == 0)
2778 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2781 static void write_iac(struct hci_request *req)
2783 struct hci_dev *hdev = req->hdev;
2784 struct hci_cp_write_current_iac_lap cp;
2786 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2789 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2790 /* Limited discoverable mode */
2791 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2792 cp.iac_lap[0] = 0x00; /* LIAC */
2793 cp.iac_lap[1] = 0x8b;
2794 cp.iac_lap[2] = 0x9e;
2795 cp.iac_lap[3] = 0x33; /* GIAC */
2796 cp.iac_lap[4] = 0x8b;
2797 cp.iac_lap[5] = 0x9e;
2799 /* General discoverable mode */
2801 cp.iac_lap[0] = 0x33; /* GIAC */
2802 cp.iac_lap[1] = 0x8b;
2803 cp.iac_lap[2] = 0x9e;
2806 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2807 (cp.num_iac * 3) + 1, &cp);
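
/* Request callback for a discoverable setting change: update the BR/EDR
 * scan mode and class of device, and refresh the LE advertising data
 * (and, in limited privacy mode, restart advertising so the advertising
 * address is updated as well).
 */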
2810 static int discoverable_update(struct hci_request *req, unsigned long opt)
2812 struct hci_dev *hdev = req->hdev;
2816 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2818 __hci_req_update_scan(req);
2819 __hci_req_update_class(req);
2822 /* Advertising instances don't use the global discoverable setting, so
2823 * only update AD if advertising was enabled using Set Advertising.
2825 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2826 __hci_req_update_adv_data(req, 0x00);
2828 /* Discoverable mode affects the local advertising
2829 * address in limited privacy mode.
2831 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2832 if (ext_adv_capable(hdev))
2833 __hci_req_start_ext_adv(req, 0x00);
2835 __hci_req_enable_advertising(req);
2839 hci_dev_unlock(hdev);
2844 static void discoverable_update_work(struct work_struct *work)
2846 struct hci_dev *hdev = container_of(work, struct hci_dev,
2847 discoverable_update);
2850 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2851 mgmt_set_discoverable_complete(hdev, status);
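
/* Queue the HCI commands needed to tear down @conn based on its current
 * state: disconnect an established link, cancel a pending connection
 * attempt, or reject an incoming connection request.
 */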
2854 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2857 switch (conn->state) {
2860 if (conn->type == AMP_LINK) {
2861 struct hci_cp_disconn_phy_link cp;
2863 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2865 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2868 struct hci_cp_disconnect dc;
2870 dc.handle = cpu_to_le16(conn->handle);
2872 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2875 conn->state = BT_DISCONN;
2879 if (conn->type == LE_LINK) {
2880 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2882 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2884 } else if (conn->type == ACL_LINK) {
2885 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2887 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2892 if (conn->type == ACL_LINK) {
2893 struct hci_cp_reject_conn_req rej;
2895 bacpy(&rej.bdaddr, &conn->dst);
2896 rej.reason = reason;
2898 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2900 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2901 struct hci_cp_reject_sync_conn_req rej;
2903 bacpy(&rej.bdaddr, &conn->dst);
2905 /* SCO rejection has its own limited set of
2906 * allowed error values (0x0D-0x0F) which isn't
2907 * compatible with most values passed to this
 * function. To be safe, hard-code one of the
2909 * values that's suitable for SCO.
2911 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2913 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2918 conn->state = BT_CLOSED;
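
/* Completion callback for hci_abort_conn(); a non-zero status is only
 * logged since there is nothing else to clean up here.
 */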
2923 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2926 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2929 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2931 struct hci_request req;
2934 hci_req_init(&req, conn->hdev);
2936 __hci_abort_conn(&req, conn, reason);
2938 err = hci_req_run(&req, abort_conn_complete);
2939 if (err && err != -ENODATA) {
2940 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2947 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2949 hci_dev_lock(req->hdev);
2950 __hci_update_background_scan(req);
2951 hci_dev_unlock(req->hdev);
2955 static void bg_scan_update(struct work_struct *work)
2957 struct hci_dev *hdev = container_of(work, struct hci_dev,
2959 struct hci_conn *conn;
2963 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2969 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2971 hci_le_conn_failed(conn, status);
2973 hci_dev_unlock(hdev);
2976 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2978 hci_req_add_le_scan_disable(req, false);
2982 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2985 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2986 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2987 struct hci_cp_inquiry cp;
2989 if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2992 bt_dev_dbg(req->hdev, "");
2994 hci_dev_lock(req->hdev);
2995 hci_inquiry_cache_flush(req->hdev);
2996 hci_dev_unlock(req->hdev);
2998 memset(&cp, 0, sizeof(cp));
3000 if (req->hdev->discovery.limited)
3001 memcpy(&cp.lap, liac, sizeof(cp.lap));
3003 memcpy(&cp.lap, giac, sizeof(cp.lap));
3007 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
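
/* Work callback run when the LE scan duration expires: disable LE
 * scanning and, depending on the discovery type, either stop discovery
 * or move on to the BR/EDR inquiry phase of interleaved discovery.
 */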
3012 static void le_scan_disable_work(struct work_struct *work)
3014 struct hci_dev *hdev = container_of(work, struct hci_dev,
3015 le_scan_disable.work);
3018 bt_dev_dbg(hdev, "");
3020 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3023 cancel_delayed_work(&hdev->le_scan_restart);
3025 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
3027 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
3032 hdev->discovery.scan_start = 0;
3034 /* If we were running LE only scan, change discovery state. If
3035 * we were running both LE and BR/EDR inquiry simultaneously,
3036 * and BR/EDR inquiry is already finished, stop discovery,
3037 * otherwise BR/EDR inquiry will stop discovery when finished.
3038 * If we will resolve remote device name, do not change
3042 if (hdev->discovery.type == DISCOV_TYPE_LE)
3043 goto discov_stopped;
3045 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3048 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3049 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3050 hdev->discovery.state != DISCOVERY_RESOLVING)
3051 goto discov_stopped;
3056 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3057 HCI_CMD_TIMEOUT, &status);
3059 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3060 goto discov_stopped;
3067 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3068 hci_dev_unlock(hdev);
3071 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3073 struct hci_dev *hdev = req->hdev;
3075 /* If controller is not scanning we are done. */
3076 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3079 if (hdev->scanning_paused) {
3080 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3084 hci_req_add_le_scan_disable(req, false);
3086 if (use_ext_scan(hdev)) {
3087 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3089 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3090 ext_enable_cp.enable = LE_SCAN_ENABLE;
3091 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3093 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3094 sizeof(ext_enable_cp), &ext_enable_cp);
3096 struct hci_cp_le_set_scan_enable cp;
3098 memset(&cp, 0, sizeof(cp));
3099 cp.enable = LE_SCAN_ENABLE;
3100 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3101 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
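
/* Work callback that restarts LE scanning on controllers with a strict
 * duplicate filter and re-arms le_scan_disable with the remaining scan
 * duration.
 */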
3107 static void le_scan_restart_work(struct work_struct *work)
3109 struct hci_dev *hdev = container_of(work, struct hci_dev,
3110 le_scan_restart.work);
3111 unsigned long timeout, duration, scan_start, now;
3114 bt_dev_dbg(hdev, "");
3116 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3118 bt_dev_err(hdev, "failed to restart LE scan: status %d",
3125 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3126 !hdev->discovery.scan_start)
/* When the scan was started, hdev->le_scan_disable was queued to run
 * 'duration' after scan_start. That work was canceled when the scan was
 * restarted, so queue it again with the proper remaining timeout to make
 * sure the scan does not run indefinitely.
 */
3134 duration = hdev->discovery.scan_duration;
3135 scan_start = hdev->discovery.scan_start;
3137 if (now - scan_start <= duration) {
3140 if (now >= scan_start)
3141 elapsed = now - scan_start;
3143 elapsed = ULONG_MAX - scan_start + now;
3145 timeout = duration - elapsed;
3150 queue_delayed_work(hdev->req_workqueue,
3151 &hdev->le_scan_disable, timeout);
3154 hci_dev_unlock(hdev);
3157 static int active_scan(struct hci_request *req, unsigned long opt)
3159 uint16_t interval = opt;
3160 struct hci_dev *hdev = req->hdev;
3162 /* Accept list is not used for discovery */
3163 u8 filter_policy = 0x00;
3164 /* Default is to enable duplicates filter */
3165 u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3166 /* Discovery doesn't require controller address resolution */
3167 bool addr_resolv = false;
3170 bt_dev_dbg(hdev, "");
3172 /* If controller is scanning, it means the background scanning is
3173 * running. Thus, we should temporarily stop it in order to set the
3174 * discovery scanning parameters.
3176 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3177 hci_req_add_le_scan_disable(req, false);
3178 cancel_interleave_scan(hdev);
3181 /* All active scans will be done with either a resolvable private
 * address (when the privacy feature has been enabled) or a
 * non-resolvable private address.
 */
3185 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3188 own_addr_type = ADDR_LE_DEV_PUBLIC;
3190 if (hci_is_adv_monitoring(hdev)) {
3191 /* Duplicate filter should be disabled when some advertisement
3192 * monitor is activated, otherwise AdvMon can only receive one
3193 * advertisement for one peer(*) during active scanning, and
3194 * might report loss to these peers.
3196 * Note that different controllers have different meanings of
3197 * |duplicate|. Some of them consider packets with the same
3198 * address as duplicate, and others consider packets with the
 * same address and the same RSSI as duplicate. Although in the
 * latter case we don't strictly need to disable the duplicate
 * filter, active scanning usually runs only for a short period of
 * time, so the power impact should be negligible.
 */
3204 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
3207 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3208 hdev->le_scan_window_discovery, own_addr_type,
3209 filter_policy, filter_dup, addr_resolv);
3213 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3217 bt_dev_dbg(req->hdev, "");
3219 err = active_scan(req, opt);
3223 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
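
/* Start discovery according to hdev->discovery.type and, for LE-based
 * discovery, queue the delayed work that stops LE scanning once the
 * timeout expires.
 */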
3226 static void start_discovery(struct hci_dev *hdev, u8 *status)
3228 unsigned long timeout;
3230 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3232 switch (hdev->discovery.type) {
3233 case DISCOV_TYPE_BREDR:
3234 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3235 hci_req_sync(hdev, bredr_inquiry,
3236 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3239 case DISCOV_TYPE_INTERLEAVED:
3240 /* When running simultaneous discovery, the LE scanning time
 * should occupy the whole discovery time since BR/EDR inquiry
3242 * and LE scanning are scheduled by the controller.
3244 * For interleaving discovery in comparison, BR/EDR inquiry
 * and LE scanning are done sequentially with separate
 * timeouts.
 */
3248 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3250 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3251 /* During simultaneous discovery, we double LE scan
3252 * interval. We must leave some time for the controller
3253 * to do BR/EDR inquiry.
3255 hci_req_sync(hdev, interleaved_discov,
3256 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3261 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3262 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3263 HCI_CMD_TIMEOUT, status);
3265 case DISCOV_TYPE_LE:
3266 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3267 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3268 HCI_CMD_TIMEOUT, status);
3271 *status = HCI_ERROR_UNSPECIFIED;
3278 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3280 /* When service discovery is used and the controller has a
3281 * strict duplicate filter, it is important to remember the
3282 * start and duration of the scan. This is required for
3283 * restarting scanning during the discovery phase.
3285 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3286 hdev->discovery.result_filtering) {
3287 hdev->discovery.scan_start = jiffies;
3288 hdev->discovery.scan_duration = timeout;
3291 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3295 bool hci_req_stop_discovery(struct hci_request *req)
3297 struct hci_dev *hdev = req->hdev;
3298 struct discovery_state *d = &hdev->discovery;
3299 struct hci_cp_remote_name_req_cancel cp;
3300 struct inquiry_entry *e;
3303 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3305 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3306 if (test_bit(HCI_INQUIRY, &hdev->flags))
3307 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3309 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3310 cancel_delayed_work(&hdev->le_scan_disable);
3311 cancel_delayed_work(&hdev->le_scan_restart);
3312 hci_req_add_le_scan_disable(req, false);
3317 /* Passive scanning */
3318 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3319 hci_req_add_le_scan_disable(req, false);
3324 /* No further actions needed for LE-only discovery */
3325 if (d->type == DISCOV_TYPE_LE)
3328 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3329 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3334 bacpy(&cp.bdaddr, &e->data.bdaddr);
3335 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3343 static int stop_discovery(struct hci_request *req, unsigned long opt)
3345 hci_dev_lock(req->hdev);
3346 hci_req_stop_discovery(req);
3347 hci_dev_unlock(req->hdev);
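
/* Work callback driving the mgmt discovery state machine: start or stop
 * discovery as requested and report the result back to mgmt.
 */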
3352 static void discov_update(struct work_struct *work)
3354 struct hci_dev *hdev = container_of(work, struct hci_dev,
3358 switch (hdev->discovery.state) {
3359 case DISCOVERY_STARTING:
3360 start_discovery(hdev, &status);
3361 mgmt_start_discovery_complete(hdev, status);
3363 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3365 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3367 case DISCOVERY_STOPPING:
3368 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3369 mgmt_stop_discovery_complete(hdev, status);
3371 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3373 case DISCOVERY_STOPPED:
3379 static void discov_off(struct work_struct *work)
3381 struct hci_dev *hdev = container_of(work, struct hci_dev,
3384 bt_dev_dbg(hdev, "");
3388 /* When discoverable timeout triggers, then just make sure
3389 * the limited discoverable flag is cleared. Even in the case
3390 * of a timeout triggered from general discoverable, it is
3391 * safe to unconditionally clear the flag.
3393 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3394 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3395 hdev->discov_timeout = 0;
3397 hci_dev_unlock(hdev);
3399 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3400 mgmt_new_settings(hdev);
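
/* Bring the controller in sync with its mgmt settings after power on:
 * SSP/SC and LE host support, advertising state, link security, scan
 * mode, class of device, name and EIR data.
 */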
3403 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3405 struct hci_dev *hdev = req->hdev;
3410 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3411 !lmp_host_ssp_capable(hdev)) {
3414 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3416 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3419 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3420 sizeof(support), &support);
3424 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3425 lmp_bredr_capable(hdev)) {
3426 struct hci_cp_write_le_host_supported cp;
3431 /* Check first if we already have the right
3432 * host state (host features set)
3434 if (cp.le != lmp_host_le_capable(hdev) ||
3435 cp.simul != lmp_host_le_br_capable(hdev))
3436 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3440 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3441 /* Make sure the controller has a good default for
3442 * advertising data. This also applies to the case
3443 * where BR/EDR was toggled during the AUTO_OFF phase.
3445 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3446 list_empty(&hdev->adv_instances)) {
3449 if (ext_adv_capable(hdev)) {
3450 err = __hci_req_setup_ext_adv_instance(req,
3453 __hci_req_update_scan_rsp_data(req,
3457 __hci_req_update_adv_data(req, 0x00);
3458 __hci_req_update_scan_rsp_data(req, 0x00);
3461 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3462 if (!ext_adv_capable(hdev))
3463 __hci_req_enable_advertising(req);
3465 __hci_req_enable_ext_advertising(req,
3468 } else if (!list_empty(&hdev->adv_instances)) {
3469 struct adv_info *adv_instance;
3471 adv_instance = list_first_entry(&hdev->adv_instances,
3472 struct adv_info, list);
3473 __hci_req_schedule_adv_instance(req,
3474 adv_instance->instance,
3479 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3480 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3481 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3482 sizeof(link_sec), &link_sec);
3484 if (lmp_bredr_capable(hdev)) {
3485 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3486 __hci_req_write_fast_connectable(req, true);
3488 __hci_req_write_fast_connectable(req, false);
3489 __hci_req_update_scan(req);
3490 __hci_req_update_class(req);
3491 __hci_req_update_name(req);
3492 __hci_req_update_eir(req);
3495 hci_dev_unlock(hdev);
3499 int __hci_req_hci_power_on(struct hci_dev *hdev)
3501 /* Register the available SMP channels (BR/EDR and LE) only when
3502 * successfully powering on the controller. This late
3503 * registration is required so that LE SMP can clearly decide if
3504 * the public address or static address is used.
3508 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3512 void hci_request_setup(struct hci_dev *hdev)
3514 INIT_WORK(&hdev->discov_update, discov_update);
3515 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3516 INIT_WORK(&hdev->scan_update, scan_update_work);
3517 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3518 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3519 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3520 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3521 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3522 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3523 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3526 void hci_request_cancel_all(struct hci_dev *hdev)
3528 hci_req_sync_cancel(hdev, ENODEV);
3530 cancel_work_sync(&hdev->discov_update);
3531 cancel_work_sync(&hdev->bg_scan_update);
3532 cancel_work_sync(&hdev->scan_update);
3533 cancel_work_sync(&hdev->connectable_update);
3534 cancel_work_sync(&hdev->discoverable_update);
3535 cancel_delayed_work_sync(&hdev->discov_off);
3536 cancel_delayed_work_sync(&hdev->le_scan_disable);
3537 cancel_delayed_work_sync(&hdev->le_scan_restart);
3539 if (hdev->adv_instance_timeout) {
3540 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3541 hdev->adv_instance_timeout = 0;
3544 cancel_interleave_scan(hdev);