/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24 #include <linux/sched/signal.h>
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
31 #include "hci_request.h"
/* States of a synchronous HCI request (hdev->req_status) */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2
38 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
40 skb_queue_head_init(&req->cmd_q);
45 void hci_req_purge(struct hci_request *req)
47 skb_queue_purge(&req->cmd_q);
50 bool hci_req_status_pend(struct hci_dev *hdev)
52 return hdev->req_status == HCI_REQ_PEND;
55 static int req_run(struct hci_request *req, hci_req_complete_t complete,
56 hci_req_complete_skb_t complete_skb)
58 struct hci_dev *hdev = req->hdev;
62 bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
64 /* If an error occurred during request building, remove all HCI
65 * commands queued on the HCI request queue.
68 skb_queue_purge(&req->cmd_q);
72 /* Do not allow empty requests */
73 if (skb_queue_empty(&req->cmd_q))
76 skb = skb_peek_tail(&req->cmd_q);
78 bt_cb(skb)->hci.req_complete = complete;
79 } else if (complete_skb) {
80 bt_cb(skb)->hci.req_complete_skb = complete_skb;
81 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
84 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
85 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
86 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
88 queue_work(hdev->workqueue, &hdev->cmd_work);
93 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
95 return req_run(req, complete, NULL);
98 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
100 return req_run(req, NULL, complete);
103 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
106 bt_dev_dbg(hdev, "result 0x%2.2x", result);
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
112 hdev->req_skb = skb_get(skb);
113 wake_up_interruptible(&hdev->req_wait_q);
117 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
119 bt_dev_dbg(hdev, "err 0x%2.2x", err);
121 if (hdev->req_status == HCI_REQ_PEND) {
122 hdev->req_result = err;
123 hdev->req_status = HCI_REQ_CANCELED;
124 wake_up_interruptible(&hdev->req_wait_q);
128 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
129 const void *param, u8 event, u32 timeout)
131 struct hci_request req;
135 bt_dev_dbg(hdev, "");
137 hci_req_init(&req, hdev);
139 hci_req_add_ev(&req, opcode, plen, param, event);
141 hdev->req_status = HCI_REQ_PEND;
143 err = hci_req_run_skb(&req, hci_req_sync_complete);
147 err = wait_event_interruptible_timeout(hdev->req_wait_q,
148 hdev->req_status != HCI_REQ_PEND, timeout);
150 if (err == -ERESTARTSYS)
151 return ERR_PTR(-EINTR);
153 switch (hdev->req_status) {
155 err = -bt_to_errno(hdev->req_result);
158 case HCI_REQ_CANCELED:
159 err = -hdev->req_result;
167 hdev->req_status = hdev->req_result = 0;
169 hdev->req_skb = NULL;
171 bt_dev_dbg(hdev, "end: err %d", err);
179 return ERR_PTR(-ENODATA);
183 EXPORT_SYMBOL(__hci_cmd_sync_ev);
185 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
186 const void *param, u32 timeout)
188 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
190 EXPORT_SYMBOL(__hci_cmd_sync);
192 /* Execute request and wait for completion. */
193 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
195 unsigned long opt, u32 timeout, u8 *hci_status)
197 struct hci_request req;
200 bt_dev_dbg(hdev, "start");
202 hci_req_init(&req, hdev);
204 hdev->req_status = HCI_REQ_PEND;
206 err = func(&req, opt);
209 *hci_status = HCI_ERROR_UNSPECIFIED;
213 err = hci_req_run_skb(&req, hci_req_sync_complete);
215 hdev->req_status = 0;
217 /* ENODATA means the HCI request command queue is empty.
218 * This can happen when a request with conditionals doesn't
219 * trigger any commands to be sent. This is normal behavior
220 * and should not trigger an error return.
222 if (err == -ENODATA) {
229 *hci_status = HCI_ERROR_UNSPECIFIED;
234 err = wait_event_interruptible_timeout(hdev->req_wait_q,
235 hdev->req_status != HCI_REQ_PEND, timeout);
237 if (err == -ERESTARTSYS)
240 switch (hdev->req_status) {
242 err = -bt_to_errno(hdev->req_result);
244 *hci_status = hdev->req_result;
247 case HCI_REQ_CANCELED:
248 err = -hdev->req_result;
250 *hci_status = HCI_ERROR_UNSPECIFIED;
256 *hci_status = HCI_ERROR_UNSPECIFIED;
260 kfree_skb(hdev->req_skb);
261 hdev->req_skb = NULL;
262 hdev->req_status = hdev->req_result = 0;
264 bt_dev_dbg(hdev, "end: err %d", err);
269 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
271 unsigned long opt, u32 timeout, u8 *hci_status)
275 if (!test_bit(HCI_UP, &hdev->flags))
278 /* Serialize all requests */
279 hci_req_sync_lock(hdev);
280 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
281 hci_req_sync_unlock(hdev);
286 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
289 int len = HCI_COMMAND_HDR_SIZE + plen;
290 struct hci_command_hdr *hdr;
293 skb = bt_skb_alloc(len, GFP_ATOMIC);
297 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
298 hdr->opcode = cpu_to_le16(opcode);
302 skb_put_data(skb, param, plen);
304 bt_dev_dbg(hdev, "skb len %d", skb->len);
306 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
307 hci_skb_opcode(skb) = opcode;
312 /* Queue a command to an asynchronous HCI request */
313 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
314 const void *param, u8 event)
316 struct hci_dev *hdev = req->hdev;
319 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
321 /* If an error occurred during request building, there is no point in
322 * queueing the HCI command. We can simply return.
327 skb = hci_prepare_cmd(hdev, opcode, plen, param);
329 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
335 if (skb_queue_empty(&req->cmd_q))
336 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
338 bt_cb(skb)->hci.req_event = event;
340 skb_queue_tail(&req->cmd_q, skb);
343 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
346 hci_req_add_ev(req, opcode, plen, param, 0);
349 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
351 struct hci_dev *hdev = req->hdev;
352 struct hci_cp_write_page_scan_activity acp;
355 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
358 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
362 type = PAGE_SCAN_TYPE_INTERLACED;
364 /* 160 msec page scan interval */
365 acp.interval = cpu_to_le16(0x0100);
367 type = hdev->def_page_scan_type;
368 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
371 acp.window = cpu_to_le16(hdev->def_page_scan_window);
373 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
374 __cpu_to_le16(hdev->page_scan_window) != acp.window)
375 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
378 if (hdev->page_scan_type != type)
379 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
382 static void start_interleave_scan(struct hci_dev *hdev)
384 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
385 queue_delayed_work(hdev->req_workqueue,
386 &hdev->interleave_scan, 0);
389 static bool is_interleave_scanning(struct hci_dev *hdev)
391 return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
394 static void cancel_interleave_scan(struct hci_dev *hdev)
396 bt_dev_dbg(hdev, "cancelling interleave scan");
398 cancel_delayed_work_sync(&hdev->interleave_scan);
400 hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
403 /* Return true if interleave_scan wasn't started until exiting this function,
404 * otherwise, return false
406 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
408 /* Do interleaved scan only if all of the following are true:
409 * - There is at least one ADV monitor
410 * - At least one pending LE connection or one device to be scanned for
411 * - Monitor offloading is not supported
412 * If so, we should alternate between allowlist scan and one without
413 * any filters to save power.
415 bool use_interleaving = hci_is_adv_monitoring(hdev) &&
416 !(list_empty(&hdev->pend_le_conns) &&
417 list_empty(&hdev->pend_le_reports)) &&
418 hci_get_adv_monitor_offload_ext(hdev) ==
419 HCI_ADV_MONITOR_EXT_NONE;
420 bool is_interleaving = is_interleave_scanning(hdev);
422 if (use_interleaving && !is_interleaving) {
423 start_interleave_scan(hdev);
424 bt_dev_dbg(hdev, "starting interleave scan");
428 if (!use_interleaving && is_interleaving)
429 cancel_interleave_scan(hdev);
434 /* This function controls the background scanning based on hdev->pend_le_conns
435 * list. If there are pending LE connection we start the background scanning,
436 * otherwise we stop it.
438 * This function requires the caller holds hdev->lock.
440 static void __hci_update_background_scan(struct hci_request *req)
442 struct hci_dev *hdev = req->hdev;
444 if (!test_bit(HCI_UP, &hdev->flags) ||
445 test_bit(HCI_INIT, &hdev->flags) ||
446 hci_dev_test_flag(hdev, HCI_SETUP) ||
447 hci_dev_test_flag(hdev, HCI_CONFIG) ||
448 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
449 hci_dev_test_flag(hdev, HCI_UNREGISTER))
452 /* No point in doing scanning if LE support hasn't been enabled */
453 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
456 /* If discovery is active don't interfere with it */
457 if (hdev->discovery.state != DISCOVERY_STOPPED)
460 /* Reset RSSI and UUID filters when starting background scanning
461 * since these filters are meant for service discovery only.
463 * The Start Discovery and Start Service Discovery operations
464 * ensure to set proper values for RSSI threshold and UUID
465 * filter list. So it is safe to just reset them here.
467 hci_discovery_filter_clear(hdev);
469 bt_dev_dbg(hdev, "ADV monitoring is %s",
470 hci_is_adv_monitoring(hdev) ? "on" : "off");
472 if (list_empty(&hdev->pend_le_conns) &&
473 list_empty(&hdev->pend_le_reports) &&
474 !hci_is_adv_monitoring(hdev)) {
475 /* If there is no pending LE connections or devices
476 * to be scanned for or no ADV monitors, we should stop the
477 * background scanning.
480 /* If controller is not scanning we are done. */
481 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
484 hci_req_add_le_scan_disable(req, false);
486 bt_dev_dbg(hdev, "stopping background scanning");
488 /* If there is at least one pending LE connection, we should
489 * keep the background scan running.
492 /* If controller is connecting, we should not start scanning
493 * since some controllers are not able to scan and connect at
496 if (hci_lookup_le_connect(hdev))
499 /* If controller is currently scanning, we stop it to ensure we
500 * don't miss any advertising (due to duplicates filter).
502 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
503 hci_req_add_le_scan_disable(req, false);
505 hci_req_add_le_passive_scan(req);
506 bt_dev_dbg(hdev, "starting background scanning");
510 void __hci_req_update_name(struct hci_request *req)
512 struct hci_dev *hdev = req->hdev;
513 struct hci_cp_write_local_name cp;
515 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
517 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
520 #define PNP_INFO_SVCLASS_ID 0x1200
522 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
524 u8 *ptr = data, *uuids_start = NULL;
525 struct bt_uuid *uuid;
530 list_for_each_entry(uuid, &hdev->uuids, list) {
533 if (uuid->size != 16)
536 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
540 if (uuid16 == PNP_INFO_SVCLASS_ID)
546 uuids_start[1] = EIR_UUID16_ALL;
550 /* Stop if not enough space to put next UUID */
551 if ((ptr - data) + sizeof(u16) > len) {
552 uuids_start[1] = EIR_UUID16_SOME;
556 *ptr++ = (uuid16 & 0x00ff);
557 *ptr++ = (uuid16 & 0xff00) >> 8;
558 uuids_start[0] += sizeof(uuid16);
564 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
566 u8 *ptr = data, *uuids_start = NULL;
567 struct bt_uuid *uuid;
572 list_for_each_entry(uuid, &hdev->uuids, list) {
573 if (uuid->size != 32)
579 uuids_start[1] = EIR_UUID32_ALL;
583 /* Stop if not enough space to put next UUID */
584 if ((ptr - data) + sizeof(u32) > len) {
585 uuids_start[1] = EIR_UUID32_SOME;
589 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
591 uuids_start[0] += sizeof(u32);
597 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
599 u8 *ptr = data, *uuids_start = NULL;
600 struct bt_uuid *uuid;
605 list_for_each_entry(uuid, &hdev->uuids, list) {
606 if (uuid->size != 128)
612 uuids_start[1] = EIR_UUID128_ALL;
616 /* Stop if not enough space to put next UUID */
617 if ((ptr - data) + 16 > len) {
618 uuids_start[1] = EIR_UUID128_SOME;
622 memcpy(ptr, uuid->uuid, 16);
624 uuids_start[0] += 16;
630 static void create_eir(struct hci_dev *hdev, u8 *data)
635 name_len = strlen(hdev->dev_name);
641 ptr[1] = EIR_NAME_SHORT;
643 ptr[1] = EIR_NAME_COMPLETE;
645 /* EIR Data length */
646 ptr[0] = name_len + 1;
648 memcpy(ptr + 2, hdev->dev_name, name_len);
650 ptr += (name_len + 2);
653 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
655 ptr[1] = EIR_TX_POWER;
656 ptr[2] = (u8) hdev->inq_tx_power;
661 if (hdev->devid_source > 0) {
663 ptr[1] = EIR_DEVICE_ID;
665 put_unaligned_le16(hdev->devid_source, ptr + 2);
666 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
667 put_unaligned_le16(hdev->devid_product, ptr + 6);
668 put_unaligned_le16(hdev->devid_version, ptr + 8);
673 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
674 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
675 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
678 void __hci_req_update_eir(struct hci_request *req)
680 struct hci_dev *hdev = req->hdev;
681 struct hci_cp_write_eir cp;
683 if (!hdev_is_powered(hdev))
686 if (!lmp_ext_inq_capable(hdev))
689 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
692 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
695 memset(&cp, 0, sizeof(cp));
697 create_eir(hdev, cp.data);
699 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
702 memcpy(hdev->eir, cp.data, sizeof(cp.data));
704 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
707 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
709 struct hci_dev *hdev = req->hdev;
711 if (hdev->scanning_paused) {
712 bt_dev_dbg(hdev, "Scanning is paused for suspend");
717 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
719 if (use_ext_scan(hdev)) {
720 struct hci_cp_le_set_ext_scan_enable cp;
722 memset(&cp, 0, sizeof(cp));
723 cp.enable = LE_SCAN_DISABLE;
724 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
727 struct hci_cp_le_set_scan_enable cp;
729 memset(&cp, 0, sizeof(cp));
730 cp.enable = LE_SCAN_DISABLE;
731 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
734 /* Disable address resolution */
735 if (use_ll_privacy(hdev) &&
736 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
737 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
740 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
744 static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
747 struct hci_cp_le_del_from_white_list cp;
749 cp.bdaddr_type = bdaddr_type;
750 bacpy(&cp.bdaddr, bdaddr);
752 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
754 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
756 if (use_ll_privacy(req->hdev) &&
757 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
760 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
762 struct hci_cp_le_del_from_resolv_list cp;
764 cp.bdaddr_type = bdaddr_type;
765 bacpy(&cp.bdaddr, bdaddr);
767 hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
773 /* Adds connection to white list if needed. On error, returns -1. */
774 static int add_to_white_list(struct hci_request *req,
775 struct hci_conn_params *params, u8 *num_entries,
778 struct hci_cp_le_add_to_white_list cp;
779 struct hci_dev *hdev = req->hdev;
781 /* Already in white list */
782 if (hci_bdaddr_list_lookup(&hdev->le_white_list, ¶ms->addr,
786 /* Select filter policy to accept all advertising */
787 if (*num_entries >= hdev->le_white_list_size)
790 /* White list can not be used with RPAs */
792 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
793 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) {
797 /* During suspend, only wakeable devices can be in whitelist */
798 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
799 params->current_flags))
803 cp.bdaddr_type = params->addr_type;
804 bacpy(&cp.bdaddr, ¶ms->addr);
806 bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
808 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
810 if (use_ll_privacy(hdev) &&
811 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
814 irk = hci_find_irk_by_addr(hdev, ¶ms->addr,
817 struct hci_cp_le_add_to_resolv_list cp;
819 cp.bdaddr_type = params->addr_type;
820 bacpy(&cp.bdaddr, ¶ms->addr);
821 memcpy(cp.peer_irk, irk->val, 16);
823 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
824 memcpy(cp.local_irk, hdev->irk, 16);
826 memset(cp.local_irk, 0, 16);
828 hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
836 static u8 update_white_list(struct hci_request *req)
838 struct hci_dev *hdev = req->hdev;
839 struct hci_conn_params *params;
840 struct bdaddr_list *b;
842 bool pend_conn, pend_report;
843 /* We allow whitelisting even with RPAs in suspend. In the worst case,
844 * we won't be able to wake from devices that use the privacy1.2
845 * features. Additionally, once we support privacy1.2 and IRK
846 * offloading, we can update this to also check for those conditions.
848 bool allow_rpa = hdev->suspended;
850 /* Go through the current white list programmed into the
851 * controller one by one and check if that address is still
852 * in the list of pending connections or list of devices to
853 * report. If not present in either list, then queue the
854 * command to remove it from the controller.
856 list_for_each_entry(b, &hdev->le_white_list, list) {
857 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
860 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
864 /* If the device is not likely to connect or report,
865 * remove it from the whitelist.
867 if (!pend_conn && !pend_report) {
868 del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
872 /* White list can not be used with RPAs */
874 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
875 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
882 /* Since all no longer valid white list entries have been
883 * removed, walk through the list of pending connections
884 * and ensure that any new device gets programmed into
887 * If the list of the devices is larger than the list of
888 * available white list entries in the controller, then
889 * just abort and return filer policy value to not use the
892 list_for_each_entry(params, &hdev->pend_le_conns, action) {
893 if (add_to_white_list(req, params, &num_entries, allow_rpa))
897 /* After adding all new pending connections, walk through
898 * the list of pending reports and also add these to the
899 * white list if there is still space. Abort if space runs out.
901 list_for_each_entry(params, &hdev->pend_le_reports, action) {
902 if (add_to_white_list(req, params, &num_entries, allow_rpa))
906 /* Use the allowlist unless the following conditions are all true:
907 * - We are not currently suspending
908 * - There are 1 or more ADV monitors registered and it's not offloaded
909 * - Interleaved scanning is not currently using the allowlist
911 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
912 hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
913 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
916 /* Select filter policy to use white list */
920 static bool scan_use_rpa(struct hci_dev *hdev)
922 return hci_dev_test_flag(hdev, HCI_PRIVACY);
925 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
926 u16 window, u8 own_addr_type, u8 filter_policy,
929 struct hci_dev *hdev = req->hdev;
931 if (hdev->scanning_paused) {
932 bt_dev_dbg(hdev, "Scanning is paused for suspend");
936 if (use_ll_privacy(hdev) &&
937 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
941 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
944 /* Use ext scanning if set ext scan param and ext scan enable is
947 if (use_ext_scan(hdev)) {
948 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
949 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
950 struct hci_cp_le_scan_phy_params *phy_params;
951 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
954 ext_param_cp = (void *)data;
955 phy_params = (void *)ext_param_cp->data;
957 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
958 ext_param_cp->own_addr_type = own_addr_type;
959 ext_param_cp->filter_policy = filter_policy;
961 plen = sizeof(*ext_param_cp);
963 if (scan_1m(hdev) || scan_2m(hdev)) {
964 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
966 memset(phy_params, 0, sizeof(*phy_params));
967 phy_params->type = type;
968 phy_params->interval = cpu_to_le16(interval);
969 phy_params->window = cpu_to_le16(window);
971 plen += sizeof(*phy_params);
975 if (scan_coded(hdev)) {
976 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
978 memset(phy_params, 0, sizeof(*phy_params));
979 phy_params->type = type;
980 phy_params->interval = cpu_to_le16(interval);
981 phy_params->window = cpu_to_le16(window);
983 plen += sizeof(*phy_params);
987 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
990 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
991 ext_enable_cp.enable = LE_SCAN_ENABLE;
992 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
994 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
995 sizeof(ext_enable_cp), &ext_enable_cp);
997 struct hci_cp_le_set_scan_param param_cp;
998 struct hci_cp_le_set_scan_enable enable_cp;
1000 memset(¶m_cp, 0, sizeof(param_cp));
1001 param_cp.type = type;
1002 param_cp.interval = cpu_to_le16(interval);
1003 param_cp.window = cpu_to_le16(window);
1004 param_cp.own_address_type = own_addr_type;
1005 param_cp.filter_policy = filter_policy;
1006 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1009 memset(&enable_cp, 0, sizeof(enable_cp));
1010 enable_cp.enable = LE_SCAN_ENABLE;
1011 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1012 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1017 /* Returns true if an le connection is in the scanning state */
1018 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1020 struct hci_conn_hash *h = &hdev->conn_hash;
1025 list_for_each_entry_rcu(c, &h->list, list) {
1026 if (c->type == LE_LINK && c->state == BT_CONNECT &&
1027 test_bit(HCI_CONN_SCANNING, &c->flags)) {
1038 /* Ensure to call hci_req_add_le_scan_disable() first to disable the
1039 * controller based address resolution to be able to reconfigure
1042 void hci_req_add_le_passive_scan(struct hci_request *req)
1044 struct hci_dev *hdev = req->hdev;
1047 u16 window, interval;
1048 /* Background scanning should run with address resolution */
1049 bool addr_resolv = true;
1051 if (hdev->scanning_paused) {
1052 bt_dev_dbg(hdev, "Scanning is paused for suspend");
1056 /* Set require_privacy to false since no SCAN_REQ are send
1057 * during passive scanning. Not using an non-resolvable address
1058 * here is important so that peer devices using direct
1059 * advertising with our address will be correctly reported
1060 * by the controller.
1062 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1066 if (hdev->enable_advmon_interleave_scan &&
1067 __hci_update_interleaved_scan(hdev))
1070 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1071 /* Adding or removing entries from the white list must
1072 * happen before enabling scanning. The controller does
1073 * not allow white list modification while scanning.
1075 filter_policy = update_white_list(req);
1077 /* When the controller is using random resolvable addresses and
1078 * with that having LE privacy enabled, then controllers with
1079 * Extended Scanner Filter Policies support can now enable support
1080 * for handling directed advertising.
1082 * So instead of using filter polices 0x00 (no whitelist)
1083 * and 0x01 (whitelist enabled) use the new filter policies
1084 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
1086 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1087 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1088 filter_policy |= 0x02;
1090 if (hdev->suspended) {
1091 window = hdev->le_scan_window_suspend;
1092 interval = hdev->le_scan_int_suspend;
1094 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1095 } else if (hci_is_le_conn_scanning(hdev)) {
1096 window = hdev->le_scan_window_connect;
1097 interval = hdev->le_scan_int_connect;
1098 } else if (hci_is_adv_monitoring(hdev)) {
1099 window = hdev->le_scan_window_adv_monitor;
1100 interval = hdev->le_scan_int_adv_monitor;
1102 window = hdev->le_scan_window;
1103 interval = hdev->le_scan_interval;
1106 bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
1107 hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1108 own_addr_type, filter_policy, addr_resolv);
1111 static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1113 struct adv_info *adv_instance;
1115 /* Instance 0x00 always set local name */
1116 if (instance == 0x00)
1119 adv_instance = hci_find_adv_instance(hdev, instance);
1123 if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1124 adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1127 return adv_instance->scan_rsp_len ? true : false;
1130 static void hci_req_clear_event_filter(struct hci_request *req)
1132 struct hci_cp_set_event_filter f;
1134 memset(&f, 0, sizeof(f));
1135 f.flt_type = HCI_FLT_CLEAR_ALL;
1136 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1138 /* Update page scan state (since we may have modified it when setting
1139 * the event filter).
1141 __hci_req_update_scan(req);
1144 static void hci_req_set_event_filter(struct hci_request *req)
1146 struct bdaddr_list_with_flags *b;
1147 struct hci_cp_set_event_filter f;
1148 struct hci_dev *hdev = req->hdev;
1149 u8 scan = SCAN_DISABLED;
1151 /* Always clear event filter when starting */
1152 hci_req_clear_event_filter(req);
1154 list_for_each_entry(b, &hdev->whitelist, list) {
1155 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1159 memset(&f, 0, sizeof(f));
1160 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1161 f.flt_type = HCI_FLT_CONN_SETUP;
1162 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1163 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1165 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1166 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1171 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1173 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1175 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1178 static void cancel_adv_timeout(struct hci_dev *hdev)
1180 if (hdev->adv_instance_timeout) {
1181 hdev->adv_instance_timeout = 0;
1182 cancel_delayed_work(&hdev->adv_instance_expire);
1186 /* This function requires the caller holds hdev->lock */
1187 void __hci_req_pause_adv_instances(struct hci_request *req)
1189 bt_dev_dbg(req->hdev, "Pausing advertising instances");
1191 /* Call to disable any advertisements active on the controller.
1192 * This will succeed even if no advertisements are configured.
1194 __hci_req_disable_advertising(req);
1196 /* If we are using software rotation, pause the loop */
1197 if (!ext_adv_capable(req->hdev))
1198 cancel_adv_timeout(req->hdev);
1201 /* This function requires the caller holds hdev->lock */
1202 static void __hci_req_resume_adv_instances(struct hci_request *req)
1204 struct adv_info *adv;
1206 bt_dev_dbg(req->hdev, "Resuming advertising instances");
1208 if (ext_adv_capable(req->hdev)) {
1209 /* Call for each tracked instance to be re-enabled */
1210 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1211 __hci_req_enable_ext_advertising(req,
1216 /* Schedule for most recent instance to be restarted and begin
1217 * the software rotation loop
1219 __hci_req_schedule_adv_instance(req,
1220 req->hdev->cur_adv_instance,
1225 /* This function requires the caller holds hdev->lock */
1226 int hci_req_resume_adv_instances(struct hci_dev *hdev)
1228 struct hci_request req;
1230 hci_req_init(&req, hdev);
1231 __hci_req_resume_adv_instances(&req);
1233 return hci_req_run(&req, NULL);
1236 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1238 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1240 if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1241 test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1242 clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1243 clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1244 wake_up(&hdev->suspend_wait_q);
1247 if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
1248 clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1249 wake_up(&hdev->suspend_wait_q);
1253 static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
1256 struct hci_dev *hdev = req->hdev;
1258 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1259 case HCI_ADV_MONITOR_EXT_MSFT:
1260 msft_req_add_set_filter_enable(req, enable);
1266 /* No need to block when enabling since it's on resume path */
1267 if (hdev->suspended && !enable)
1268 set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1271 /* Call with hci_dev_lock */
1272 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1275 struct hci_conn *conn;
1276 struct hci_request req;
1278 int disconnect_counter;
1280 if (next == hdev->suspend_state) {
1281 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1285 hdev->suspend_state = next;
1286 hci_req_init(&req, hdev);
1288 if (next == BT_SUSPEND_DISCONNECT) {
1289 /* Mark device as suspended */
1290 hdev->suspended = true;
1292 /* Pause discovery if not already stopped */
1293 old_state = hdev->discovery.state;
1294 if (old_state != DISCOVERY_STOPPED) {
1295 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1296 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1297 queue_work(hdev->req_workqueue, &hdev->discov_update);
1300 hdev->discovery_paused = true;
1301 hdev->discovery_old_state = old_state;
1303 /* Stop directed advertising */
1304 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1306 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1307 cancel_delayed_work(&hdev->discov_off);
1308 queue_delayed_work(hdev->req_workqueue,
1309 &hdev->discov_off, 0);
1312 /* Pause other advertisements */
1313 if (hdev->adv_instance_cnt)
1314 __hci_req_pause_adv_instances(&req);
1316 hdev->advertising_paused = true;
1317 hdev->advertising_old_state = old_state;
1318 /* Disable page scan */
1319 page_scan = SCAN_DISABLED;
1320 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1322 /* Disable LE passive scan if enabled */
1323 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1324 cancel_interleave_scan(hdev);
1325 hci_req_add_le_scan_disable(&req, false);
1328 /* Disable advertisement filters */
1329 hci_req_add_set_adv_filter_enable(&req, false);
1331 /* Mark task needing completion */
1332 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1334 /* Prevent disconnects from causing scanning to be re-enabled */
1335 hdev->scanning_paused = true;
1337 /* Run commands before disconnecting */
1338 hci_req_run(&req, suspend_req_complete);
1340 disconnect_counter = 0;
1341 /* Soft disconnect everything (power off) */
1342 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1343 hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1344 disconnect_counter++;
1347 if (disconnect_counter > 0) {
1349 "Had %d disconnects. Will wait on them",
1350 disconnect_counter);
1351 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1353 } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1354 /* Unpause to take care of updating scanning params */
1355 hdev->scanning_paused = false;
1356 /* Enable event filter for paired devices */
1357 hci_req_set_event_filter(&req);
1358 /* Enable passive scan at lower duty cycle */
1359 __hci_update_background_scan(&req);
1360 /* Pause scan changes again. */
1361 hdev->scanning_paused = true;
1362 hci_req_run(&req, suspend_req_complete);
1364 hdev->suspended = false;
1365 hdev->scanning_paused = false;
1367 hci_req_clear_event_filter(&req);
1368 /* Reset passive/background scanning to normal */
1369 __hci_update_background_scan(&req);
1370 /* Enable all of the advertisement filters */
1371 hci_req_add_set_adv_filter_enable(&req, true);
1373 /* Unpause directed advertising */
1374 hdev->advertising_paused = false;
1375 if (hdev->advertising_old_state) {
1376 set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1377 hdev->suspend_tasks);
1378 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1379 queue_work(hdev->req_workqueue,
1380 &hdev->discoverable_update);
1381 hdev->advertising_old_state = 0;
1384 /* Resume other advertisements */
1385 if (hdev->adv_instance_cnt)
1386 __hci_req_resume_adv_instances(&req);
1388 /* Unpause discovery */
1389 hdev->discovery_paused = false;
1390 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1391 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1392 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1393 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1394 queue_work(hdev->req_workqueue, &hdev->discov_update);
1397 hci_req_run(&req, suspend_req_complete);
1400 hdev->suspend_state = next;
1403 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1404 wake_up(&hdev->suspend_wait_q);
1407 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1409 return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
1412 void __hci_req_disable_advertising(struct hci_request *req)
1414 if (ext_adv_capable(req->hdev)) {
1415 __hci_req_disable_ext_adv_instance(req, 0x00);
1420 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1424 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1427 struct adv_info *adv_instance;
1429 if (instance == 0x00) {
1430 /* Instance 0 always manages the "Tx Power" and "Flags"
1433 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1435 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1436 * corresponds to the "connectable" instance flag.
1438 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1439 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1441 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1442 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1443 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1444 flags |= MGMT_ADV_FLAG_DISCOV;
1449 adv_instance = hci_find_adv_instance(hdev, instance);
1451 /* Return 0 when we got an invalid instance identifier. */
1455 return adv_instance->flags;
1458 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1460 /* If privacy is not enabled don't use RPA */
1461 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1464 /* If basic privacy mode is enabled use RPA */
1465 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1468 /* If limited privacy mode is enabled don't use RPA if we're
1469 * both discoverable and bondable.
1471 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1472 hci_dev_test_flag(hdev, HCI_BONDABLE))
1475 /* We're neither bondable nor discoverable in the limited
1476 * privacy mode, therefore use RPA.
1481 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1483 /* If there is no connection we are OK to advertise. */
1484 if (hci_conn_num(hdev, LE_LINK) == 0)
1487 /* Check le_states if there is any connection in slave role. */
1488 if (hdev->conn_hash.le_num_slave > 0) {
1489 /* Slave connection state and non connectable mode bit 20. */
1490 if (!connectable && !(hdev->le_states[2] & 0x10))
1493 /* Slave connection state and connectable mode bit 38
1494 * and scannable bit 21.
1496 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1497 !(hdev->le_states[2] & 0x20)))
1501 /* Check le_states if there is any connection in master role. */
1502 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1503 /* Master connection state and non connectable mode bit 18. */
1504 if (!connectable && !(hdev->le_states[2] & 0x02))
1507 /* Master connection state and connectable mode bit 35 and
1510 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1511 !(hdev->le_states[2] & 0x08)))
1518 void __hci_req_enable_advertising(struct hci_request *req)
1520 struct hci_dev *hdev = req->hdev;
1521 struct adv_info *adv_instance;
1522 struct hci_cp_le_set_adv_param cp;
1523 u8 own_addr_type, enable = 0x01;
1525 u16 adv_min_interval, adv_max_interval;
1528 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1529 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1531 /* If the "connectable" instance flag was not set, then choose between
1532 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1534 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1535 mgmt_get_connectable(hdev);
1537 if (!is_advertising_allowed(hdev, connectable))
1540 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1541 __hci_req_disable_advertising(req);
1543 /* Clear the HCI_LE_ADV bit temporarily so that the
1544 * hci_update_random_address knows that it's safe to go ahead
1545 * and write a new random address. The flag will be set back on
1546 * as soon as the SET_ADV_ENABLE HCI command completes.
1548 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1550 /* Set require_privacy to true only when non-connectable
1551 * advertising is used. In that case it is fine to use a
1552 * non-resolvable private address.
1554 if (hci_update_random_address(req, !connectable,
1555 adv_use_rpa(hdev, flags),
1556 &own_addr_type) < 0)
1559 memset(&cp, 0, sizeof(cp));
1562 adv_min_interval = adv_instance->min_interval;
1563 adv_max_interval = adv_instance->max_interval;
1565 adv_min_interval = hdev->le_adv_min_interval;
1566 adv_max_interval = hdev->le_adv_max_interval;
1570 cp.type = LE_ADV_IND;
1572 if (adv_cur_instance_is_scannable(hdev))
1573 cp.type = LE_ADV_SCAN_IND;
1575 cp.type = LE_ADV_NONCONN_IND;
1577 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1578 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1579 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1580 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1584 cp.min_interval = cpu_to_le16(adv_min_interval);
1585 cp.max_interval = cpu_to_le16(adv_max_interval);
1586 cp.own_address_type = own_addr_type;
1587 cp.channel_map = hdev->le_adv_channel_map;
1589 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1591 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1594 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1597 size_t complete_len;
1599 /* no space left for name (+ NULL + type + len) */
1600 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1603 /* use complete name if present and fits */
1604 complete_len = strlen(hdev->dev_name);
1605 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1606 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1607 hdev->dev_name, complete_len + 1);
1609 /* use short name if present */
1610 short_len = strlen(hdev->short_name);
1612 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1613 hdev->short_name, short_len + 1);
1615 /* use shortened full name if present, we already know that name
1616 * is longer then HCI_MAX_SHORT_NAME_LENGTH
1619 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1621 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1622 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1624 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1631 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1633 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1636 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1638 u8 scan_rsp_len = 0;
1640 if (hdev->appearance) {
1641 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1644 return append_local_name(hdev, ptr, scan_rsp_len);
1647 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1650 struct adv_info *adv_instance;
1652 u8 scan_rsp_len = 0;
1654 adv_instance = hci_find_adv_instance(hdev, instance);
1658 instance_flags = adv_instance->flags;
1660 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1661 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1664 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1665 adv_instance->scan_rsp_len);
1667 scan_rsp_len += adv_instance->scan_rsp_len;
1669 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1670 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1672 return scan_rsp_len;
1675 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1677 struct hci_dev *hdev = req->hdev;
1680 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1683 if (ext_adv_capable(hdev)) {
1684 struct hci_cp_le_set_ext_scan_rsp_data cp;
1686 memset(&cp, 0, sizeof(cp));
1689 len = create_instance_scan_rsp_data(hdev, instance,
1692 len = create_default_scan_rsp_data(hdev, cp.data);
1694 if (hdev->scan_rsp_data_len == len &&
1695 !memcmp(cp.data, hdev->scan_rsp_data, len))
1698 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1699 hdev->scan_rsp_data_len = len;
1701 cp.handle = instance;
1703 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1704 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1706 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1709 struct hci_cp_le_set_scan_rsp_data cp;
1711 memset(&cp, 0, sizeof(cp));
1714 len = create_instance_scan_rsp_data(hdev, instance,
1717 len = create_default_scan_rsp_data(hdev, cp.data);
1719 if (hdev->scan_rsp_data_len == len &&
1720 !memcmp(cp.data, hdev->scan_rsp_data, len))
1723 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1724 hdev->scan_rsp_data_len = len;
1728 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1732 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1734 struct adv_info *adv_instance = NULL;
1735 u8 ad_len = 0, flags = 0;
1738 /* Return 0 when the current instance identifier is invalid. */
1740 adv_instance = hci_find_adv_instance(hdev, instance);
1745 instance_flags = get_adv_instance_flags(hdev, instance);
1747 /* If instance already has the flags set skip adding it once
1750 if (adv_instance && eir_get_data(adv_instance->adv_data,
1751 adv_instance->adv_data_len, EIR_FLAGS,
1755 /* The Add Advertising command allows userspace to set both the general
1756 * and limited discoverable flags.
1758 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1759 flags |= LE_AD_GENERAL;
1761 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1762 flags |= LE_AD_LIMITED;
1764 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1765 flags |= LE_AD_NO_BREDR;
1767 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1768 /* If a discovery flag wasn't provided, simply use the global
1772 flags |= mgmt_get_adv_discov_flags(hdev);
1774 /* If flags would still be empty, then there is no need to
1775 * include the "Flags" AD field".
1789 memcpy(ptr, adv_instance->adv_data,
1790 adv_instance->adv_data_len);
1791 ad_len += adv_instance->adv_data_len;
1792 ptr += adv_instance->adv_data_len;
1795 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1798 if (ext_adv_capable(hdev)) {
1800 adv_tx_power = adv_instance->tx_power;
1802 adv_tx_power = hdev->adv_tx_power;
1804 adv_tx_power = hdev->adv_tx_power;
1807 /* Provide Tx Power only if we can provide a valid value for it */
1808 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1810 ptr[1] = EIR_TX_POWER;
1811 ptr[2] = (u8)adv_tx_power;
1821 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1823 struct hci_dev *hdev = req->hdev;
1826 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1829 if (ext_adv_capable(hdev)) {
1830 struct hci_cp_le_set_ext_adv_data cp;
1832 memset(&cp, 0, sizeof(cp));
1834 len = create_instance_adv_data(hdev, instance, cp.data);
1836 /* There's nothing to do if the data hasn't changed */
1837 if (hdev->adv_data_len == len &&
1838 memcmp(cp.data, hdev->adv_data, len) == 0)
1841 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1842 hdev->adv_data_len = len;
1845 cp.handle = instance;
1846 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1847 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1849 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1851 struct hci_cp_le_set_adv_data cp;
1853 memset(&cp, 0, sizeof(cp));
1855 len = create_instance_adv_data(hdev, instance, cp.data);
1857 /* There's nothing to do if the data hasn't changed */
1858 if (hdev->adv_data_len == len &&
1859 memcmp(cp.data, hdev->adv_data, len) == 0)
1862 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1863 hdev->adv_data_len = len;
1867 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1871 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1873 struct hci_request req;
1875 hci_req_init(&req, hdev);
1876 __hci_req_update_adv_data(&req, instance);
1878 return hci_req_run(&req, NULL);
1881 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1884 BT_DBG("%s status %u", hdev->name, status);
1887 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1889 struct hci_request req;
1892 if (!use_ll_privacy(hdev) &&
1893 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1896 hci_req_init(&req, hdev);
1898 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1900 hci_req_run(&req, enable_addr_resolution_complete);
1903 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1905 bt_dev_dbg(hdev, "status %u", status);
1908 void hci_req_reenable_advertising(struct hci_dev *hdev)
1910 struct hci_request req;
1912 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1913 list_empty(&hdev->adv_instances))
1916 hci_req_init(&req, hdev);
1918 if (hdev->cur_adv_instance) {
1919 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1922 if (ext_adv_capable(hdev)) {
1923 __hci_req_start_ext_adv(&req, 0x00);
1925 __hci_req_update_adv_data(&req, 0x00);
1926 __hci_req_update_scan_rsp_data(&req, 0x00);
1927 __hci_req_enable_advertising(&req);
1931 hci_req_run(&req, adv_enable_complete);
1934 static void adv_timeout_expire(struct work_struct *work)
1936 struct hci_dev *hdev = container_of(work, struct hci_dev,
1937 adv_instance_expire.work);
1939 struct hci_request req;
1942 bt_dev_dbg(hdev, "");
1946 hdev->adv_instance_timeout = 0;
1948 instance = hdev->cur_adv_instance;
1949 if (instance == 0x00)
1952 hci_req_init(&req, hdev);
1954 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1956 if (list_empty(&hdev->adv_instances))
1957 __hci_req_disable_advertising(&req);
1959 hci_req_run(&req, NULL);
1962 hci_dev_unlock(hdev);
1965 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1968 struct hci_dev *hdev = req->hdev;
1973 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1974 hci_req_add_le_scan_disable(req, false);
1975 hci_req_add_le_passive_scan(req);
1977 switch (hdev->interleave_scan_state) {
1978 case INTERLEAVE_SCAN_ALLOWLIST:
1979 bt_dev_dbg(hdev, "next state: allowlist");
1980 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1982 case INTERLEAVE_SCAN_NO_FILTER:
1983 bt_dev_dbg(hdev, "next state: no filter");
1984 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1986 case INTERLEAVE_SCAN_NONE:
1987 BT_ERR("unexpected error");
1991 hci_dev_unlock(hdev);
1996 static void interleave_scan_work(struct work_struct *work)
1998 struct hci_dev *hdev = container_of(work, struct hci_dev,
1999 interleave_scan.work);
2001 unsigned long timeout;
2003 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2004 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2005 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2006 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2008 bt_dev_err(hdev, "unexpected error");
2012 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2013 HCI_CMD_TIMEOUT, &status);
2015 /* Don't continue interleaving if it was canceled */
2016 if (is_interleave_scanning(hdev))
2017 queue_delayed_work(hdev->req_workqueue,
2018 &hdev->interleave_scan, timeout);
2021 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2022 bool use_rpa, struct adv_info *adv_instance,
2023 u8 *own_addr_type, bdaddr_t *rand_addr)
2027 bacpy(rand_addr, BDADDR_ANY);
2029 /* If privacy is enabled use a resolvable private address. If
2030 * current RPA has expired then generate a new one.
2035 /* If Controller supports LL Privacy use own address type is
2038 if (use_ll_privacy(hdev))
2039 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2041 *own_addr_type = ADDR_LE_DEV_RANDOM;
2044 if (!adv_instance->rpa_expired &&
2045 !bacmp(&adv_instance->random_addr, &hdev->rpa))
2048 adv_instance->rpa_expired = false;
2050 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2051 !bacmp(&hdev->random_addr, &hdev->rpa))
2055 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2057 bt_dev_err(hdev, "failed to generate new RPA");
2061 bacpy(rand_addr, &hdev->rpa);
2063 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2065 queue_delayed_work(hdev->workqueue,
2066 &adv_instance->rpa_expired_cb, to);
2068 queue_delayed_work(hdev->workqueue,
2069 &hdev->rpa_expired, to);
2074 /* In case of required privacy without resolvable private address,
2075 * use an non-resolvable private address. This is useful for
2076 * non-connectable advertising.
2078 if (require_privacy) {
2082 /* The non-resolvable private address is generated
2083 * from random six bytes with the two most significant
2086 get_random_bytes(&nrpa, 6);
2089 /* The non-resolvable private address shall not be
2090 * equal to the public address.
2092 if (bacmp(&hdev->bdaddr, &nrpa))
2096 *own_addr_type = ADDR_LE_DEV_RANDOM;
2097 bacpy(rand_addr, &nrpa);
2102 /* No privacy so use a public address. */
2103 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2108 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2110 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2113 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2115 struct hci_cp_le_set_ext_adv_params cp;
2116 struct hci_dev *hdev = req->hdev;
2119 bdaddr_t random_addr;
2122 struct adv_info *adv_instance;
2126 adv_instance = hci_find_adv_instance(hdev, instance);
2130 adv_instance = NULL;
2133 flags = get_adv_instance_flags(hdev, instance);
2135 /* If the "connectable" instance flag was not set, then choose between
2136 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2138 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2139 mgmt_get_connectable(hdev);
2141 if (!is_advertising_allowed(hdev, connectable))
2144 /* Set require_privacy to true only when non-connectable
2145 * advertising is used. In that case it is fine to use a
2146 * non-resolvable private address.
2148 err = hci_get_random_address(hdev, !connectable,
2149 adv_use_rpa(hdev, flags), adv_instance,
2150 &own_addr_type, &random_addr);
2154 memset(&cp, 0, sizeof(cp));
2157 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2158 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2159 cp.tx_power = adv_instance->tx_power;
2161 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2162 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2163 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2166 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2170 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2172 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2173 } else if (adv_instance_is_scannable(hdev, instance)) {
2175 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2177 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2180 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2182 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2185 cp.own_addr_type = own_addr_type;
2186 cp.channel_map = hdev->le_adv_channel_map;
2187 cp.handle = instance;
2189 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2190 cp.primary_phy = HCI_ADV_PHY_1M;
2191 cp.secondary_phy = HCI_ADV_PHY_2M;
2192 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2193 cp.primary_phy = HCI_ADV_PHY_CODED;
2194 cp.secondary_phy = HCI_ADV_PHY_CODED;
2196 /* In all other cases use 1M */
2197 cp.primary_phy = HCI_ADV_PHY_1M;
2198 cp.secondary_phy = HCI_ADV_PHY_1M;
2201 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2203 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2204 bacmp(&random_addr, BDADDR_ANY)) {
2205 struct hci_cp_le_set_adv_set_rand_addr cp;
2207 /* Check if random address need to be updated */
2209 if (!bacmp(&random_addr, &adv_instance->random_addr))
2212 if (!bacmp(&random_addr, &hdev->random_addr))
2216 memset(&cp, 0, sizeof(cp));
2218 cp.handle = instance;
2219 bacpy(&cp.bdaddr, &random_addr);
2222 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2229 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2231 struct hci_dev *hdev = req->hdev;
2232 struct hci_cp_le_set_ext_adv_enable *cp;
2233 struct hci_cp_ext_adv_set *adv_set;
2234 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2235 struct adv_info *adv_instance;
2238 adv_instance = hci_find_adv_instance(hdev, instance);
2242 adv_instance = NULL;
2246 adv_set = (void *) cp->data;
2248 memset(cp, 0, sizeof(*cp));
2251 cp->num_of_sets = 0x01;
2253 memset(adv_set, 0, sizeof(*adv_set));
2255 adv_set->handle = instance;
2257 /* Set duration per instance since controller is responsible for
2260 if (adv_instance && adv_instance->duration) {
2261 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2263 /* Time = N * 10 ms */
2264 adv_set->duration = cpu_to_le16(duration / 10);
2267 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2268 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2274 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2276 struct hci_dev *hdev = req->hdev;
2277 struct hci_cp_le_set_ext_adv_enable *cp;
2278 struct hci_cp_ext_adv_set *adv_set;
2279 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2282 /* If request specifies an instance that doesn't exist, fail */
2283 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2286 memset(data, 0, sizeof(data));
2289 adv_set = (void *)cp->data;
2291 /* Instance 0x00 indicates all advertising instances will be disabled */
2292 cp->num_of_sets = !!instance;
2295 adv_set->handle = instance;
2297 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2298 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2303 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2305 struct hci_dev *hdev = req->hdev;
2307 /* If request specifies an instance that doesn't exist, fail */
2308 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2311 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2316 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2318 struct hci_dev *hdev = req->hdev;
2319 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2322 /* If instance isn't pending, the chip knows about it, and it's safe to
2325 if (adv_instance && !adv_instance->pending)
2326 __hci_req_disable_ext_adv_instance(req, instance);
2328 err = __hci_req_setup_ext_adv_instance(req, instance);
2332 __hci_req_update_scan_rsp_data(req, instance);
2333 __hci_req_enable_ext_advertising(req, instance);
2338 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2341 struct hci_dev *hdev = req->hdev;
2342 struct adv_info *adv_instance = NULL;
2345 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2346 list_empty(&hdev->adv_instances))
2349 if (hdev->adv_instance_timeout)
2352 adv_instance = hci_find_adv_instance(hdev, instance);
2356 /* A zero timeout means unlimited advertising. As long as there is
2357 * only one instance, duration should be ignored. We still set a timeout
2358 * in case further instances are being added later on.
2360 * If the remaining lifetime of the instance is more than the duration
2361 * then the timeout corresponds to the duration, otherwise it will be
2362 * reduced to the remaining instance lifetime.
2364 if (adv_instance->timeout == 0 ||
2365 adv_instance->duration <= adv_instance->remaining_time)
2366 timeout = adv_instance->duration;
2368 timeout = adv_instance->remaining_time;
2370 /* The remaining time is being reduced unless the instance is being
2371 * advertised without time limit.
2373 if (adv_instance->timeout)
2374 adv_instance->remaining_time =
2375 adv_instance->remaining_time - timeout;
2377 /* Only use work for scheduling instances with legacy advertising */
2378 if (!ext_adv_capable(hdev)) {
2379 hdev->adv_instance_timeout = timeout;
2380 queue_delayed_work(hdev->req_workqueue,
2381 &hdev->adv_instance_expire,
2382 msecs_to_jiffies(timeout * 1000));
2385 /* If we're just re-scheduling the same instance again then do not
2386 * execute any HCI commands. This happens when a single instance is
2389 if (!force && hdev->cur_adv_instance == instance &&
2390 hci_dev_test_flag(hdev, HCI_LE_ADV))
2393 hdev->cur_adv_instance = instance;
2394 if (ext_adv_capable(hdev)) {
2395 __hci_req_start_ext_adv(req, instance);
2397 __hci_req_update_adv_data(req, instance);
2398 __hci_req_update_scan_rsp_data(req, instance);
2399 __hci_req_enable_advertising(req);
2405 /* For a single instance:
2406 * - force == true: The instance will be removed even when its remaining
2407 * lifetime is not zero.
2408 * - force == false: the instance will be deactivated but kept stored unless
2409 * the remaining lifetime is zero.
2411 * For instance == 0x00:
2412 * - force == true: All instances will be removed regardless of their timeout
2414 * - force == false: Only instances that have a timeout will be removed.
2416 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2417 struct hci_request *req, u8 instance,
2420 struct adv_info *adv_instance, *n, *next_instance = NULL;
2424 /* Cancel any timeout concerning the removed instance(s). */
2425 if (!instance || hdev->cur_adv_instance == instance)
2426 cancel_adv_timeout(hdev);
2428 /* Get the next instance to advertise BEFORE we remove
2429 * the current one. This can be the same instance again
2430 * if there is only one instance.
2432 if (instance && hdev->cur_adv_instance == instance)
2433 next_instance = hci_get_next_instance(hdev, instance);
2435 if (instance == 0x00) {
2436 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2438 if (!(force || adv_instance->timeout))
2441 rem_inst = adv_instance->instance;
2442 err = hci_remove_adv_instance(hdev, rem_inst);
2444 mgmt_advertising_removed(sk, hdev, rem_inst);
2447 adv_instance = hci_find_adv_instance(hdev, instance);
2449 if (force || (adv_instance && adv_instance->timeout &&
2450 !adv_instance->remaining_time)) {
2451 /* Don't advertise a removed instance. */
2452 if (next_instance &&
2453 next_instance->instance == instance)
2454 next_instance = NULL;
2456 err = hci_remove_adv_instance(hdev, instance);
2458 mgmt_advertising_removed(sk, hdev, instance);
2462 if (!req || !hdev_is_powered(hdev) ||
2463 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2466 if (next_instance && !ext_adv_capable(hdev))
2467 __hci_req_schedule_adv_instance(req, next_instance->instance,
2471 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2473 struct hci_dev *hdev = req->hdev;
2475 /* If we're advertising or initiating an LE connection we can't
2476 * go ahead and change the random address at this time. This is
2477 * because the eventual initiator address used for the
2478 * subsequently created connection will be undefined (some
2479 * controllers use the new address and others the one we had
2480 * when the operation started).
2482 * In this kind of scenario skip the update and let the random
2483 * address be updated at the next cycle.
2485 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2486 hci_lookup_le_connect(hdev)) {
2487 bt_dev_dbg(hdev, "Deferring random address update");
2488 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2492 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2495 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2496 bool use_rpa, u8 *own_addr_type)
2498 struct hci_dev *hdev = req->hdev;
2501 /* If privacy is enabled use a resolvable private address. If
2502 * current RPA has expired or there is something else than
2503 * the current RPA in use, then generate a new one.
2508 /* If Controller supports LL Privacy use own address type is
2511 if (use_ll_privacy(hdev))
2512 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2514 *own_addr_type = ADDR_LE_DEV_RANDOM;
2516 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2517 !bacmp(&hdev->random_addr, &hdev->rpa))
2520 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2522 bt_dev_err(hdev, "failed to generate new RPA");
2526 set_random_addr(req, &hdev->rpa);
2528 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2529 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2534 /* In case of required privacy without resolvable private address,
2535 * use an non-resolvable private address. This is useful for active
2536 * scanning and non-connectable advertising.
2538 if (require_privacy) {
2542 /* The non-resolvable private address is generated
2543 * from random six bytes with the two most significant
2546 get_random_bytes(&nrpa, 6);
2549 /* The non-resolvable private address shall not be
2550 * equal to the public address.
2552 if (bacmp(&hdev->bdaddr, &nrpa))
2556 *own_addr_type = ADDR_LE_DEV_RANDOM;
2557 set_random_addr(req, &nrpa);
2561 /* If forcing static address is in use or there is no public
2562 * address use the static address as random address (but skip
2563 * the HCI command if the current random address is already the
2566 * In case BR/EDR has been disabled on a dual-mode controller
2567 * and a static address has been configured, then use that
2568 * address instead of the public BR/EDR address.
2570 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2571 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2572 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2573 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2574 *own_addr_type = ADDR_LE_DEV_RANDOM;
2575 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2576 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2577 &hdev->static_addr);
2581 /* Neither privacy nor static address is being used so use a
2584 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2589 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2591 struct bdaddr_list *b;
2593 list_for_each_entry(b, &hdev->whitelist, list) {
2594 struct hci_conn *conn;
2596 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2600 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
/* Queue an HCI Write Scan Enable command reflecting the current
 * connectable/discoverable settings. No-ops when BR/EDR is disabled,
 * the controller is off or powering down, scanning is paused, or the
 * controller already matches the desired page/inquiry scan state.
 */
2607 void __hci_req_update_scan(struct hci_request *req)
2609 struct hci_dev *hdev = req->hdev;
2612 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2615 if (!hdev_is_powered(hdev))
2618 if (mgmt_powering_down(hdev))
/* Scanning may be temporarily paused (e.g. for suspend handling). */
2621 if (hdev->scanning_paused)
/* Page scan is needed when connectable, or when a whitelisted device
 * is not yet connected and could initiate a connection.
 */
2624 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2625 disconnected_whitelist_entries(hdev))
2628 scan = SCAN_DISABLED;
2630 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2631 scan |= SCAN_INQUIRY;
/* Skip the command if page/inquiry scan bits already match. */
2633 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2634 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2637 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* hci_req_sync() callback: update the scan enable setting while
 * holding the hdev lock.
 */
2640 static int update_scan(struct hci_request *req, unsigned long opt)
2642 hci_dev_lock(req->hdev);
2643 __hci_req_update_scan(req);
2644 hci_dev_unlock(req->hdev);
/* Workqueue handler: run the scan-enable update synchronously. */
2648 static void scan_update_work(struct work_struct *work)
2650 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2652 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
/* hci_req_sync() callback invoked when the connectable setting has
 * changed: refresh scan enable, advertising data/parameters, and the
 * background scan.
 */
2655 static int connectable_update(struct hci_request *req, unsigned long opt)
2657 struct hci_dev *hdev = req->hdev;
2661 __hci_req_update_scan(req);
2663 /* If BR/EDR is not enabled and we disable advertising as a
2664 * by-product of disabling connectable, we need to update the
2665 * advertising flags.
2667 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2668 __hci_req_update_adv_data(req, hdev->cur_adv_instance)
2670 /* Update the advertising parameters if necessary */
2671 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2672 !list_empty(&hdev->adv_instances)) {
/* Extended advertising capable controllers use the ext-adv path. */
2673 if (ext_adv_capable(hdev))
2674 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2676 __hci_req_enable_advertising(req);
/* Connectable state also affects passive background scanning. */
2679 __hci_update_background_scan(req);
2681 hci_dev_unlock(hdev);
/* Workqueue handler: apply the connectable update and report the
 * result back to mgmt.
 */
2686 static void connectable_update_work(struct work_struct *work)
2688 struct hci_dev *hdev = container_of(work, struct hci_dev,
2689 connectable_update);
2692 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2693 mgmt_set_connectable_complete(hdev, status);
/* OR together the service-class hints of all registered UUIDs to form
 * the service-class byte of the Class of Device.
 */
2696 static u8 get_service_classes(struct hci_dev *hdev)
2698 struct bt_uuid *uuid;
2701 list_for_each_entry(uuid, &hdev->uuids, list)
2702 val |= uuid->svc_hint;
/* Queue a Write Class of Device command built from the current
 * major/minor class and registered service UUIDs. Skipped when the
 * controller is off, BR/EDR is disabled, the service cache is active,
 * or the class is already up to date.
 */
2707 void __hci_req_update_class(struct hci_request *req)
2709 struct hci_dev *hdev = req->hdev;
2712 bt_dev_dbg(hdev, "");
2714 if (!hdev_is_powered(hdev))
2717 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
/* While the service cache is enabled, class updates are deferred. */
2720 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2723 cod[0] = hdev->minor_class;
2724 cod[1] = hdev->major_class;
2725 cod[2] = get_service_classes(hdev);
/* Limited-discoverable mode is signalled via a bit in the class of
 * device (handling appears in lines elided from this excerpt).
 */
2727 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
/* Avoid a redundant command if nothing changed. */
2730 if (memcmp(cod, hdev->dev_class, 3) == 0)
2733 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Queue a Write Current IAC LAP command: LIAC+GIAC when limited
 * discoverable, GIAC only for general discoverable. Does nothing when
 * the device is not discoverable.
 */
2736 static void write_iac(struct hci_request *req)
2738 struct hci_dev *hdev = req->hdev;
2739 struct hci_cp_write_current_iac_lap cp;
2741 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2744 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2745 /* Limited discoverable mode */
/* Some controllers support fewer than two IACs; clamp to hardware. */
2746 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2747 cp.iac_lap[0] = 0x00; /* LIAC */
2748 cp.iac_lap[1] = 0x8b;
2749 cp.iac_lap[2] = 0x9e;
2750 cp.iac_lap[3] = 0x33; /* GIAC */
2751 cp.iac_lap[4] = 0x8b;
2752 cp.iac_lap[5] = 0x9e;
2754 /* General discoverable mode */
2756 cp.iac_lap[0] = 0x33; /* GIAC */
2757 cp.iac_lap[1] = 0x8b;
2758 cp.iac_lap[2] = 0x9e;
/* Command length is one byte for num_iac plus 3 bytes per LAP. */
2761 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2762 (cp.num_iac * 3) + 1, &cp);
/* hci_req_sync() callback invoked when the discoverable setting has
 * changed: refresh IAC, scan enable and class for BR/EDR, and
 * advertising data/parameters for LE where applicable.
 */
2765 static int discoverable_update(struct hci_request *req, unsigned long opt)
2767 struct hci_dev *hdev = req->hdev;
2771 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2773 __hci_req_update_scan(req);
2774 __hci_req_update_class(req);
2777 /* Advertising instances don't use the global discoverable setting, so
2778 * only update AD if advertising was enabled using Set Advertising.
2780 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2781 __hci_req_update_adv_data(req, 0x00);
2783 /* Discoverable mode affects the local advertising
2784 * address in limited privacy mode.
2786 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2787 if (ext_adv_capable(hdev))
2788 __hci_req_start_ext_adv(req, 0x00);
2790 __hci_req_enable_advertising(req);
2794 hci_dev_unlock(hdev);
/* Workqueue handler: apply the discoverable update and report the
 * result back to mgmt.
 */
2799 static void discoverable_update_work(struct work_struct *work)
2801 struct hci_dev *hdev = container_of(work, struct hci_dev,
2802 discoverable_update);
2805 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2806 mgmt_set_discoverable_complete(hdev, status);
/* Queue the HCI command(s) needed to abort @conn with @reason,
 * selected by the connection's current state: disconnect when the
 * link is up, cancel when it is still being created, or reject when
 * it is an incoming request.
 */
2809 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2812 switch (conn->state) {
/* Established link: AMP links use a physical-link disconnect, all
 * others a regular HCI Disconnect.
 */
2815 if (conn->type == AMP_LINK) {
2816 struct hci_cp_disconn_phy_link cp;
2818 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2820 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2823 struct hci_cp_disconnect dc;
2825 dc.handle = cpu_to_le16(conn->handle);
2827 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2830 conn->state = BT_DISCONN;
/* Outgoing connection still in progress: cancel its creation. */
2834 if (conn->type == LE_LINK) {
2835 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2837 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
/* Create Connection Cancel only exists from Bluetooth 1.2 on. */
2839 } else if (conn->type == ACL_LINK) {
2840 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2842 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming connection request: reject it. */
2847 if (conn->type == ACL_LINK) {
2848 struct hci_cp_reject_conn_req rej;
2850 bacpy(&rej.bdaddr, &conn->dst);
2851 rej.reason = reason;
2853 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2855 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2856 struct hci_cp_reject_sync_conn_req rej;
2858 bacpy(&rej.bdaddr, &conn->dst);
2860 /* SCO rejection has its own limited set of
2861 * allowed error values (0x0D-0x0F) which isn't
2862 * compatible with most values passed to this
2863 * function. To be safe hard-code one of the
2864 * values that's suitable for SCO.
2866 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2868 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
/* Default: no command needed, just mark the connection closed. */
2873 conn->state = BT_CLOSED;
/* Request-complete callback for hci_abort_conn(): only logs a failed
 * abort; no further recovery is attempted here.
 */
2878 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2881 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
/* Build and run a request that aborts @conn with @reason. -ENODATA
 * (empty request, i.e. nothing needed for this state) is not treated
 * as an error.
 */
2884 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2886 struct hci_request req;
2889 hci_req_init(&req, conn->hdev);
2891 __hci_abort_conn(&req, conn, reason);
2893 err = hci_req_run(&req, abort_conn_complete);
2894 if (err && err != -ENODATA) {
2895 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
/* hci_req_sync() callback: refresh passive background scanning while
 * holding the hdev lock.
 */
2902 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2904 hci_dev_lock(req->hdev);
2905 __hci_update_background_scan(req);
2906 hci_dev_unlock(req->hdev);
/* Workqueue handler: update background scanning; on failure, fail any
 * pending LE connection attempt so it does not hang.
 */
2910 static void bg_scan_update(struct work_struct *work)
2912 struct hci_dev *hdev = container_of(work, struct hci_dev,
2914 struct hci_conn *conn;
2918 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
/* A connection stuck in BT_CONNECT depends on background scanning;
 * report the failure to it.
 */
2924 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2926 hci_le_conn_failed(conn, status);
2928 hci_dev_unlock(hdev);
/* hci_req_sync() callback: queue the command(s) to disable LE
 * scanning (without the RPA-expired handling, hence "false").
 */
2931 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2933 hci_req_add_le_scan_disable(req, false);
/* hci_req_sync() callback: flush the inquiry cache and start a BR/EDR
 * inquiry using the limited (LIAC) or general (GIAC) access code
 * depending on the discovery settings. @opt carries the inquiry
 * length (set by the caller into cp in lines elided here).
 */
2937 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2940 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2941 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2942 struct hci_cp_inquiry cp;
2944 bt_dev_dbg(req->hdev, "");
/* Stale cache entries would be reported as new results otherwise. */
2946 hci_dev_lock(req->hdev);
2947 hci_inquiry_cache_flush(req->hdev);
2948 hci_dev_unlock(req->hdev);
2950 memset(&cp, 0, sizeof(cp));
2952 if (req->hdev->discovery.limited)
2953 memcpy(&cp.lap, liac, sizeof(cp.lap));
2955 memcpy(&cp.lap, giac, sizeof(cp.lap));
2959 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Delayed-work handler that stops LE scanning when the discovery
 * timeout fires, then decides whether discovery is finished or a
 * BR/EDR inquiry phase must follow (interleaved discovery).
 */
2964 static void le_scan_disable_work(struct work_struct *work)
2966 struct hci_dev *hdev = container_of(work, struct hci_dev,
2967 le_scan_disable.work);
2970 bt_dev_dbg(hdev, "");
2972 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
/* The restart job must not re-enable the scan we are stopping. */
2975 cancel_delayed_work(&hdev->le_scan_restart);
2977 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2979 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2984 hdev->discovery.scan_start = 0;
2986 /* If we were running LE only scan, change discovery state. If
2987 * we were running both LE and BR/EDR inquiry simultaneously,
2988 * and BR/EDR inquiry is already finished, stop discovery,
2989 * otherwise BR/EDR inquiry will stop discovery when finished.
2990 * If we will resolve remote device name, do not change
2994 if (hdev->discovery.type == DISCOV_TYPE_LE)
2995 goto discov_stopped;
2997 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
/* With simultaneous discovery the controller drives the inquiry;
 * discovery only stops here if inquiry already ended and no name
 * resolution is pending.
 */
3000 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3001 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3002 hdev->discovery.state != DISCOVERY_RESOLVING)
3003 goto discov_stopped;
/* Interleaved discovery without the quirk: follow the LE phase with
 * a BR/EDR inquiry phase.
 */
3008 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3009 HCI_CMD_TIMEOUT, &status);
3011 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3012 goto discov_stopped;
3019 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3020 hci_dev_unlock(hdev);
/* hci_req_sync() callback: restart an ongoing LE scan by disabling
 * and re-enabling it (used to work around strict duplicate filtering
 * in some controllers). Uses the extended scan enable command when
 * the controller supports extended scanning.
 */
3023 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3025 struct hci_dev *hdev = req->hdev;
3027 /* If controller is not scanning we are done. */
3028 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3031 if (hdev->scanning_paused) {
3032 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3036 hci_req_add_le_scan_disable(req, false);
3038 if (use_ext_scan(hdev)) {
3039 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3041 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3042 ext_enable_cp.enable = LE_SCAN_ENABLE;
3043 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3045 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3046 sizeof(ext_enable_cp), &ext_enable_cp);
3048 struct hci_cp_le_set_scan_enable cp;
3050 memset(&cp, 0, sizeof(cp));
3051 cp.enable = LE_SCAN_ENABLE;
3052 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3053 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Delayed-work handler restarting the LE scan, then re-queueing the
 * le_scan_disable job so the overall scan duration stays bounded
 * (the disable job was cancelled by the restart).
 */
3059 static void le_scan_restart_work(struct work_struct *work)
3061 struct hci_dev *hdev = container_of(work, struct hci_dev,
3062 le_scan_restart.work);
3063 unsigned long timeout, duration, scan_start, now;
3066 bt_dev_dbg(hdev, "");
3068 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3070 bt_dev_err(hdev, "failed to restart LE scan: status %d",
/* Re-arming the disable timer only matters for controllers with the
 * strict duplicate filter quirk and an active discovery scan.
 */
3077 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3078 !hdev->discovery.scan_start)
3081 /* When the scan was started, hdev->le_scan_disable has been queued
3082 * after duration from scan_start. During scan restart this job
3083 * has been canceled, and we need to queue it again after proper
3084 * timeout, to make sure that scan does not run indefinitely.
3086 duration = hdev->discovery.scan_duration;
3087 scan_start = hdev->discovery.scan_start;
3089 if (now - scan_start <= duration) {
/* Compute elapsed jiffies, handling jiffies counter wrap-around. */
3092 if (now >= scan_start)
3093 elapsed = now - scan_start;
3095 elapsed = ULONG_MAX - scan_start + now;
3097 timeout = duration - elapsed;
3102 queue_delayed_work(hdev->req_workqueue,
3103 &hdev->le_scan_disable, timeout);
3106 hci_dev_unlock(hdev);
/* hci_req_sync() callback: start an active LE discovery scan with the
 * given scan interval (@opt). Stops any running passive/background
 * scan first and picks a suitable own address type (falling back to
 * the public address if updating the random address fails).
 */
3109 static int active_scan(struct hci_request *req, unsigned long opt)
3111 uint16_t interval = opt;
3112 struct hci_dev *hdev = req->hdev;
3114 /* White list is not used for discovery */
3115 u8 filter_policy = 0x00;
3116 /* Discovery doesn't require controller address resolution */
3117 bool addr_resolv = false;
3120 bt_dev_dbg(hdev, "");
3122 /* If controller is scanning, it means the background scanning is
3123 * running. Thus, we should temporarily stop it in order to set the
3124 * discovery scanning parameters.
3126 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3127 hci_req_add_le_scan_disable(req, false);
3128 cancel_interleave_scan(hdev);
3131 /* All active scans will be done with either a resolvable private
3132 * address (when privacy feature has been enabled) or non-resolvable
3135 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
/* On failure fall back to the public address for scanning. */
3138 own_addr_type = ADDR_LE_DEV_PUBLIC;
3140 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3141 hdev->le_scan_window_discovery, own_addr_type,
3142 filter_policy, addr_resolv);
/* hci_req_sync() callback for simultaneous discovery: start the LE
 * active scan, then the BR/EDR inquiry in the same request.
 */
3146 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3150 bt_dev_dbg(req->hdev, "");
3152 err = active_scan(req, opt);
3156 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
/* Kick off device discovery according to hdev->discovery.type
 * (BR/EDR only, interleaved, or LE only), then arm the delayed work
 * that stops the LE scan after the computed timeout. @status receives
 * the HCI status of the issued request.
 */
3159 static void start_discovery(struct hci_dev *hdev, u8 *status)
3161 unsigned long timeout;
3163 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3165 switch (hdev->discovery.type) {
3166 case DISCOV_TYPE_BREDR:
/* Skip starting another inquiry if one is already running. */
3167 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3168 hci_req_sync(hdev, bredr_inquiry,
3169 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3172 case DISCOV_TYPE_INTERLEAVED:
3173 /* When running simultaneous discovery, the LE scanning time
3174 * should occupy the whole discovery time sine BR/EDR inquiry
3175 * and LE scanning are scheduled by the controller.
3177 * For interleaving discovery in comparison, BR/EDR inquiry
3178 * and LE scanning are done sequentially with separate
3181 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3183 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3184 /* During simultaneous discovery, we double LE scan
3185 * interval. We must leave some time for the controller
3186 * to do BR/EDR inquiry.
3188 hci_req_sync(hdev, interleaved_discov,
3189 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
/* Non-simultaneous interleaved discovery: LE phase first with its
 * own shorter timeout, BR/EDR inquiry follows later.
 */
3194 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3195 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3196 HCI_CMD_TIMEOUT, status);
3198 case DISCOV_TYPE_LE:
3199 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3200 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3201 HCI_CMD_TIMEOUT, status);
/* Unknown discovery type: report an unspecified error. */
3204 *status = HCI_ERROR_UNSPECIFIED;
3211 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3213 /* When service discovery is used and the controller has a
3214 * strict duplicate filter, it is important to remember the
3215 * start and duration of the scan. This is required for
3216 * restarting scanning during the discovery phase.
3218 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3219 hdev->discovery.result_filtering) {
3220 hdev->discovery.scan_start = jiffies;
3221 hdev->discovery.scan_duration = timeout;
3224 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
/* Queue the commands needed to stop the current discovery: cancel
 * inquiry and/or disable LE scanning depending on the discovery
 * state, and cancel a pending remote-name request if name resolution
 * is in progress. Returns whether any work was queued (the returns
 * themselves fall on lines elided from this excerpt).
 */
3228 bool hci_req_stop_discovery(struct hci_request *req)
3230 struct hci_dev *hdev = req->hdev;
3231 struct discovery_state *d = &hdev->discovery;
3232 struct hci_cp_remote_name_req_cancel cp;
3233 struct inquiry_entry *e;
3236 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3238 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3239 if (test_bit(HCI_INQUIRY, &hdev->flags))
3240 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
/* Stop the LE scan and its pending auto-disable job. */
3242 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3243 cancel_delayed_work(&hdev->le_scan_disable);
3244 hci_req_add_le_scan_disable(req, false);
3249 /* Passive scanning */
3250 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3251 hci_req_add_le_scan_disable(req, false);
3256 /* No further actions needed for LE-only discovery */
3257 if (d->type == DISCOV_TYPE_LE)
/* Abort an in-flight remote name request, if any entry is pending. */
3260 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3261 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3266 bacpy(&cp.bdaddr, &e->data.bdaddr);
3267 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
/* hci_req_sync() callback: stop discovery while holding the hdev
 * lock.
 */
3275 static int stop_discovery(struct hci_request *req, unsigned long opt)
3277 hci_dev_lock(req->hdev);
3278 hci_req_stop_discovery(req);
3279 hci_dev_unlock(req->hdev);
/* Workqueue handler driving discovery state transitions: start
 * discovery when STARTING (moving to FINDING on success, STOPPED on
 * failure) and stop it when STOPPING, reporting completion to mgmt.
 */
3284 static void discov_update(struct work_struct *work)
3286 struct hci_dev *hdev = container_of(work, struct hci_dev,
3290 switch (hdev->discovery.state) {
3291 case DISCOVERY_STARTING:
3292 start_discovery(hdev, &status);
3293 mgmt_start_discovery_complete(hdev, status);
/* On failure fall back to STOPPED; otherwise discovery is active. */
3295 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3297 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3299 case DISCOVERY_STOPPING:
3300 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3301 mgmt_stop_discovery_complete(hdev, status);
3303 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3305 case DISCOVERY_STOPPED:
/* Delayed-work handler for the discoverable timeout: clear the
 * discoverable flags, apply the change via discoverable_update, and
 * notify mgmt of the new settings.
 */
3311 static void discov_off(struct work_struct *work)
3313 struct hci_dev *hdev = container_of(work, struct hci_dev,
3316 bt_dev_dbg(hdev, "");
3320 /* When discoverable timeout triggers, then just make sure
3321 * the limited discoverable flag is cleared. Even in the case
3322 * of a timeout triggered from general discoverable, it is
3323 * safe to unconditionally clear the flag.
3325 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3326 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3327 hdev->discov_timeout = 0;
3329 hci_dev_unlock(hdev);
/* Push the cleared state to the controller and userspace. */
3331 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3332 mgmt_new_settings(hdev);
/* hci_req_sync() callback run when the controller is powered on:
 * bring the controller configuration (SSP, SC, LE host support,
 * advertising, link security, scan/class/name/EIR) in line with the
 * host's current settings.
 */
3335 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3337 struct hci_dev *hdev = req->hdev;
/* Enable SSP on the controller if the host wants it but the
 * controller does not have it active yet.
 */
3342 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3343 !lmp_host_ssp_capable(hdev)) {
3346 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3348 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3351 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3352 sizeof(support), &support);
/* Dual-mode controller with LE enabled: make sure LE host support
 * is reflected in the controller's host features.
 */
3356 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3357 lmp_bredr_capable(hdev)) {
3358 struct hci_cp_write_le_host_supported cp;
3363 /* Check first if we already have the right
3364 * host state (host features set)
3366 if (cp.le != lmp_host_le_capable(hdev) ||
3367 cp.simul != lmp_host_le_br_capable(hdev))
3368 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3372 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3373 /* Make sure the controller has a good default for
3374 * advertising data. This also applies to the case
3375 * where BR/EDR was toggled during the AUTO_OFF phase.
3377 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3378 list_empty(&hdev->adv_instances)) {
3381 if (ext_adv_capable(hdev)) {
3382 err = __hci_req_setup_ext_adv_instance(req,
3385 __hci_req_update_scan_rsp_data(req,
3389 __hci_req_update_adv_data(req, 0x00);
3390 __hci_req_update_scan_rsp_data(req, 0x00);
3393 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3394 if (!ext_adv_capable(hdev))
3395 __hci_req_enable_advertising(req);
3397 __hci_req_enable_ext_advertising(req,
/* No global advertising, but instances exist: schedule the first
 * configured advertising instance.
 */
3400 } else if (!list_empty(&hdev->adv_instances)) {
3401 struct adv_info *adv_instance;
3403 adv_instance = list_first_entry(&hdev->adv_instances,
3404 struct adv_info, list);
3405 __hci_req_schedule_adv_instance(req,
3406 adv_instance->instance,
/* Sync the controller's authentication-enable with the host's
 * link-security setting.
 */
3411 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3412 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3413 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3414 sizeof(link_sec), &link_sec);
/* BR/EDR specific defaults: fast connectable, scan, class, name and
 * extended inquiry response.
 */
3416 if (lmp_bredr_capable(hdev)) {
3417 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3418 __hci_req_write_fast_connectable(req, true);
3420 __hci_req_write_fast_connectable(req, false);
3421 __hci_req_update_scan(req);
3422 __hci_req_update_class(req);
3423 __hci_req_update_name(req);
3424 __hci_req_update_eir(req);
3427 hci_dev_unlock(hdev);
/* Run the powered_update_hci request synchronously as part of
 * powering on the controller.
 */
3431 int __hci_req_hci_power_on(struct hci_dev *hdev)
3433 /* Register the available SMP channels (BR/EDR and LE) only when
3434 * successfully powering on the controller. This late
3435 * registration is required so that LE SMP can clearly decide if
3436 * the public address or static address is used.
3440 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
/* Initialize all work structs and delayed works owned by this file;
 * called once during hci_dev setup.
 */
3444 void hci_request_setup(struct hci_dev *hdev)
3446 INIT_WORK(&hdev->discov_update, discov_update);
3447 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3448 INIT_WORK(&hdev->scan_update, scan_update_work);
3449 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3450 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3451 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3452 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3453 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3454 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3455 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
/* Cancel every pending request and all (delayed) work items owned by
 * this file; used on teardown/power-off so no work runs afterwards.
 */
3458 void hci_request_cancel_all(struct hci_dev *hdev)
3460 hci_req_sync_cancel(hdev, ENODEV);
3462 cancel_work_sync(&hdev->discov_update);
3463 cancel_work_sync(&hdev->bg_scan_update);
3464 cancel_work_sync(&hdev->scan_update);
3465 cancel_work_sync(&hdev->connectable_update);
3466 cancel_work_sync(&hdev->discoverable_update);
3467 cancel_delayed_work_sync(&hdev->discov_off);
3468 cancel_delayed_work_sync(&hdev->le_scan_disable);
3469 cancel_delayed_work_sync(&hdev->le_scan_restart);
/* Only cancel the instance-expire work when a timeout is armed, and
 * clear the bookkeeping so it is not treated as still pending.
 */
3471 if (hdev->adv_instance_timeout) {
3472 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3473 hdev->adv_instance_timeout = 0;
3476 cancel_interleave_scan(hdev);