2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2014 Intel Corporation
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
24 #include <linux/sched/signal.h>
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
31 #include "hci_request.h"
34 #define HCI_REQ_DONE 0
35 #define HCI_REQ_PEND 1
36 #define HCI_REQ_CANCELED 2
/* Initialise an asynchronous HCI request: start with an empty command queue.
 * NOTE(review): this excerpt omits intermediate lines (braces/field inits);
 * tokens are kept exactly as found.
 */
38 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
40 skb_queue_head_init(&req->cmd_q);
/* Drop any HCI commands still queued on this request without sending them. */
47 void hci_req_purge(struct hci_request *req)
49 skb_queue_purge(&req->cmd_q);
/* Return true while a synchronous request on this controller is awaiting
 * completion (hdev->req_status still HCI_REQ_PEND).
 */
51 bool hci_req_status_pend(struct hci_dev *hdev)
53 return hdev->req_status == HCI_REQ_PEND;
/* Submit a built request to the controller's command work queue.
 *
 * Tags the last queued skb with the supplied completion callback (plain or
 * skb-returning), splices req->cmd_q onto hdev->cmd_q under the queue lock,
 * and schedules cmd_work. Empty requests are rejected.
 * NOTE(review): some lines (error check, returns) are missing from this
 * excerpt; tokens kept exactly as found.
 */
56 static int req_run(struct hci_request *req, hci_req_complete_t complete,
57 hci_req_complete_skb_t complete_skb)
59 struct hci_dev *hdev = req->hdev;
63 bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
65 /* If an error occurred during request building, remove all HCI
66 * commands queued on the HCI request queue.
69 skb_queue_purge(&req->cmd_q);
73 /* Do not allow empty requests */
74 if (skb_queue_empty(&req->cmd_q))
/* Completion callback is attached to the final command of the request. */
77 skb = skb_peek_tail(&req->cmd_q);
79 bt_cb(skb)->hci.req_complete = complete;
80 } else if (complete_skb) {
81 bt_cb(skb)->hci.req_complete_skb = complete_skb;
82 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
/* Hand the whole batch to the controller's command queue atomically. */
85 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
86 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
87 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
89 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Run a request with a plain (status-only) completion callback. */
94 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
96 return req_run(req, complete, NULL);
/* Run a request with an skb-returning completion callback (caller gets the
 * command-complete event payload).
 */
99 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
101 return req_run(req, NULL, complete);
/* Completion hook for synchronous requests: stash the result (and a reference
 * to the response skb) on hdev and wake the waiter sleeping on req_wait_q.
 */
104 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
107 bt_dev_dbg(hdev, "result 0x%2.2x", result);
109 if (hdev->req_status == HCI_REQ_PEND) {
110 hdev->req_result = result;
111 hdev->req_status = HCI_REQ_DONE;
/* skb_get() takes a reference so the waiter can consume it safely. */
113 hdev->req_skb = skb_get(skb);
114 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request with @err and wake its waiter. */
118 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
120 bt_dev_dbg(hdev, "err 0x%2.2x", err);
122 if (hdev->req_status == HCI_REQ_PEND) {
123 hdev->req_result = err;
124 hdev->req_status = HCI_REQ_CANCELED;
125 wake_up_interruptible(&hdev->req_wait_q);
/* Send a single HCI command and block until the matching event (or timeout).
 *
 * Builds a one-command request, waits interruptibly on req_wait_q, then maps
 * the controller status to an errno. Returns the response skb on success or
 * an ERR_PTR (-EINTR on signal, -ENODATA when no response skb was captured).
 * NOTE(review): several lines (timeout handling, skb pickup) are missing from
 * this excerpt; tokens kept exactly as found.
 */
129 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
130 const void *param, u8 event, u32 timeout)
132 struct hci_request req;
136 bt_dev_dbg(hdev, "");
138 hci_req_init(&req, hdev);
140 hci_req_add_ev(&req, opcode, plen, param, event);
142 hdev->req_status = HCI_REQ_PEND;
144 err = hci_req_run_skb(&req, hci_req_sync_complete);
148 err = wait_event_interruptible_timeout(hdev->req_wait_q,
149 hdev->req_status != HCI_REQ_PEND, timeout);
151 if (err == -ERESTARTSYS)
152 return ERR_PTR(-EINTR);
/* Translate the completion status recorded by hci_req_sync_complete(). */
154 switch (hdev->req_status) {
156 err = -bt_to_errno(hdev->req_result);
159 case HCI_REQ_CANCELED:
160 err = -hdev->req_result;
/* Reset the per-device sync-request state for the next caller. */
168 hdev->req_status = hdev->req_result = 0;
170 hdev->req_skb = NULL;
172 bt_dev_dbg(hdev, "end: err %d", err);
180 return ERR_PTR(-ENODATA);
184 EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper: synchronous command that completes on the default
 * Command Complete event (event == 0).
 */
186 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
187 const void *param, u32 timeout)
189 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
191 EXPORT_SYMBOL(__hci_cmd_sync);
193 /* Execute request and wait for completion. */
/* Build a request via @func, run it, and sleep until it finishes.
 *
 * On failure @hci_status (if non-NULL) receives the HCI error code, defaulting
 * to HCI_ERROR_UNSPECIFIED when no controller status is available. -ENODATA
 * from an empty request is treated as success. Caller must not hold hdev->lock
 * (the request blocks).
 * NOTE(review): some lines (returns, timeout branch) are missing from this
 * excerpt; tokens kept exactly as found.
 */
194 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
196 unsigned long opt, u32 timeout, u8 *hci_status)
198 struct hci_request req;
201 bt_dev_dbg(hdev, "start");
203 hci_req_init(&req, hdev);
205 hdev->req_status = HCI_REQ_PEND;
/* The builder callback queues the actual commands for this request. */
207 err = func(&req, opt);
210 *hci_status = HCI_ERROR_UNSPECIFIED;
214 err = hci_req_run_skb(&req, hci_req_sync_complete);
216 hdev->req_status = 0;
218 /* ENODATA means the HCI request command queue is empty.
219 * This can happen when a request with conditionals doesn't
220 * trigger any commands to be sent. This is normal behavior
221 * and should not trigger an error return.
223 if (err == -ENODATA) {
230 *hci_status = HCI_ERROR_UNSPECIFIED;
235 err = wait_event_interruptible_timeout(hdev->req_wait_q,
236 hdev->req_status != HCI_REQ_PEND, timeout);
238 if (err == -ERESTARTSYS)
241 switch (hdev->req_status) {
243 err = -bt_to_errno(hdev->req_result);
245 *hci_status = hdev->req_result;
248 case HCI_REQ_CANCELED:
249 err = -hdev->req_result;
251 *hci_status = HCI_ERROR_UNSPECIFIED;
257 *hci_status = HCI_ERROR_UNSPECIFIED;
/* Release any captured response skb and clear sync state for reuse. */
261 kfree_skb(hdev->req_skb);
262 hdev->req_skb = NULL;
263 hdev->req_status = hdev->req_result = 0;
265 bt_dev_dbg(hdev, "end: err %d", err);
/* Serialized wrapper around __hci_req_sync(): takes the request lock and only
 * executes when the controller is still up (re-checked under the lock to
 * avoid racing hci_dev_do_close()).
 */
270 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
272 unsigned long opt, u32 timeout, u8 *hci_status)
276 /* Serialize all requests */
277 hci_req_sync_lock(hdev);
278 /* check the state after obtaing the lock to protect the HCI_UP
279 * against any races from hci_dev_do_close when the controller
282 if (test_bit(HCI_UP, &hdev->flags))
283 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
286 hci_req_sync_unlock(hdev);
/* Allocate an skb carrying one HCI command: header (opcode + plen) followed
 * by the parameter payload. GFP_ATOMIC because callers may hold spinlocks.
 * NOTE(review): NULL-check/return lines are missing from this excerpt.
 */
291 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
294 int len = HCI_COMMAND_HDR_SIZE + plen;
295 struct hci_command_hdr *hdr;
298 skb = bt_skb_alloc(len, GFP_ATOMIC);
302 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
/* Opcode goes on the wire little-endian per the HCI specification. */
303 hdr->opcode = cpu_to_le16(opcode);
307 skb_put_data(skb, param, plen);
309 bt_dev_dbg(hdev, "skb len %d", skb->len);
311 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
312 hci_skb_opcode(skb) = opcode;
317 /* Queue a command to an asynchronous HCI request */
/* Append one command to req->cmd_q; @event selects the completion event to
 * wait for (0 = default Command Complete). The first command of a request is
 * flagged HCI_REQ_START. On allocation failure only an error is logged
 * (req->err handling lines appear omitted from this excerpt).
 */
318 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
319 const void *param, u8 event)
321 struct hci_dev *hdev = req->hdev;
324 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
326 /* If an error occurred during request building, there is no point in
327 * queueing the HCI command. We can simply return.
332 skb = hci_prepare_cmd(hdev, opcode, plen, param);
334 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
340 if (skb_queue_empty(&req->cmd_q))
341 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
343 bt_cb(skb)->hci.req_event = event;
345 skb_queue_tail(&req->cmd_q, skb);
/* Queue a command with the default completion event (Command Complete). */
348 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
351 hci_req_add_ev(req, opcode, plen, param, 0);
/* Queue page-scan parameter updates for "fast connectable" mode.
 *
 * @enable true: interlaced scan with a short (160 ms) interval; false: the
 * controller's default type/interval. Commands are only queued when the new
 * values differ from the cached hdev state. Requires BR/EDR enabled and
 * HCI >= 1.2.
 */
354 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
356 struct hci_dev *hdev = req->hdev;
357 struct hci_cp_write_page_scan_activity acp;
360 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
363 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
367 type = PAGE_SCAN_TYPE_INTERLACED;
369 /* 160 msec page scan interval */
370 acp.interval = cpu_to_le16(0x0100);
372 type = hdev->def_page_scan_type;
373 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
376 acp.window = cpu_to_le16(hdev->def_page_scan_window);
/* Only talk to the controller when something actually changed. */
378 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
379 __cpu_to_le16(hdev->page_scan_window) != acp.window)
380 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
383 if (hdev->page_scan_type != type)
384 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* Kick off interleaved scanning: reset the state machine to the no-filter
 * phase and schedule the interleave work immediately.
 */
387 static void start_interleave_scan(struct hci_dev *hdev)
389 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
390 queue_delayed_work(hdev->req_workqueue,
391 &hdev->interleave_scan, 0);
/* True while the interleave-scan state machine is active. */
394 static bool is_interleave_scanning(struct hci_dev *hdev)
396 return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
/* Stop interleaved scanning: synchronously cancel the work item, then mark
 * the state machine idle.
 */
399 static void cancel_interleave_scan(struct hci_dev *hdev)
401 bt_dev_dbg(hdev, "cancelling interleave scan");
403 cancel_delayed_work_sync(&hdev->interleave_scan);
405 hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
408 /* Return true if interleave_scan wasn't started until exiting this function,
409 * otherwise, return false
/* Start or stop interleaved scanning based on current conditions.
 * NOTE(review): the return statements are not visible in this excerpt.
 */
411 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
413 /* Do interleaved scan only if all of the following are true:
414 * - There is at least one ADV monitor
415 * - At least one pending LE connection or one device to be scanned for
416 * - Monitor offloading is not supported
417 * If so, we should alternate between allowlist scan and one without
418 * any filters to save power.
420 bool use_interleaving = hci_is_adv_monitoring(hdev) &&
421 !(list_empty(&hdev->pend_le_conns) &&
422 list_empty(&hdev->pend_le_reports)) &&
423 hci_get_adv_monitor_offload_ext(hdev) ==
424 HCI_ADV_MONITOR_EXT_NONE;
425 bool is_interleaving = is_interleave_scanning(hdev);
427 if (use_interleaving && !is_interleaving) {
428 start_interleave_scan(hdev);
429 bt_dev_dbg(hdev, "starting interleave scan");
433 if (!use_interleaving && is_interleaving)
434 cancel_interleave_scan(hdev);
438 /* This function controls the background scanning based on hdev->pend_le_conns
439 * list. If there are pending LE connection we start the background scanning,
440 * otherwise we stop it.
442 * This function requires the caller holds hdev->lock.
/* Early-outs: controller not operational, LE disabled, or discovery active. */
444 static void __hci_update_background_scan(struct hci_request *req)
446 struct hci_dev *hdev = req->hdev;
448 if (!test_bit(HCI_UP, &hdev->flags) ||
449 test_bit(HCI_INIT, &hdev->flags) ||
450 hci_dev_test_flag(hdev, HCI_SETUP) ||
451 hci_dev_test_flag(hdev, HCI_CONFIG) ||
452 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
453 hci_dev_test_flag(hdev, HCI_UNREGISTER))
456 /* No point in doing scanning if LE support hasn't been enabled */
457 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
460 /* If discovery is active don't interfere with it */
461 if (hdev->discovery.state != DISCOVERY_STOPPED)
464 /* Reset RSSI and UUID filters when starting background scanning
465 * since these filters are meant for service discovery only.
467 * The Start Discovery and Start Service Discovery operations
468 * ensure to set proper values for RSSI threshold and UUID
469 * filter list. So it is safe to just reset them here.
471 hci_discovery_filter_clear(hdev);
473 bt_dev_dbg(hdev, "ADV monitoring is %s",
474 hci_is_adv_monitoring(hdev) ? "on" : "off");
/* Nothing left to scan for: stop background scanning if it is running. */
476 if (list_empty(&hdev->pend_le_conns) &&
477 list_empty(&hdev->pend_le_reports) &&
478 !hci_is_adv_monitoring(hdev)) {
479 /* If there is no pending LE connections or devices
480 * to be scanned for or no ADV monitors, we should stop the
481 * background scanning.
484 /* If controller is not scanning we are done. */
485 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
488 hci_req_add_le_scan_disable(req, false);
490 bt_dev_dbg(hdev, "stopping background scanning");
492 /* If there is at least one pending LE connection, we should
493 * keep the background scan running.
496 /* If controller is connecting, we should not start scanning
497 * since some controllers are not able to scan and connect at
500 if (hci_lookup_le_connect(hdev))
503 /* If controller is currently scanning, we stop it to ensure we
504 * don't miss any advertising (due to duplicates filter).
506 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
507 hci_req_add_le_scan_disable(req, false);
/* Restart passive scanning with the current filter/allow-list state. */
509 hci_req_add_le_passive_scan(req);
510 bt_dev_dbg(hdev, "starting background scanning");
/* Queue a Write Local Name command carrying the cached hdev->dev_name. */
514 void __hci_req_update_name(struct hci_request *req)
516 struct hci_dev *hdev = req->hdev;
517 struct hci_cp_write_local_name cp;
519 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
521 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
524 #define PNP_INFO_SVCLASS_ID 0x1200
/* Serialize the registered 16-bit service UUIDs into an EIR field at @data.
 *
 * Emits an EIR_UUID16_ALL field, downgraded to EIR_UUID16_SOME when @len runs
 * out. The PnP Information service class is skipped. Returns the advanced
 * write pointer (return line not visible in this excerpt).
 */
526 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
528 u8 *ptr = data, *uuids_start = NULL;
529 struct bt_uuid *uuid;
534 list_for_each_entry(uuid, &hdev->uuids, list) {
537 if (uuid->size != 16)
/* 128-bit storage keeps the 16-bit alias at byte offset 12. */
540 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
544 if (uuid16 == PNP_INFO_SVCLASS_ID)
550 uuids_start[1] = EIR_UUID16_ALL;
554 /* Stop if not enough space to put next UUID */
555 if ((ptr - data) + sizeof(u16) > len) {
556 uuids_start[1] = EIR_UUID16_SOME;
560 *ptr++ = (uuid16 & 0x00ff);
561 *ptr++ = (uuid16 & 0xff00) >> 8;
562 uuids_start[0] += sizeof(uuid16);
/* Serialize the registered 32-bit service UUIDs into an EIR field at @data.
 * Same layout rules as create_uuid16_list (ALL vs SOME on overflow).
 */
570 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
572 u8 *ptr = data, *uuids_start = NULL;
573 struct bt_uuid *uuid;
578 list_for_each_entry(uuid, &hdev->uuids, list) {
579 if (uuid->size != 32)
585 uuids_start[1] = EIR_UUID32_ALL;
589 /* Stop if not enough space to put next UUID */
590 if ((ptr - data) + sizeof(u32) > len) {
591 uuids_start[1] = EIR_UUID32_SOME;
595 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
597 uuids_start[0] += sizeof(u32);
/* Serialize the registered 128-bit service UUIDs into an EIR field at @data.
 * Same layout rules as create_uuid16_list (ALL vs SOME on overflow).
 */
603 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
605 u8 *ptr = data, *uuids_start = NULL;
606 struct bt_uuid *uuid;
611 list_for_each_entry(uuid, &hdev->uuids, list) {
612 if (uuid->size != 128)
618 uuids_start[1] = EIR_UUID128_ALL;
622 /* Stop if not enough space to put next UUID */
623 if ((ptr - data) + 16 > len) {
624 uuids_start[1] = EIR_UUID128_SOME;
628 memcpy(ptr, uuid->uuid, 16);
630 uuids_start[0] += 16;
/* Compose the Extended Inquiry Response payload into @data.
 *
 * Fields emitted in order: local name (SHORT when truncated, else COMPLETE),
 * inquiry TX power (when valid), Device ID (when a source is configured),
 * then the 16/32/128-bit UUID lists, each bounded by the remaining
 * HCI_MAX_EIR_LENGTH budget.
 */
634 static void create_eir(struct hci_dev *hdev, u8 *data)
639 name_len = strlen(hdev->dev_name);
645 ptr[1] = EIR_NAME_SHORT;
647 ptr[1] = EIR_NAME_COMPLETE;
649 /* EIR Data length */
650 ptr[0] = name_len + 1;
652 memcpy(ptr + 2, hdev->dev_name, name_len);
654 ptr += (name_len + 2);
657 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
659 ptr[1] = EIR_TX_POWER;
660 ptr[2] = (u8) hdev->inq_tx_power;
665 if (hdev->devid_source > 0) {
667 ptr[1] = EIR_DEVICE_ID;
/* Device ID: source, vendor, product, version — each little-endian u16. */
669 put_unaligned_le16(hdev->devid_source, ptr + 2);
670 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
671 put_unaligned_le16(hdev->devid_product, ptr + 6);
672 put_unaligned_le16(hdev->devid_version, ptr + 8);
677 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
678 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
679 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Rebuild the EIR data and queue Write EIR only when it actually changed.
 *
 * Skipped when the controller is off, lacks extended inquiry, SSP is
 * disabled, or the service cache is active. The new payload is cached in
 * hdev->eir so identical rebuilds are suppressed.
 */
682 void __hci_req_update_eir(struct hci_request *req)
684 struct hci_dev *hdev = req->hdev;
685 struct hci_cp_write_eir cp;
687 if (!hdev_is_powered(hdev))
690 if (!lmp_ext_inq_capable(hdev))
693 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
696 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
699 memset(&cp, 0, sizeof(cp));
701 create_eir(hdev, cp.data);
703 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
706 memcpy(hdev->eir, cp.data, sizeof(cp.data));
708 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Queue the commands that stop LE scanning (extended or legacy variant).
 *
 * No-op while scanning is paused for suspend. Also disables controller-based
 * address resolution when LL privacy is in use, unless @rpa_le_conn indicates
 * an RPA-based connection attempt needs it kept on.
 */
712 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
714 struct hci_dev *hdev = req->hdev;
716 if (hdev->scanning_paused) {
717 bt_dev_dbg(hdev, "Scanning is paused for suspend");
722 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
724 if (use_ext_scan(hdev)) {
725 struct hci_cp_le_set_ext_scan_enable cp;
727 memset(&cp, 0, sizeof(cp));
728 cp.enable = LE_SCAN_DISABLE;
729 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
732 struct hci_cp_le_set_scan_enable cp;
734 memset(&cp, 0, sizeof(cp));
735 cp.enable = LE_SCAN_DISABLE;
736 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
739 /* Disable address resolution */
740 if (use_ll_privacy(hdev) &&
741 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
742 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
745 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
/* Queue removal of @bdaddr from the controller white list, and — when LL
 * privacy is enabled and an IRK is known for the address — from the resolving
 * list as well.
 */
749 static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
752 struct hci_cp_le_del_from_white_list cp;
754 cp.bdaddr_type = bdaddr_type;
755 bacpy(&cp.bdaddr, bdaddr);
757 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
759 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
761 if (use_ll_privacy(req->hdev) &&
762 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
765 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
/* Inner cp intentionally shadows the outer one for the resolv-list op. */
767 struct hci_cp_le_del_from_resolv_list cp;
769 cp.bdaddr_type = bdaddr_type;
770 bacpy(&cp.bdaddr, bdaddr);
772 hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
777 /* Adds connection to white list if needed. On error, returns -1. */
/* Queue addition of @params->addr to the controller white list.
 *
 * Skips duplicates; fails when the list is full, when the peer uses an RPA
 * (unless allowed), or when suspended and the device is not wake-capable.
 * With LL privacy enabled and a known IRK, also programs the resolving list.
 * NOTE(review): the success/failure return lines are not visible in this
 * excerpt.
 */
778 static int add_to_white_list(struct hci_request *req,
779 struct hci_conn_params *params, u8 *num_entries,
782 struct hci_cp_le_add_to_white_list cp;
783 struct hci_dev *hdev = req->hdev;
785 /* Already in white list */
786 if (hci_bdaddr_list_lookup(&hdev->le_white_list, ¶ms->addr,
790 /* Select filter policy to accept all advertising */
791 if (*num_entries >= hdev->le_white_list_size)
794 /* White list can not be used with RPAs */
796 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
797 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) {
801 /* During suspend, only wakeable devices can be in whitelist */
802 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
803 params->current_flags))
807 cp.bdaddr_type = params->addr_type;
808 bacpy(&cp.bdaddr, ¶ms->addr);
810 bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
812 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
814 if (use_ll_privacy(hdev) &&
815 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
818 irk = hci_find_irk_by_addr(hdev, ¶ms->addr,
821 struct hci_cp_le_add_to_resolv_list cp;
823 cp.bdaddr_type = params->addr_type;
824 bacpy(&cp.bdaddr, ¶ms->addr);
825 memcpy(cp.peer_irk, irk->val, 16);
/* Local IRK only when we advertise with our own privacy enabled. */
827 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
828 memcpy(cp.local_irk, hdev->irk, 16);
830 memset(cp.local_irk, 0, 16);
832 hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
/* Reconcile the controller white list with the pending-connection and
 * pending-report lists, and return the scan filter policy to use (white list
 * vs. accept-all; exact return values not visible in this excerpt).
 */
840 static u8 update_white_list(struct hci_request *req)
842 struct hci_dev *hdev = req->hdev;
843 struct hci_conn_params *params;
844 struct bdaddr_list *b;
846 bool pend_conn, pend_report;
847 /* We allow whitelisting even with RPAs in suspend. In the worst case,
848 * we won't be able to wake from devices that use the privacy1.2
849 * features. Additionally, once we support privacy1.2 and IRK
850 * offloading, we can update this to also check for those conditions.
852 bool allow_rpa = hdev->suspended;
854 /* Go through the current white list programmed into the
855 * controller one by one and check if that address is still
856 * in the list of pending connections or list of devices to
857 * report. If not present in either list, then queue the
858 * command to remove it from the controller.
860 list_for_each_entry(b, &hdev->le_white_list, list) {
861 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
864 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
868 /* If the device is not likely to connect or report,
869 * remove it from the whitelist.
871 if (!pend_conn && !pend_report) {
872 del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
876 /* White list can not be used with RPAs */
878 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
879 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
886 /* Since all no longer valid white list entries have been
887 * removed, walk through the list of pending connections
888 * and ensure that any new device gets programmed into
891 * If the list of the devices is larger than the list of
892 * available white list entries in the controller, then
893 * just abort and return filer policy value to not use the
896 list_for_each_entry(params, &hdev->pend_le_conns, action) {
897 if (add_to_white_list(req, params, &num_entries, allow_rpa))
901 /* After adding all new pending connections, walk through
902 * the list of pending reports and also add these to the
903 * white list if there is still space. Abort if space runs out.
905 list_for_each_entry(params, &hdev->pend_le_reports, action) {
906 if (add_to_white_list(req, params, &num_entries, allow_rpa))
910 /* Use the allowlist unless the following conditions are all true:
911 * - We are not currently suspending
912 * - There are 1 or more ADV monitors registered and it's not offloaded
913 * - Interleaved scanning is not currently using the allowlist
915 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
916 hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
917 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
920 /* Select filter policy to use white list */
/* Scanning should use a resolvable private address when the Privacy flag
 * is set on the controller.
 */
926 static bool scan_use_rpa(struct hci_dev *hdev)
928 return hci_dev_test_flag(hdev, HCI_PRIVACY);
/* Queue the commands that configure and enable LE scanning.
 *
 * Uses the extended scan API (per-PHY parameters for 1M and Coded) when the
 * controller supports it, otherwise legacy Set Scan Parameters + Enable.
 * Re-enables controller address resolution first when LL privacy applies.
 * No-op while scanning is paused for suspend.
 */
930 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
931 u16 window, u8 own_addr_type, u8 filter_policy,
934 struct hci_dev *hdev = req->hdev;
936 if (hdev->scanning_paused) {
937 bt_dev_dbg(hdev, "Scanning is paused for suspend");
941 if (use_ll_privacy(hdev) &&
942 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
946 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
949 /* Use ext scanning if set ext scan param and ext scan enable is
952 if (use_ext_scan(hdev)) {
953 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
954 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
955 struct hci_cp_le_scan_phy_params *phy_params;
/* Worst case: fixed header plus one phy_params entry per PHY (1M + Coded). */
956 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
959 ext_param_cp = (void *)data;
960 phy_params = (void *)ext_param_cp->data;
962 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
963 ext_param_cp->own_addr_type = own_addr_type;
964 ext_param_cp->filter_policy = filter_policy;
966 plen = sizeof(*ext_param_cp);
968 if (scan_1m(hdev) || scan_2m(hdev)) {
969 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
971 memset(phy_params, 0, sizeof(*phy_params));
972 phy_params->type = type;
973 phy_params->interval = cpu_to_le16(interval);
974 phy_params->window = cpu_to_le16(window);
976 plen += sizeof(*phy_params);
980 if (scan_coded(hdev)) {
981 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
983 memset(phy_params, 0, sizeof(*phy_params));
984 phy_params->type = type;
985 phy_params->interval = cpu_to_le16(interval);
986 phy_params->window = cpu_to_le16(window);
988 plen += sizeof(*phy_params);
992 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
995 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
996 ext_enable_cp.enable = LE_SCAN_ENABLE;
997 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
999 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
1000 sizeof(ext_enable_cp), &ext_enable_cp);
/* Legacy (pre-5.0) controllers: single parameter set + enable. */
1002 struct hci_cp_le_set_scan_param param_cp;
1003 struct hci_cp_le_set_scan_enable enable_cp;
1005 memset(¶m_cp, 0, sizeof(param_cp));
1006 param_cp.type = type;
1007 param_cp.interval = cpu_to_le16(interval);
1008 param_cp.window = cpu_to_le16(window);
1009 param_cp.own_address_type = own_addr_type;
1010 param_cp.filter_policy = filter_policy;
1011 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1014 memset(&enable_cp, 0, sizeof(enable_cp));
1015 enable_cp.enable = LE_SCAN_ENABLE;
1016 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1017 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1022 /* Returns true if an le connection is in the scanning state */
1023 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1025 struct hci_conn_hash *h = &hdev->conn_hash;
/* Walk the connection hash under RCU looking for an LE link that is in
 * BT_CONNECT and flagged HCI_CONN_SCANNING (rcu lock/unlock lines are not
 * visible in this excerpt).
 */
1030 list_for_each_entry_rcu(c, &h->list, list) {
1031 if (c->type == LE_LINK && c->state == BT_CONNECT &&
1032 test_bit(HCI_CONN_SCANNING, &c->flags)) {
1042 /* Ensure to call hci_req_add_le_scan_disable() first to disable the
1043 * controller based address resolution to be able to reconfigure
/* Queue LE passive (background) scanning.
 *
 * Picks own address type, reconciles the white list into a filter policy,
 * selects scan window/interval for the current mode (suspend / connecting /
 * ADV monitoring / default), then calls hci_req_start_scan(). May defer to
 * interleaved scanning instead when ADV-monitor interleaving is enabled.
 */
1046 void hci_req_add_le_passive_scan(struct hci_request *req)
1048 struct hci_dev *hdev = req->hdev;
1051 u16 window, interval;
1052 /* Background scanning should run with address resolution */
1053 bool addr_resolv = true;
1055 if (hdev->scanning_paused) {
1056 bt_dev_dbg(hdev, "Scanning is paused for suspend");
1060 /* Set require_privacy to false since no SCAN_REQ are send
1061 * during passive scanning. Not using an non-resolvable address
1062 * here is important so that peer devices using direct
1063 * advertising with our address will be correctly reported
1064 * by the controller.
1066 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
1070 if (hdev->enable_advmon_interleave_scan &&
1071 __hci_update_interleaved_scan(hdev))
1074 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1075 /* Adding or removing entries from the white list must
1076 * happen before enabling scanning. The controller does
1077 * not allow white list modification while scanning.
1079 filter_policy = update_white_list(req);
1081 /* When the controller is using random resolvable addresses and
1082 * with that having LE privacy enabled, then controllers with
1083 * Extended Scanner Filter Policies support can now enable support
1084 * for handling directed advertising.
1086 * So instead of using filter polices 0x00 (no whitelist)
1087 * and 0x01 (whitelist enabled) use the new filter policies
1088 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
1090 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1091 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1092 filter_policy |= 0x02;
/* Scan timing depends on why we are scanning (power vs. latency). */
1094 if (hdev->suspended) {
1095 window = hdev->le_scan_window_suspend;
1096 interval = hdev->le_scan_int_suspend;
1098 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1099 } else if (hci_is_le_conn_scanning(hdev)) {
1100 window = hdev->le_scan_window_connect;
1101 interval = hdev->le_scan_int_connect;
1102 } else if (hci_is_adv_monitoring(hdev)) {
1103 window = hdev->le_scan_window_adv_monitor;
1104 interval = hdev->le_scan_int_adv_monitor;
1106 window = hdev->le_scan_window;
1107 interval = hdev->le_scan_interval;
1110 bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
1111 hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1112 own_addr_type, filter_policy, addr_resolv);
/* Return whether an advertising instance produces scan-response data.
 * Instance 0 always does (local name); others qualify via the APPEARANCE or
 * LOCAL_NAME flags, or a non-zero configured scan_rsp_len.
 */
1115 static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1117 struct adv_info *adv_instance;
1119 /* Instance 0x00 always set local name */
1120 if (instance == 0x00)
1123 adv_instance = hci_find_adv_instance(hdev, instance);
1127 if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1128 adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1131 return adv_instance->scan_rsp_len ? true : false;
/* Queue a Set Event Filter (clear-all) command and refresh the page-scan
 * state, which the filter setup may have modified.
 */
1134 static void hci_req_clear_event_filter(struct hci_request *req)
1136 struct hci_cp_set_event_filter f;
1138 memset(&f, 0, sizeof(f));
1139 f.flt_type = HCI_FLT_CLEAR_ALL;
1140 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1142 /* Update page scan state (since we may have modified it when setting
1143 * the event filter).
1145 __hci_req_update_scan(req);
/* Program connection-setup event filters for wake-capable paired devices
 * (used on the suspend path), then queue a Write Scan Enable.
 * NOTE(review): the lines choosing the scan value and the suspend-task bit
 * condition are partially missing from this excerpt.
 */
1148 static void hci_req_set_event_filter(struct hci_request *req)
1150 struct bdaddr_list_with_flags *b;
1151 struct hci_cp_set_event_filter f;
1152 struct hci_dev *hdev = req->hdev;
1153 u8 scan = SCAN_DISABLED;
1155 /* Always clear event filter when starting */
1156 hci_req_clear_event_filter(req);
1158 list_for_each_entry(b, &hdev->whitelist, list) {
1159 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
/* Auto-accept incoming connections from this wake-capable address. */
1163 memset(&f, 0, sizeof(f));
1164 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1165 f.flt_type = HCI_FLT_CONN_SETUP;
1166 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1167 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1169 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1170 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1175 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1177 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1179 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Cancel a pending advertising-instance expiry and clear its timeout. */
1182 static void cancel_adv_timeout(struct hci_dev *hdev)
1184 if (hdev->adv_instance_timeout) {
1185 hdev->adv_instance_timeout = 0;
1186 cancel_delayed_work(&hdev->adv_instance_expire);
1190 /* This function requires the caller holds hdev->lock */
/* Pause all advertising: disable it at the controller and, for software-
 * rotated (non-extended-adv) controllers, stop the rotation timer too.
 */
1191 void __hci_req_pause_adv_instances(struct hci_request *req)
1193 bt_dev_dbg(req->hdev, "Pausing advertising instances");
1195 /* Call to disable any advertisements active on the controller.
1196 * This will succeed even if no advertisements are configured.
1198 __hci_req_disable_advertising(req);
1200 /* If we are using software rotation, pause the loop */
1201 if (!ext_adv_capable(req->hdev))
1202 cancel_adv_timeout(req->hdev);
1205 /* This function requires the caller holds hdev->lock */
/* Resume advertising after a pause: with extended advertising, re-enable
 * each tracked instance; otherwise restart the software rotation loop from
 * the current instance.
 */
1206 static void __hci_req_resume_adv_instances(struct hci_request *req)
1208 struct adv_info *adv;
1210 bt_dev_dbg(req->hdev, "Resuming advertising instances");
1212 if (ext_adv_capable(req->hdev)) {
1213 /* Call for each tracked instance to be re-enabled */
1214 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1215 __hci_req_enable_ext_advertising(req,
1220 /* Schedule for most recent instance to be restarted and begin
1221 * the software rotation loop
1223 __hci_req_schedule_adv_instance(req,
1224 req->hdev->cur_adv_instance,
1229 /* This function requires the caller holds hdev->lock */
/* Public wrapper: build and run a request that resumes all advertising
 * instances. Returns the result of hci_req_run().
 */
1230 int hci_req_resume_adv_instances(struct hci_dev *hdev)
1232 struct hci_request req;
1234 hci_req_init(&req, hdev);
1235 __hci_req_resume_adv_instances(&req);
1237 return hci_req_run(&req, NULL);
/* Completion callback for suspend/resume requests: clear the scan and
 * ADV-filter suspend-task bits and wake anyone waiting on suspend_wait_q.
 */
1240 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1242 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1244 if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1245 test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1246 clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1247 clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1248 wake_up(&hdev->suspend_wait_q);
1251 if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
1252 clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1253 wake_up(&hdev->suspend_wait_q);
/* Queue enabling/disabling of offloaded ADV monitor filters.
 * Currently only the MSFT extension is dispatched; when disabling during
 * suspend, the SET_ADV_FILTER suspend task is marked so the caller can wait
 * for completion.
 */
1257 static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
1260 struct hci_dev *hdev = req->hdev;
1262 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1263 case HCI_ADV_MONITOR_EXT_MSFT:
1264 msft_req_add_set_filter_enable(req, enable);
1270 /* No need to block when enabling since it's on resume path */
1271 if (hdev->suspended && !enable)
1272 set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1275 /* Call with hci_dev_lock */
/* Transition the controller between suspend states:
 * - BT_SUSPEND_DISCONNECT: pause discovery/advertising, disable page
 *   scan, LE passive scan and adv filters, then soft-disconnect all
 *   links (HCI_ERROR_REMOTE_POWER_OFF).
 * - BT_SUSPEND_CONFIGURE_WAKE: set event filter and low-duty passive
 *   scan so paired devices can wake the host.
 * - otherwise (resume path): clear filters, restore scanning,
 *   advertising and discovery to their pre-suspend state.
 * Tasks that complete asynchronously are flagged in hdev->suspend_tasks
 * and waited on via hdev->suspend_wait_q.
 */
1276 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1279 	struct hci_conn *conn;
1280 	struct hci_request req;
1282 	int disconnect_counter;
	/* Nothing to do if already in the requested state */
1284 	if (next == hdev->suspend_state) {
1285 		bt_dev_dbg(hdev, "Same state before and after: %d", next);
1289 	hdev->suspend_state = next;
1290 	hci_req_init(&req, hdev);
1292 	if (next == BT_SUSPEND_DISCONNECT) {
1293 		/* Mark device as suspended */
1294 		hdev->suspended = true;
1296 		/* Pause discovery if not already stopped */
1297 		old_state = hdev->discovery.state;
1298 		if (old_state != DISCOVERY_STOPPED) {
1299 			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1300 			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1301 			queue_work(hdev->req_workqueue, &hdev->discov_update);
		/* Remember pre-suspend discovery state for the resume path */
1304 		hdev->discovery_paused = true;
1305 		hdev->discovery_old_state = old_state;
1307 		/* Stop directed advertising */
1308 		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1310 			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1311 			cancel_delayed_work(&hdev->discov_off);
1312 			queue_delayed_work(hdev->req_workqueue,
1313 					   &hdev->discov_off, 0);
1316 		/* Pause other advertisements */
1317 		if (hdev->adv_instance_cnt)
1318 			__hci_req_pause_adv_instances(&req);
1320 		hdev->advertising_paused = true;
1321 		hdev->advertising_old_state = old_state;
1322 		/* Disable page scan */
1323 		page_scan = SCAN_DISABLED;
1324 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1326 		/* Disable LE passive scan if enabled */
1327 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1328 			cancel_interleave_scan(hdev);
1329 			hci_req_add_le_scan_disable(&req, false);
1332 		/* Disable advertisement filters */
1333 		hci_req_add_set_adv_filter_enable(&req, false);
1335 		/* Mark task needing completion */
1336 		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1338 		/* Prevent disconnects from causing scanning to be re-enabled */
1339 		hdev->scanning_paused = true;
1341 		/* Run commands before disconnecting */
1342 		hci_req_run(&req, suspend_req_complete);
1344 		disconnect_counter = 0;
1345 		/* Soft disconnect everything (power off) */
1346 		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1347 			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1348 			disconnect_counter++;
		/* Wait for the disconnect completions if any were issued */
1351 		if (disconnect_counter > 0) {
1353 				   "Had %d disconnects. Will wait on them",
1354 				   disconnect_counter);
1355 			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1357 	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1358 		/* Unpause to take care of updating scanning params */
1359 		hdev->scanning_paused = false;
1360 		/* Enable event filter for paired devices */
1361 		hci_req_set_event_filter(&req);
1362 		/* Enable passive scan at lower duty cycle */
1363 		__hci_update_background_scan(&req);
1364 		/* Pause scan changes again. */
1365 		hdev->scanning_paused = true;
1366 		hci_req_run(&req, suspend_req_complete);
	/* Resume path: undo everything done for suspend */
1368 		hdev->suspended = false;
1369 		hdev->scanning_paused = false;
1371 		hci_req_clear_event_filter(&req);
1372 		/* Reset passive/background scanning to normal */
1373 		__hci_update_background_scan(&req);
1374 		/* Enable all of the advertisement filters */
1375 		hci_req_add_set_adv_filter_enable(&req, true);
1377 		/* Unpause directed advertising */
1378 		hdev->advertising_paused = false;
1379 		if (hdev->advertising_old_state) {
1380 			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1381 				hdev->suspend_tasks);
1382 			hci_dev_set_flag(hdev, HCI_ADVERTISING);
1383 			queue_work(hdev->req_workqueue,
1384 				   &hdev->discoverable_update);
1385 			hdev->advertising_old_state = 0;
1388 		/* Resume other advertisements */
1389 		if (hdev->adv_instance_cnt)
1390 			__hci_req_resume_adv_instances(&req);
1392 		/* Unpause discovery */
1393 		hdev->discovery_paused = false;
1394 		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1395 		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
1396 			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1397 			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1398 			queue_work(hdev->req_workqueue, &hdev->discov_update);
1401 		hci_req_run(&req, suspend_req_complete);
1404 	hdev->suspend_state = next;
	/* Notifier task itself is done; wake any waiter */
1407 	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1408 	wake_up(&hdev->suspend_wait_q);
/* Convenience wrapper: is the currently selected advertising instance
 * scannable (i.e. carries scan response data)?
 */
1411 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1413 	return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
/* Queue the command(s) to turn advertising off: the extended variant
 * (handle 0x00 = all sets) on ext-adv capable controllers, otherwise
 * legacy LE_SET_ADV_ENABLE with enable = 0.
 * NOTE(review): the declaration of 'enable' is not visible in this
 * view — presumably a u8 set to 0x00 in the legacy branch.
 */
1416 void __hci_req_disable_advertising(struct hci_request *req)
1418 	if (ext_adv_capable(req->hdev)) {
1419 		__hci_req_disable_ext_adv_instance(req, 0x00);
1424 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Return the MGMT advertising flags for @instance.
 * Instance 0x00 is the default instance managed by the kernel: its
 * flags are synthesized from the device's CONNECTABLE/DISCOVERABLE
 * settings. Any other instance returns the flags stored at Add
 * Advertising time, or 0 if the instance does not exist.
 */
1428 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1431 	struct adv_info *adv_instance;
1433 	if (instance == 0x00) {
1434 		/* Instance 0 always manages the "Tx Power" and "Flags"
1437 		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1439 		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1440 		 * corresponds to the "connectable" instance flag.
1442 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1443 			flags |= MGMT_ADV_FLAG_CONNECTABLE;
	/* Limited discoverable takes precedence over general discoverable */
1445 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1446 			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1447 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1448 			flags |= MGMT_ADV_FLAG_DISCOV;
1453 	adv_instance = hci_find_adv_instance(hdev, instance);
1455 	/* Return 0 when we got an invalid instance identifier. */
1459 	return adv_instance->flags;
/* Decide whether advertising should use a Resolvable Private Address.
 * No privacy -> no RPA; basic privacy -> RPA; limited privacy -> RPA
 * only when NOT simultaneously discoverable and bondable.
 */
1462 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1464 	/* If privacy is not enabled don't use RPA */
1465 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1468 	/* If basic privacy mode is enabled use RPA */
1469 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1472 	/* If limited privacy mode is enabled don't use RPA if we're
1473 	 * both discoverable and bondable.
1475 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1476 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1479 	/* We're neither bondable nor discoverable in the limited
1480 	 * privacy mode, therefore use RPA.
/* Check the controller's supported-LE-states bitmask (hdev->le_states)
 * to decide whether advertising of the requested connectability is
 * permitted while existing LE connections are up. Bit numbers refer to
 * the Core Spec "LE Supported States" field.
 */
1485 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1487 	/* If there is no connection we are OK to advertise. */
1488 	if (hci_conn_num(hdev, LE_LINK) == 0)
1491 	/* Check le_states if there is any connection in slave role. */
1492 	if (hdev->conn_hash.le_num_slave > 0) {
1493 		/* Slave connection state and non connectable mode bit 20. */
1494 		if (!connectable && !(hdev->le_states[2] & 0x10))
1497 		/* Slave connection state and connectable mode bit 38
1498 		 * and scannable bit 21.
1500 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
1501 				    !(hdev->le_states[2] & 0x20)))
1505 	/* Check le_states if there is any connection in master role. */
1506 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1507 		/* Master connection state and non connectable mode bit 18. */
1508 		if (!connectable && !(hdev->le_states[2] & 0x02))
1511 		/* Master connection state and connectable mode bit 35 and
1514 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1515 				    !(hdev->le_states[2] & 0x08)))
/* Queue legacy advertising setup: picks ADV_IND / ADV_SCAN_IND /
 * ADV_NONCONN_IND based on instance flags and global connectable
 * setting, resolves the own-address type (possibly updating the random
 * address), programs intervals and channel map, then enables
 * advertising. Bails out silently if advertising is not allowed in the
 * current connection state or the random address cannot be updated.
 */
1522 void __hci_req_enable_advertising(struct hci_request *req)
1524 	struct hci_dev *hdev = req->hdev;
1525 	struct adv_info *adv_instance;
1526 	struct hci_cp_le_set_adv_param cp;
1527 	u8 own_addr_type, enable = 0x01;
1529 	u16 adv_min_interval, adv_max_interval;
1532 	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1533 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1535 	/* If the "connectable" instance flag was not set, then choose between
1536 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1538 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1539 		      mgmt_get_connectable(hdev);
1541 	if (!is_advertising_allowed(hdev, connectable))
	/* Restart: disable any ongoing advertising first */
1544 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1545 		__hci_req_disable_advertising(req);
1547 	/* Clear the HCI_LE_ADV bit temporarily so that the
1548 	 * hci_update_random_address knows that it's safe to go ahead
1549 	 * and write a new random address. The flag will be set back on
1550 	 * as soon as the SET_ADV_ENABLE HCI command completes.
1552 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1554 	/* Set require_privacy to true only when non-connectable
1555 	 * advertising is used. In that case it is fine to use a
1556 	 * non-resolvable private address.
1558 	if (hci_update_random_address(req, !connectable,
1559 				      adv_use_rpa(hdev, flags),
1560 				      &own_addr_type) < 0)
1563 	memset(&cp, 0, sizeof(cp));
	/* Per-instance intervals override the device defaults */
1566 		adv_min_interval = adv_instance->min_interval;
1567 		adv_max_interval = adv_instance->max_interval;
1569 		adv_min_interval = hdev->le_adv_min_interval;
1570 		adv_max_interval = hdev->le_adv_max_interval;
1574 		cp.type = LE_ADV_IND;
	/* Non-connectable: scannable if the instance has scan rsp data */
1576 		if (adv_cur_instance_is_scannable(hdev))
1577 			cp.type = LE_ADV_SCAN_IND;
1579 			cp.type = LE_ADV_NONCONN_IND;
	/* Use fast intervals while not (generally) discoverable */
1581 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1582 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1583 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1584 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1588 	cp.min_interval = cpu_to_le16(adv_min_interval);
1589 	cp.max_interval = cpu_to_le16(adv_max_interval);
1590 	cp.own_address_type = own_addr_type;
1591 	cp.channel_map = hdev->le_adv_channel_map;
1593 	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1595 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Append the local device name as an EIR field at ptr + ad_len.
 * Prefers the complete name when it fits in the short-name budget,
 * then the configured short name, then a truncated complete name.
 * Returns the new total AD length (unchanged if nothing fits).
 */
1598 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1601 	size_t complete_len;
1603 	/* no space left for name (+ NULL + type + len) */
1604 	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1607 	/* use complete name if present and fits */
1608 	complete_len = strlen(hdev->dev_name);
1609 	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1610 		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1611 				       hdev->dev_name, complete_len + 1);
1613 	/* use short name if present */
1614 	short_len = strlen(hdev->short_name);
1616 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1617 				       hdev->short_name, short_len + 1);
1619 	/* use shortened full name if present, we already know that name
1620 	 * is longer then HCI_MAX_SHORT_NAME_LENGTH
1623 		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1625 		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1626 		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1628 		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
/* Append the GAP Appearance value as a 16-bit EIR field; returns the
 * new total AD length.
 */
1635 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1637 	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
/* Build the scan response payload for the default (0x00) instance:
 * appearance (if set) followed by the local name. Returns its length.
 */
1640 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1642 	u8 scan_rsp_len = 0;
1644 	if (hdev->appearance) {
1645 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1648 	return append_local_name(hdev, ptr, scan_rsp_len);
/* Build the scan response payload for a specific advertising instance:
 * optional appearance and local name (per instance flags) plus the
 * instance's own scan_rsp_data. Returns the total length, or an
 * early value if the instance does not exist (lookup-failure path is
 * elided in this view).
 */
1651 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1654 	struct adv_info *adv_instance;
1656 	u8 scan_rsp_len = 0;
1658 	adv_instance = hci_find_adv_instance(hdev, instance);
1662 	instance_flags = adv_instance->flags;
1664 	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1665 		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	/* Copy the instance's raw scan response data after any fields */
1668 	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1669 	       adv_instance->scan_rsp_len);
1671 	scan_rsp_len += adv_instance->scan_rsp_len;
1673 	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1674 		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1676 	return scan_rsp_len;
/* Queue the command to (re)program the scan response data for
 * @instance, using the extended or legacy HCI command depending on the
 * controller. Skips the command entirely when the newly built payload
 * is identical to what is already cached in hdev->scan_rsp_data.
 */
1679 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1681 	struct hci_dev *hdev = req->hdev;
1684 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1687 	if (ext_adv_capable(hdev)) {
1688 		struct hci_cp_le_set_ext_scan_rsp_data cp;
1690 		memset(&cp, 0, sizeof(cp));
	/* Instance 0 uses the default payload, others their own data */
1693 			len = create_instance_scan_rsp_data(hdev, instance,
1696 			len = create_default_scan_rsp_data(hdev, cp.data);
1698 		if (hdev->scan_rsp_data_len == len &&
1699 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
	/* Cache the new payload so future updates can be skipped */
1702 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1703 		hdev->scan_rsp_data_len = len;
1705 		cp.handle = instance;
1707 		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1708 		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1710 		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1713 		struct hci_cp_le_set_scan_rsp_data cp;
1715 		memset(&cp, 0, sizeof(cp));
1718 			len = create_instance_scan_rsp_data(hdev, instance,
1721 			len = create_default_scan_rsp_data(hdev, cp.data);
1723 		if (hdev->scan_rsp_data_len == len &&
1724 		    !memcmp(cp.data, hdev->scan_rsp_data, len))
1727 		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1728 		hdev->scan_rsp_data_len = len;
1732 		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Build the advertising data payload for @instance into @ptr:
 * an optional Flags field (general/limited discoverable, no-BR/EDR),
 * the instance's own adv_data, and an optional Tx Power field.
 * Returns the total AD length (0 for an invalid non-zero instance).
 */
1736 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1738 	struct adv_info *adv_instance = NULL;
1739 	u8 ad_len = 0, flags = 0;
1742 	/* Return 0 when the current instance identifier is invalid. */
1744 		adv_instance = hci_find_adv_instance(hdev, instance);
1749 	instance_flags = get_adv_instance_flags(hdev, instance);
1751 	/* If instance already has the flags set skip adding it once
1754 	if (adv_instance && eir_get_data(adv_instance->adv_data,
1755 					 adv_instance->adv_data_len, EIR_FLAGS,
1759 	/* The Add Advertising command allows userspace to set both the general
1760 	 * and limited discoverable flags.
1762 	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1763 		flags |= LE_AD_GENERAL;
1765 	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1766 		flags |= LE_AD_LIMITED;
1768 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1769 		flags |= LE_AD_NO_BREDR;
1771 	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1772 		/* If a discovery flag wasn't provided, simply use the global
1776 			flags |= mgmt_get_adv_discov_flags(hdev);
1778 		/* If flags would still be empty, then there is no need to
1779 		 * include the "Flags" AD field".
	/* Append the instance's raw advertising data */
1793 		memcpy(ptr, adv_instance->adv_data,
1794 		       adv_instance->adv_data_len);
1795 		ad_len += adv_instance->adv_data_len;
1796 		ptr += adv_instance->adv_data_len;
1799 	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
	/* Ext-adv controllers report per-instance tx power; otherwise
	 * fall back to the device-wide value.
	 */
1802 		if (ext_adv_capable(hdev)) {
1804 				adv_tx_power = adv_instance->tx_power;
1806 				adv_tx_power = hdev->adv_tx_power;
1808 			adv_tx_power = hdev->adv_tx_power;
1811 		/* Provide Tx Power only if we can provide a valid value for it */
1812 		if (adv_tx_power != HCI_TX_POWER_INVALID) {
1814 			ptr[1] = EIR_TX_POWER;
1815 			ptr[2] = (u8)adv_tx_power;
/* Queue the command to (re)program the advertising data for @instance,
 * extended or legacy variant depending on the controller. No-op when
 * LE is disabled or when the payload matches the cached copy in
 * hdev->adv_data.
 */
1825 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1827 	struct hci_dev *hdev = req->hdev;
1830 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1833 	if (ext_adv_capable(hdev)) {
1834 		struct hci_cp_le_set_ext_adv_data cp;
1836 		memset(&cp, 0, sizeof(cp));
1838 		len = create_instance_adv_data(hdev, instance, cp.data);
1840 		/* There's nothing to do if the data hasn't changed */
1841 		if (hdev->adv_data_len == len &&
1842 		    memcmp(cp.data, hdev->adv_data, len) == 0)
	/* Cache the payload so future identical updates are skipped */
1845 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1846 		hdev->adv_data_len = len;
1849 		cp.handle = instance;
1850 		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1851 		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1853 		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1855 		struct hci_cp_le_set_adv_data cp;
1857 		memset(&cp, 0, sizeof(cp));
1859 		len = create_instance_adv_data(hdev, instance, cp.data);
1861 		/* There's nothing to do if the data hasn't changed */
1862 		if (hdev->adv_data_len == len &&
1863 		    memcmp(cp.data, hdev->adv_data, len) == 0)
1866 		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1867 		hdev->adv_data_len = len;
1871 		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Standalone wrapper: build a one-shot request that updates the
 * advertising data for @instance and run it immediately.
 * Returns the hci_req_run() result.
 */
1875 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1877 	struct hci_request req;
1879 	hci_req_init(&req, hdev);
1880 	__hci_req_update_adv_data(&req, instance);
1882 	return hci_req_run(&req, NULL);
/* Completion callback for the address-resolution toggle request; only
 * logs the command status. NOTE(review): the remaining parameter(s) of
 * the signature are not visible in this view.
 */
1885 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1888 	BT_DBG("%s status %u", hdev->name, status);
/* Disable LL-privacy address resolution on the controller. Bails out
 * when LL privacy is unsupported and resolution is not currently
 * enabled. NOTE(review): the 'enable' declaration (presumably 0x00)
 * is not visible in this view.
 */
1891 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1893 	struct hci_request req;
1896 	if (!use_ll_privacy(hdev) &&
1897 	    !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1900 	hci_req_init(&req, hdev);
1902 	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1904 	hci_req_run(&req, enable_addr_resolution_complete);
/* Completion callback for re-enabling advertising; logs status only. */
1907 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1909 	bt_dev_dbg(hdev, "status %u", status);
/* Re-enable advertising after it was implicitly stopped (e.g. after a
 * connection). Re-schedules the current instance if one is set;
 * otherwise restarts the default instance via the extended or legacy
 * path. No-op when advertising is off and no instances exist.
 */
1912 void hci_req_reenable_advertising(struct hci_dev *hdev)
1914 	struct hci_request req;
1916 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1917 	    list_empty(&hdev->adv_instances))
1920 	hci_req_init(&req, hdev);
1922 	if (hdev->cur_adv_instance) {
1923 		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1926 		if (ext_adv_capable(hdev)) {
1927 			__hci_req_start_ext_adv(&req, 0x00);
1929 			__hci_req_update_adv_data(&req, 0x00);
1930 			__hci_req_update_scan_rsp_data(&req, 0x00);
1931 			__hci_req_enable_advertising(&req);
1935 	hci_req_run(&req, adv_enable_complete);
/* Delayed-work handler fired when the current advertising instance's
 * duration expires: removes the instance (keeping others), and turns
 * advertising off entirely if none remain. Runs under hci_dev_lock
 * (the matching hci_dev_lock() call is elided in this view).
 */
1938 static void adv_timeout_expire(struct work_struct *work)
1940 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1941 					    adv_instance_expire.work);
1943 	struct hci_request req;
1946 	bt_dev_dbg(hdev, "");
1950 	hdev->adv_instance_timeout = 0;
1952 	instance = hdev->cur_adv_instance;
1953 	if (instance == 0x00)
1956 	hci_req_init(&req, hdev);
1958 	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1960 	if (list_empty(&hdev->adv_instances))
1961 		__hci_req_disable_advertising(&req);
1963 	hci_req_run(&req, NULL);
1966 	hci_dev_unlock(hdev);
/* One step of interleaved scanning: restart passive scan (disabling a
 * running one first) and flip the interleave state between
 * allowlist-filtered and unfiltered phases. Called via hci_req_sync
 * from interleave_scan_work.
 */
1969 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1972 	struct hci_dev *hdev = req->hdev;
1977 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1978 		hci_req_add_le_scan_disable(req, false);
1979 	hci_req_add_le_passive_scan(req);
1981 	switch (hdev->interleave_scan_state) {
1982 	case INTERLEAVE_SCAN_ALLOWLIST:
1983 		bt_dev_dbg(hdev, "next state: allowlist");
1984 		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1986 	case INTERLEAVE_SCAN_NO_FILTER:
1987 		bt_dev_dbg(hdev, "next state: no filter");
1988 		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1990 	case INTERLEAVE_SCAN_NONE:
	/* Work should not run while interleaving is inactive */
1991 		BT_ERR("unexpected error");
1995 	hci_dev_unlock(hdev);
/* Delayed-work driver for interleaved scanning: pick the duration of
 * the current phase (allowlist vs no-filter), synchronously run the
 * scan-state transition, and requeue itself unless interleaving was
 * canceled in the meantime.
 */
2002 static void interleave_scan_work(struct work_struct *work)
2003 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2003 					    interleave_scan.work);
2005 	unsigned long timeout;
2007 	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2008 		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2009 	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2010 		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2012 		bt_dev_err(hdev, "unexpected error");
2016 	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2017 		     HCI_CMD_TIMEOUT, &status);
2019 	/* Don't continue interleaving if it was canceled */
2020 	if (is_interleave_scanning(hdev))
2021 		queue_delayed_work(hdev->req_workqueue,
2022 				   &hdev->interleave_scan, timeout);
/* Resolve the own-address type and (if needed) the random address to
 * use for extended advertising, without queueing any HCI command:
 * RPA when privacy is on (regenerated on expiry, with the rpa_expired
 * delayed work re-armed), NRPA when privacy is required but RPAs are
 * not used, else the public address. @adv_instance may be NULL for the
 * default instance. Writes results to *own_addr_type / *rand_addr.
 */
2025 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2026 			   bool use_rpa, struct adv_info *adv_instance,
2027 			   u8 *own_addr_type, bdaddr_t *rand_addr)
2031 	bacpy(rand_addr, BDADDR_ANY);
2033 	/* If privacy is enabled use a resolvable private address. If
2034 	 * current RPA has expired then generate a new one.
2039 		/* If Controller supports LL Privacy use own address type is
2042 		if (use_ll_privacy(hdev))
2043 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2045 			*own_addr_type = ADDR_LE_DEV_RANDOM;
	/* Per-instance RPA still valid: nothing to regenerate */
2048 			if (!adv_instance->rpa_expired &&
2049 			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
2052 			adv_instance->rpa_expired = false;
2054 			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2055 			    !bacmp(&hdev->random_addr, &hdev->rpa))
2059 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2061 			bt_dev_err(hdev, "failed to generate new RPA");
2065 		bacpy(rand_addr, &hdev->rpa);
	/* Re-arm the RPA rotation timer */
2067 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2069 			queue_delayed_work(hdev->workqueue,
2070 					   &adv_instance->rpa_expired_cb, to);
2072 			queue_delayed_work(hdev->workqueue,
2073 					   &hdev->rpa_expired, to);
2078 	/* In case of required privacy without resolvable private address,
2079 	 * use an non-resolvable private address. This is useful for
2080 	 * non-connectable advertising.
2082 	if (require_privacy) {
2086 		/* The non-resolvable private address is generated
2087 		 * from random six bytes with the two most significant
2090 		get_random_bytes(&nrpa, 6);
2093 		/* The non-resolvable private address shall not be
2094 		 * equal to the public address.
2096 		if (bacmp(&hdev->bdaddr, &nrpa))
2100 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2101 		bacpy(rand_addr, &nrpa);
2106 	/* No privacy so use a public address. */
2107 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Queue LE Clear Advertising Sets: removes all extended advertising
 * sets from the controller.
 */
2112 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2114 	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
/* Queue the LE Set Extended Advertising Parameters command for
 * @instance (plus a Set Advertising Set Random Address command when a
 * fresh random address is needed). Chooses event properties from the
 * connectable/scannable flags and legacy-vs-extended PDU selection,
 * PHYs from the SEC_2M/SEC_CODED flags. Returns negative errno on
 * failure (e.g. advertising not allowed, address resolution failed).
 */
2117 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2119 	struct hci_cp_le_set_ext_adv_params cp;
2120 	struct hci_dev *hdev = req->hdev;
2123 	bdaddr_t random_addr;
2126 	struct adv_info *adv_instance;
2130 		adv_instance = hci_find_adv_instance(hdev, instance);
2134 		adv_instance = NULL;
2137 	flags = get_adv_instance_flags(hdev, instance);
2139 	/* If the "connectable" instance flag was not set, then choose between
2140 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2142 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2143 		      mgmt_get_connectable(hdev);
2145 	if (!is_advertising_allowed(hdev, connectable))
2148 	/* Set require_privacy to true only when non-connectable
2149 	 * advertising is used. In that case it is fine to use a
2150 	 * non-resolvable private address.
2152 	err = hci_get_random_address(hdev, !connectable,
2153 				     adv_use_rpa(hdev, flags), adv_instance,
2154 				     &own_addr_type, &random_addr);
2158 	memset(&cp, 0, sizeof(cp));
	/* Per-instance interval/tx-power override the device defaults */
2161 		hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2162 		hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2163 		cp.tx_power = adv_instance->tx_power;
2165 		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2166 		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2167 		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
	/* Secondary-channel flags select extended (non-legacy) PDUs */
2170 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2174 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2176 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2177 	} else if (adv_instance_is_scannable(hdev, instance)) {
2179 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2181 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2184 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2186 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2189 	cp.own_addr_type = own_addr_type;
2190 	cp.channel_map = hdev->le_adv_channel_map;
2191 	cp.handle = instance;
2193 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
2194 		cp.primary_phy = HCI_ADV_PHY_1M;
2195 		cp.secondary_phy = HCI_ADV_PHY_2M;
2196 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2197 		cp.primary_phy = HCI_ADV_PHY_CODED;
2198 		cp.secondary_phy = HCI_ADV_PHY_CODED;
2200 		/* In all other cases use 1M */
2201 		cp.primary_phy = HCI_ADV_PHY_1M;
2202 		cp.secondary_phy = HCI_ADV_PHY_1M;
2205 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2207 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2208 	    bacmp(&random_addr, BDADDR_ANY)) {
2209 		struct hci_cp_le_set_adv_set_rand_addr cp;
2211 		/* Check if random address need to be updated */
	/* Skip the command when the address is already programmed */
2213 			if (!bacmp(&random_addr, &adv_instance->random_addr))
2216 			if (!bacmp(&random_addr, &hdev->random_addr))
2220 		memset(&cp, 0, sizeof(cp));
2222 		cp.handle = instance;
2223 		bacpy(&cp.bdaddr, &random_addr);
2226 			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
/* Queue LE Set Extended Advertising Enable for one advertising set
 * (@instance). The per-set duration is programmed so the controller
 * itself stops advertising after it elapses.
 * NOTE(review): duration is computed from adv_instance->timeout (in
 * seconds) although the guard tests adv_instance->duration (in ms) —
 * looks inconsistent; confirm against the upstream fix history before
 * relying on the timeout semantics here.
 */
2233 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2235 	struct hci_dev *hdev = req->hdev;
2236 	struct hci_cp_le_set_ext_adv_enable *cp;
2237 	struct hci_cp_ext_adv_set *adv_set;
2238 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2239 	struct adv_info *adv_instance;
2242 		adv_instance = hci_find_adv_instance(hdev, instance);
2246 		adv_instance = NULL;
	/* Single set immediately follows the enable header in 'data' */
2250 	adv_set = (void *) cp->data;
2252 	memset(cp, 0, sizeof(*cp));
2255 	cp->num_of_sets = 0x01;
2257 	memset(adv_set, 0, sizeof(*adv_set));
2259 	adv_set->handle = instance;
2261 	/* Set duration per instance since controller is responsible for
2264 	if (adv_instance && adv_instance->duration) {
2265 		u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2267 		/* Time = N * 10 ms */
2268 		adv_set->duration = cpu_to_le16(duration / 10);
2271 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2272 		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
/* Queue LE Set Extended Advertising Enable with enable = 0 to disable
 * @instance; instance 0x00 (num_of_sets = 0) disables all sets.
 * Fails with an error (elided in this view) if a non-zero instance
 * does not exist.
 */
2278 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2280 	struct hci_dev *hdev = req->hdev;
2281 	struct hci_cp_le_set_ext_adv_enable *cp;
2282 	struct hci_cp_ext_adv_set *adv_set;
2283 	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2286 	/* If request specifies an instance that doesn't exist, fail */
2287 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2290 	memset(data, 0, sizeof(data));
2293 	adv_set = (void *)cp->data;
2295 	/* Instance 0x00 indicates all advertising instances will be disabled */
2296 	cp->num_of_sets = !!instance;
2299 	adv_set->handle = instance;
	/* Command length shrinks to header-only for the all-sets case */
2301 	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2302 	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
/* Queue LE Remove Advertising Set for @instance; fails for a non-zero
 * instance that does not exist.
 */
2307 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2309 	struct hci_dev *hdev = req->hdev;
2311 	/* If request specifies an instance that doesn't exist, fail */
2312 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2315 	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
/* (Re)start extended advertising for @instance: disable the set first
 * if the controller already knows about it, then program parameters,
 * scan response data, and enable. Returns the setup error, if any.
 */
2320 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2322 	struct hci_dev *hdev = req->hdev;
2323 	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2326 	/* If instance isn't pending, the chip knows about it, and it's safe to
2329 	if (adv_instance && !adv_instance->pending)
2330 		__hci_req_disable_ext_adv_instance(req, instance);
2332 	err = __hci_req_setup_ext_adv_instance(req, instance);
2336 	__hci_req_update_scan_rsp_data(req, instance);
2337 	__hci_req_enable_ext_advertising(req, instance);
/* Make @instance the current advertising instance and start it,
 * accounting its remaining lifetime and arming the expiry timer (the
 * timer is only used for legacy advertising — extended advertising
 * lets the controller handle duration). When @force is false and the
 * same instance is already advertising, no HCI traffic is generated.
 */
2342 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2345 	struct hci_dev *hdev = req->hdev;
2346 	struct adv_info *adv_instance = NULL;
	/* Userspace-controlled advertising or no instances: bail out */
2349 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2350 	    list_empty(&hdev->adv_instances))
	/* An expiry timer is already pending for another instance */
2353 	if (hdev->adv_instance_timeout)
2356 	adv_instance = hci_find_adv_instance(hdev, instance);
2360 	/* A zero timeout means unlimited advertising. As long as there is
2361 	 * only one instance, duration should be ignored. We still set a timeout
2362 	 * in case further instances are being added later on.
2364 	 * If the remaining lifetime of the instance is more than the duration
2365 	 * then the timeout corresponds to the duration, otherwise it will be
2366 	 * reduced to the remaining instance lifetime.
2368 	if (adv_instance->timeout == 0 ||
2369 	    adv_instance->duration <= adv_instance->remaining_time)
2370 		timeout = adv_instance->duration;
2372 		timeout = adv_instance->remaining_time;
2374 	/* The remaining time is being reduced unless the instance is being
2375 	 * advertised without time limit.
2377 	if (adv_instance->timeout)
2378 		adv_instance->remaining_time =
2379 				adv_instance->remaining_time - timeout;
2381 	/* Only use work for scheduling instances with legacy advertising */
2382 	if (!ext_adv_capable(hdev)) {
2383 		hdev->adv_instance_timeout = timeout;
2384 		queue_delayed_work(hdev->req_workqueue,
2385 			   &hdev->adv_instance_expire,
2386 			   msecs_to_jiffies(timeout * 1000));
2389 	/* If we're just re-scheduling the same instance again then do not
2390 	 * execute any HCI commands. This happens when a single instance is
2393 	if (!force && hdev->cur_adv_instance == instance &&
2394 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
2397 	hdev->cur_adv_instance = instance;
2398 	if (ext_adv_capable(hdev)) {
2399 		__hci_req_start_ext_adv(req, instance);
2401 		__hci_req_update_adv_data(req, instance);
2402 		__hci_req_update_scan_rsp_data(req, instance);
2403 		__hci_req_enable_advertising(req);
2409 /* For a single instance:
2410  * - force == true: The instance will be removed even when its remaining
2411  *   lifetime is not zero.
2412  * - force == false: the instance will be deactivated but kept stored unless
2413  *   the remaining lifetime is zero.
2415  * For instance == 0x00:
2416  * - force == true: All instances will be removed regardless of their timeout
2418  * - force == false: Only instances that have a timeout will be removed.
 *
 * @sk may be NULL (no mgmt event sender); @req may be NULL, in which
 * case no follow-up advertising is scheduled here.
 */
2420 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2421 				struct hci_request *req, u8 instance,
2424 	struct adv_info *adv_instance, *n, *next_instance = NULL;
2428 	/* Cancel any timeout concerning the removed instance(s). */
2429 	if (!instance || hdev->cur_adv_instance == instance)
2430 		cancel_adv_timeout(hdev);
2432 	/* Get the next instance to advertise BEFORE we remove
2433 	 * the current one. This can be the same instance again
2434 	 * if there is only one instance.
2436 	if (instance && hdev->cur_adv_instance == instance)
2437 		next_instance = hci_get_next_instance(hdev, instance);
2439 	if (instance == 0x00) {
2440 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
	/* Without force, keep instances that advertise indefinitely */
2442 			if (!(force || adv_instance->timeout))
2445 			rem_inst = adv_instance->instance;
2446 			err = hci_remove_adv_instance(hdev, rem_inst);
2448 				mgmt_advertising_removed(sk, hdev, rem_inst);
2451 		adv_instance = hci_find_adv_instance(hdev, instance);
2453 		if (force || (adv_instance && adv_instance->timeout &&
2454 			      !adv_instance->remaining_time)) {
2455 			/* Don't advertise a removed instance. */
2456 			if (next_instance &&
2457 			    next_instance->instance == instance)
2458 				next_instance = NULL;
2460 			err = hci_remove_adv_instance(hdev, instance);
2462 				mgmt_advertising_removed(sk, hdev, instance);
	/* Schedule the next instance only when it makes sense to */
2466 	if (!req || !hdev_is_powered(hdev) ||
2467 	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
2470 	if (next_instance && !ext_adv_capable(hdev))
2471 		__hci_req_schedule_adv_instance(req, next_instance->instance,
/* Queue LE Set Random Address with @rpa — unless advertising or an LE
 * connection attempt is in flight, in which case the update is
 * deferred by re-flagging the RPA as expired.
 */
2475 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2477 	struct hci_dev *hdev = req->hdev;
2479 	/* If we're advertising or initiating an LE connection we can't
2480 	 * go ahead and change the random address at this time. This is
2481 	 * because the eventual initiator address used for the
2482 	 * subsequently created connection will be undefined (some
2483 	 * controllers use the new address and others the one we had
2484 	 * when the operation started).
2486 	 * In this kind of scenario skip the update and let the random
2487 	 * address be updated at the next cycle.
2489 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2490 	    hci_lookup_le_connect(hdev)) {
2491 		bt_dev_dbg(hdev, "Deferring random address update");
2492 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2496 	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Determine the own-address type for scanning/connecting and queue any
 * random-address update needed: RPA under privacy (regenerated when
 * expired, rotation timer re-armed), NRPA when privacy is required
 * without RPA, static address when forced or no public address exists,
 * else the public address. Returns 0 or negative errno.
 */
2499 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2500 			      bool use_rpa, u8 *own_addr_type)
2502 	struct hci_dev *hdev = req->hdev;
2505 	/* If privacy is enabled use a resolvable private address. If
2506 	 * current RPA has expired or there is something else than
2507 	 * the current RPA in use, then generate a new one.
2512 		/* If Controller supports LL Privacy use own address type is
2515 		if (use_ll_privacy(hdev))
2516 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2518 			*own_addr_type = ADDR_LE_DEV_RANDOM;
	/* Current RPA still valid and programmed: nothing to do */
2520 		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2521 		    !bacmp(&hdev->random_addr, &hdev->rpa))
2524 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2526 			bt_dev_err(hdev, "failed to generate new RPA");
2530 		set_random_addr(req, &hdev->rpa);
2532 		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2533 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2538 	/* In case of required privacy without resolvable private address,
2539 	 * use an non-resolvable private address. This is useful for active
2540 	 * scanning and non-connectable advertising.
2542 	if (require_privacy) {
2546 		/* The non-resolvable private address is generated
2547 		 * from random six bytes with the two most significant
2550 		get_random_bytes(&nrpa, 6);
2553 		/* The non-resolvable private address shall not be
2554 		 * equal to the public address.
2556 		if (bacmp(&hdev->bdaddr, &nrpa))
2560 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2561 		set_random_addr(req, &nrpa);
2565 	/* If forcing static address is in use or there is no public
2566 	 * address use the static address as random address (but skip
2567 	 * the HCI command if the current random address is already the
2570 	 * In case BR/EDR has been disabled on a dual-mode controller
2571 	 * and a static address has been configured, then use that
2572 	 * address instead of the public BR/EDR address.
2574 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2575 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2576 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2577 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2578 		*own_addr_type = ADDR_LE_DEV_RANDOM;
2579 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
2580 			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2581 				    &hdev->static_addr);
2585 	/* Neither privacy nor static address is being used so use a
2588 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Return true if any whitelisted BR/EDR device currently has no
 * established ACL connection — used to decide whether page scan must
 * stay enabled so those devices can reconnect.
 */
2593 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2595 	struct bdaddr_list *b;
2597 	list_for_each_entry(b, &hdev->whitelist, list) {
2598 		struct hci_conn *conn;
2600 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2604 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2611 void __hci_req_update_scan(struct hci_request *req)
2613 struct hci_dev *hdev = req->hdev;
2616 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2619 if (!hdev_is_powered(hdev))
2622 if (mgmt_powering_down(hdev))
2625 if (hdev->scanning_paused)
2628 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2629 disconnected_whitelist_entries(hdev))
2632 scan = SCAN_DISABLED;
2634 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2635 scan |= SCAN_INQUIRY;
2637 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2638 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2641 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2644 static int update_scan(struct hci_request *req, unsigned long opt)
2646 hci_dev_lock(req->hdev);
2647 __hci_req_update_scan(req);
2648 hci_dev_unlock(req->hdev);
2652 static void scan_update_work(struct work_struct *work)
2654 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2656 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2659 static int connectable_update(struct hci_request *req, unsigned long opt)
2661 struct hci_dev *hdev = req->hdev;
2665 __hci_req_update_scan(req);
2667 /* If BR/EDR is not enabled and we disable advertising as a
2668 * by-product of disabling connectable, we need to update the
2669 * advertising flags.
2671 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2672 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2674 /* Update the advertising parameters if necessary */
2675 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2676 !list_empty(&hdev->adv_instances)) {
2677 if (ext_adv_capable(hdev))
2678 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2680 __hci_req_enable_advertising(req);
2683 __hci_update_background_scan(req);
2685 hci_dev_unlock(hdev);
2690 static void connectable_update_work(struct work_struct *work)
2692 struct hci_dev *hdev = container_of(work, struct hci_dev,
2693 connectable_update);
2696 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2697 mgmt_set_connectable_complete(hdev, status);
2700 static u8 get_service_classes(struct hci_dev *hdev)
2702 struct bt_uuid *uuid;
2705 list_for_each_entry(uuid, &hdev->uuids, list)
2706 val |= uuid->svc_hint;
2711 void __hci_req_update_class(struct hci_request *req)
2713 struct hci_dev *hdev = req->hdev;
2716 bt_dev_dbg(hdev, "");
2718 if (!hdev_is_powered(hdev))
2721 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2724 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2727 cod[0] = hdev->minor_class;
2728 cod[1] = hdev->major_class;
2729 cod[2] = get_service_classes(hdev);
2731 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2734 if (memcmp(cod, hdev->dev_class, 3) == 0)
2737 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2740 static void write_iac(struct hci_request *req)
2742 struct hci_dev *hdev = req->hdev;
2743 struct hci_cp_write_current_iac_lap cp;
2745 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2748 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2749 /* Limited discoverable mode */
2750 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2751 cp.iac_lap[0] = 0x00; /* LIAC */
2752 cp.iac_lap[1] = 0x8b;
2753 cp.iac_lap[2] = 0x9e;
2754 cp.iac_lap[3] = 0x33; /* GIAC */
2755 cp.iac_lap[4] = 0x8b;
2756 cp.iac_lap[5] = 0x9e;
2758 /* General discoverable mode */
2760 cp.iac_lap[0] = 0x33; /* GIAC */
2761 cp.iac_lap[1] = 0x8b;
2762 cp.iac_lap[2] = 0x9e;
2765 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2766 (cp.num_iac * 3) + 1, &cp);
2769 static int discoverable_update(struct hci_request *req, unsigned long opt)
2771 struct hci_dev *hdev = req->hdev;
2775 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2777 __hci_req_update_scan(req);
2778 __hci_req_update_class(req);
2781 /* Advertising instances don't use the global discoverable setting, so
2782 * only update AD if advertising was enabled using Set Advertising.
2784 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2785 __hci_req_update_adv_data(req, 0x00);
2787 /* Discoverable mode affects the local advertising
2788 * address in limited privacy mode.
2790 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2791 if (ext_adv_capable(hdev))
2792 __hci_req_start_ext_adv(req, 0x00);
2794 __hci_req_enable_advertising(req);
2798 hci_dev_unlock(hdev);
2803 static void discoverable_update_work(struct work_struct *work)
2805 struct hci_dev *hdev = container_of(work, struct hci_dev,
2806 discoverable_update);
2809 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2810 mgmt_set_discoverable_complete(hdev, status);
2813 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2816 switch (conn->state) {
2819 if (conn->type == AMP_LINK) {
2820 struct hci_cp_disconn_phy_link cp;
2822 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2824 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2827 struct hci_cp_disconnect dc;
2829 dc.handle = cpu_to_le16(conn->handle);
2831 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2834 conn->state = BT_DISCONN;
2838 if (conn->type == LE_LINK) {
2839 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2841 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2843 } else if (conn->type == ACL_LINK) {
2844 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2846 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2851 if (conn->type == ACL_LINK) {
2852 struct hci_cp_reject_conn_req rej;
2854 bacpy(&rej.bdaddr, &conn->dst);
2855 rej.reason = reason;
2857 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2859 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2860 struct hci_cp_reject_sync_conn_req rej;
2862 bacpy(&rej.bdaddr, &conn->dst);
2864 /* SCO rejection has its own limited set of
2865 * allowed error values (0x0D-0x0F) which isn't
2866 * compatible with most values passed to this
2867 * function. To be safe hard-code one of the
2868 * values that's suitable for SCO.
2870 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2872 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2877 conn->state = BT_CLOSED;
2882 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2885 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2888 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2890 struct hci_request req;
2893 hci_req_init(&req, conn->hdev);
2895 __hci_abort_conn(&req, conn, reason);
2897 err = hci_req_run(&req, abort_conn_complete);
2898 if (err && err != -ENODATA) {
2899 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2906 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2908 hci_dev_lock(req->hdev);
2909 __hci_update_background_scan(req);
2910 hci_dev_unlock(req->hdev);
2914 static void bg_scan_update(struct work_struct *work)
2916 struct hci_dev *hdev = container_of(work, struct hci_dev,
2918 struct hci_conn *conn;
2922 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2928 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2930 hci_le_conn_failed(conn, status);
2932 hci_dev_unlock(hdev);
2935 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2937 hci_req_add_le_scan_disable(req, false);
2941 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2944 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2945 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2946 struct hci_cp_inquiry cp;
2948 bt_dev_dbg(req->hdev, "");
2950 hci_dev_lock(req->hdev);
2951 hci_inquiry_cache_flush(req->hdev);
2952 hci_dev_unlock(req->hdev);
2954 memset(&cp, 0, sizeof(cp));
2956 if (req->hdev->discovery.limited)
2957 memcpy(&cp.lap, liac, sizeof(cp.lap));
2959 memcpy(&cp.lap, giac, sizeof(cp.lap));
2963 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2968 static void le_scan_disable_work(struct work_struct *work)
2970 struct hci_dev *hdev = container_of(work, struct hci_dev,
2971 le_scan_disable.work);
2974 bt_dev_dbg(hdev, "");
2976 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2979 cancel_delayed_work(&hdev->le_scan_restart);
2981 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2983 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2988 hdev->discovery.scan_start = 0;
2990 /* If we were running LE only scan, change discovery state. If
2991 * we were running both LE and BR/EDR inquiry simultaneously,
2992 * and BR/EDR inquiry is already finished, stop discovery,
2993 * otherwise BR/EDR inquiry will stop discovery when finished.
2994 * If we will resolve remote device name, do not change
2998 if (hdev->discovery.type == DISCOV_TYPE_LE)
2999 goto discov_stopped;
3001 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3004 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3005 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3006 hdev->discovery.state != DISCOVERY_RESOLVING)
3007 goto discov_stopped;
3012 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3013 HCI_CMD_TIMEOUT, &status);
3015 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3016 goto discov_stopped;
3023 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3024 hci_dev_unlock(hdev);
3027 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3029 struct hci_dev *hdev = req->hdev;
3031 /* If controller is not scanning we are done. */
3032 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3035 if (hdev->scanning_paused) {
3036 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3040 hci_req_add_le_scan_disable(req, false);
3042 if (use_ext_scan(hdev)) {
3043 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3045 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3046 ext_enable_cp.enable = LE_SCAN_ENABLE;
3047 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3049 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3050 sizeof(ext_enable_cp), &ext_enable_cp);
3052 struct hci_cp_le_set_scan_enable cp;
3054 memset(&cp, 0, sizeof(cp));
3055 cp.enable = LE_SCAN_ENABLE;
3056 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3057 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3063 static void le_scan_restart_work(struct work_struct *work)
3065 struct hci_dev *hdev = container_of(work, struct hci_dev,
3066 le_scan_restart.work);
3067 unsigned long timeout, duration, scan_start, now;
3070 bt_dev_dbg(hdev, "");
3072 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3074 bt_dev_err(hdev, "failed to restart LE scan: status %d",
3081 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3082 !hdev->discovery.scan_start)
3085 /* When the scan was started, hdev->le_scan_disable has been queued
3086 * after duration from scan_start. During scan restart this job
3087 * has been canceled, and we need to queue it again after proper
3088 * timeout, to make sure that scan does not run indefinitely.
3090 duration = hdev->discovery.scan_duration;
3091 scan_start = hdev->discovery.scan_start;
3093 if (now - scan_start <= duration) {
3096 if (now >= scan_start)
3097 elapsed = now - scan_start;
3099 elapsed = ULONG_MAX - scan_start + now;
3101 timeout = duration - elapsed;
3106 queue_delayed_work(hdev->req_workqueue,
3107 &hdev->le_scan_disable, timeout);
3110 hci_dev_unlock(hdev);
3113 static int active_scan(struct hci_request *req, unsigned long opt)
3115 uint16_t interval = opt;
3116 struct hci_dev *hdev = req->hdev;
3118 /* White list is not used for discovery */
3119 u8 filter_policy = 0x00;
3120 /* Discovery doesn't require controller address resolution */
3121 bool addr_resolv = false;
3124 bt_dev_dbg(hdev, "");
3126 /* If controller is scanning, it means the background scanning is
3127 * running. Thus, we should temporarily stop it in order to set the
3128 * discovery scanning parameters.
3130 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3131 hci_req_add_le_scan_disable(req, false);
3132 cancel_interleave_scan(hdev);
3135 /* All active scans will be done with either a resolvable private
3136 * address (when privacy feature has been enabled) or non-resolvable
3139 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3142 own_addr_type = ADDR_LE_DEV_PUBLIC;
3144 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3145 hdev->le_scan_window_discovery, own_addr_type,
3146 filter_policy, addr_resolv);
3150 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3154 bt_dev_dbg(req->hdev, "");
3156 err = active_scan(req, opt);
3160 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3163 static void start_discovery(struct hci_dev *hdev, u8 *status)
3165 unsigned long timeout;
3167 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3169 switch (hdev->discovery.type) {
3170 case DISCOV_TYPE_BREDR:
3171 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3172 hci_req_sync(hdev, bredr_inquiry,
3173 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3176 case DISCOV_TYPE_INTERLEAVED:
3177 /* When running simultaneous discovery, the LE scanning time
3178 * should occupy the whole discovery time sine BR/EDR inquiry
3179 * and LE scanning are scheduled by the controller.
3181 * For interleaving discovery in comparison, BR/EDR inquiry
3182 * and LE scanning are done sequentially with separate
3185 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3187 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3188 /* During simultaneous discovery, we double LE scan
3189 * interval. We must leave some time for the controller
3190 * to do BR/EDR inquiry.
3192 hci_req_sync(hdev, interleaved_discov,
3193 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3198 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3199 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3200 HCI_CMD_TIMEOUT, status);
3202 case DISCOV_TYPE_LE:
3203 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3204 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3205 HCI_CMD_TIMEOUT, status);
3208 *status = HCI_ERROR_UNSPECIFIED;
3215 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3217 /* When service discovery is used and the controller has a
3218 * strict duplicate filter, it is important to remember the
3219 * start and duration of the scan. This is required for
3220 * restarting scanning during the discovery phase.
3222 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3223 hdev->discovery.result_filtering) {
3224 hdev->discovery.scan_start = jiffies;
3225 hdev->discovery.scan_duration = timeout;
3228 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3232 bool hci_req_stop_discovery(struct hci_request *req)
3234 struct hci_dev *hdev = req->hdev;
3235 struct discovery_state *d = &hdev->discovery;
3236 struct hci_cp_remote_name_req_cancel cp;
3237 struct inquiry_entry *e;
3240 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3242 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3243 if (test_bit(HCI_INQUIRY, &hdev->flags))
3244 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3246 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3247 cancel_delayed_work(&hdev->le_scan_disable);
3248 hci_req_add_le_scan_disable(req, false);
3253 /* Passive scanning */
3254 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3255 hci_req_add_le_scan_disable(req, false);
3260 /* No further actions needed for LE-only discovery */
3261 if (d->type == DISCOV_TYPE_LE)
3264 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3265 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3270 bacpy(&cp.bdaddr, &e->data.bdaddr);
3271 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3279 static int stop_discovery(struct hci_request *req, unsigned long opt)
3281 hci_dev_lock(req->hdev);
3282 hci_req_stop_discovery(req);
3283 hci_dev_unlock(req->hdev);
3288 static void discov_update(struct work_struct *work)
3290 struct hci_dev *hdev = container_of(work, struct hci_dev,
3294 switch (hdev->discovery.state) {
3295 case DISCOVERY_STARTING:
3296 start_discovery(hdev, &status);
3297 mgmt_start_discovery_complete(hdev, status);
3299 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3301 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3303 case DISCOVERY_STOPPING:
3304 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3305 mgmt_stop_discovery_complete(hdev, status);
3307 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3309 case DISCOVERY_STOPPED:
3315 static void discov_off(struct work_struct *work)
3317 struct hci_dev *hdev = container_of(work, struct hci_dev,
3320 bt_dev_dbg(hdev, "");
3324 /* When discoverable timeout triggers, then just make sure
3325 * the limited discoverable flag is cleared. Even in the case
3326 * of a timeout triggered from general discoverable, it is
3327 * safe to unconditionally clear the flag.
3329 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3330 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3331 hdev->discov_timeout = 0;
3333 hci_dev_unlock(hdev);
3335 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3336 mgmt_new_settings(hdev);
3339 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3341 struct hci_dev *hdev = req->hdev;
3346 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3347 !lmp_host_ssp_capable(hdev)) {
3350 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3352 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3355 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3356 sizeof(support), &support);
3360 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3361 lmp_bredr_capable(hdev)) {
3362 struct hci_cp_write_le_host_supported cp;
3367 /* Check first if we already have the right
3368 * host state (host features set)
3370 if (cp.le != lmp_host_le_capable(hdev) ||
3371 cp.simul != lmp_host_le_br_capable(hdev))
3372 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3376 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3377 /* Make sure the controller has a good default for
3378 * advertising data. This also applies to the case
3379 * where BR/EDR was toggled during the AUTO_OFF phase.
3381 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3382 list_empty(&hdev->adv_instances)) {
3385 if (ext_adv_capable(hdev)) {
3386 err = __hci_req_setup_ext_adv_instance(req,
3389 __hci_req_update_scan_rsp_data(req,
3393 __hci_req_update_adv_data(req, 0x00);
3394 __hci_req_update_scan_rsp_data(req, 0x00);
3397 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3398 if (!ext_adv_capable(hdev))
3399 __hci_req_enable_advertising(req);
3401 __hci_req_enable_ext_advertising(req,
3404 } else if (!list_empty(&hdev->adv_instances)) {
3405 struct adv_info *adv_instance;
3407 adv_instance = list_first_entry(&hdev->adv_instances,
3408 struct adv_info, list);
3409 __hci_req_schedule_adv_instance(req,
3410 adv_instance->instance,
3415 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3416 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3417 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3418 sizeof(link_sec), &link_sec);
3420 if (lmp_bredr_capable(hdev)) {
3421 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3422 __hci_req_write_fast_connectable(req, true);
3424 __hci_req_write_fast_connectable(req, false);
3425 __hci_req_update_scan(req);
3426 __hci_req_update_class(req);
3427 __hci_req_update_name(req);
3428 __hci_req_update_eir(req);
3431 hci_dev_unlock(hdev);
3435 int __hci_req_hci_power_on(struct hci_dev *hdev)
3437 /* Register the available SMP channels (BR/EDR and LE) only when
3438 * successfully powering on the controller. This late
3439 * registration is required so that LE SMP can clearly decide if
3440 * the public address or static address is used.
3444 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3448 void hci_request_setup(struct hci_dev *hdev)
3450 INIT_WORK(&hdev->discov_update, discov_update);
3451 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3452 INIT_WORK(&hdev->scan_update, scan_update_work);
3453 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3454 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3455 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3456 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3457 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3458 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3459 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3462 void hci_request_cancel_all(struct hci_dev *hdev)
3464 hci_req_sync_cancel(hdev, ENODEV);
3466 cancel_work_sync(&hdev->discov_update);
3467 cancel_work_sync(&hdev->bg_scan_update);
3468 cancel_work_sync(&hdev->scan_update);
3469 cancel_work_sync(&hdev->connectable_update);
3470 cancel_work_sync(&hdev->discoverable_update);
3471 cancel_delayed_work_sync(&hdev->discov_off);
3472 cancel_delayed_work_sync(&hdev->le_scan_disable);
3473 cancel_delayed_work_sync(&hdev->le_scan_restart);
3475 if (hdev->adv_instance_timeout) {
3476 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3477 hdev->adv_instance_timeout = 0;
3480 cancel_interleave_scan(hdev);