/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define LE_SUSPEND_SCAN_WINDOW		0x0012
#define LE_SUSPEND_SCAN_INTERVAL	0x0400

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

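/* Usage sketch (illustrative, not from the original file): a caller builds
 * a request, queues one or more commands and hands it off with a completion
 * callback. The callback name below is hypothetical.
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	err = hci_req_run(&req, my_complete_cb);
 */
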
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

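/* Example (illustrative): issuing a command synchronously and consuming the
 * returned event skb. Opcode and timeout are just examples.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */
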
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

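/* Example (illustrative): hci_req_sync() takes a request-builder callback.
 * A minimal builder, with a hypothetical name, that only toggles page scan:
 *
 *	static int set_page_scan(struct hci_request *req, unsigned long opt)
 *	{
 *		u8 scan = (u8) opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	hci_req_sync(hdev, set_page_scan, SCAN_PAGE, HCI_CMD_TIMEOUT, NULL);
 */
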
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

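/* The resulting command packet has the standard HCI layout:
 *
 *	bytes 0-1: opcode, little endian (OGF in the upper 6 bits, OCF in
 *		   the lower 10 bits)
 *	byte 2:    parameter total length (plen)
 *	bytes 3-:  plen bytes of command parameters
 */
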
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

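/* Page scan interval and window are in units of 0.625 ms, so the values
 * above work out as:
 *
 *	interval 0x0100 * 0.625 ms = 160 ms   (fast connectable)
 *	interval 0x0800 * 0.625 ms = 1.28 s   (default)
 *	window   0x0012 * 0.625 ms = 11.25 ms (both cases)
 */
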
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure that proper values are set for the RSSI threshold and
	 * UUID filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

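/* Worked example (illustrative): with the 16-bit UUIDs 0x110b and 0x110e
 * registered, the block emitted above would be:
 *
 *	05 03 0b 11 0e 11
 *
 * i.e. length 0x05 (type byte plus four UUID bytes), type 0x03
 * (EIR_UUID16_ALL), then each UUID in little-endian byte order.
 */
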
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list can not be used with RPAs */
	if (!allow_rpa &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in whitelist */
	if (hdev->suspended && !params->wakeable)
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	return 0;
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the privacy1.2
	 * features. Additionally, once we support privacy1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list can not be used with RPAs */
		if (!allow_rpa &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ is sent during
	 * passive scanning. Not using a non-resolvable address here is
	 * important so that peer devices using direct advertising with
	 * our address will be correctly reported by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = LE_SUSPEND_SCAN_WINDOW;
		interval = LE_SUSPEND_SCAN_INTERVAL;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy);
}

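/* With the suspend parameters defined at the top of this file and the LE
 * scan unit of 0.625 ms, the numbers work out as:
 *
 *	window   = 0x0012 * 0.625 ms =  11.25 ms
 *	interval = 0x0400 * 0.625 ms = 640 ms
 *
 * i.e. roughly a 1.8% duty cycle, keeping wake-from-suspend capability at a
 * much lower power cost than the normal scan settings.
 */
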
static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	memset(&f, 0, sizeof(f));
	f.flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

	/* Update page scan state (since we may have modified it when setting
	 * the event filter).
	 */
	__hci_req_update_scan(req);
}

static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->wakeable, list) {
		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
	}

	scan = !list_empty(&hdev->wakeable) ? SCAN_PAGE : SCAN_DISABLED;
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
	/* Can't change params without disabling first */
	hci_req_add_le_scan_disable(req);

	/* Configure params and enable scanning */
	hci_req_add_le_passive_scan(req);

	/* Block suspend notifier on response */
	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING,
				hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;
		/* Disable page scan */
		page_scan = SCAN_DISABLED;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

		/* Disable LE passive scan */
		hci_req_add_le_scan_disable(&req);

		/* Mark task needing completion */
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		hci_req_config_le_suspend_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		hci_req_clear_event_filter(&req);
		/* Reset passive/background scanning to normal */
		hci_req_config_le_suspend_scan(&req);

		/* Unpause advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		/* Disable all sets since we only support one set at the
		 * moment.
		 */
		cp.num_of_sets = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
			    &cp);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			    &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields.
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (connectable) {
		cp.type = LE_ADV_IND;

		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	} else {
		if (get_cur_adv_instance_scan_rsp_len(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		} else {
			adv_min_interval = hdev->le_adv_min_interval;
			adv_max_interval = hdev->le_adv_max_interval;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = instance;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* If instance already has the flags set skip adding it once
	 * again.
	 */
	if (adv_instance && eir_get_data(adv_instance->adv_data,
					 adv_instance->adv_data_len, EIR_FLAGS,
					 NULL))
		goto skip_flags;

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

skip_flags:
	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = instance;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_dev *hdev = req->hdev;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv_instance;
	bool secondary_adv;
	/* In ext adv set param interval is 3 octets */
	const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv_instance,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
	memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = 127;
	cp.handle = instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		struct hci_cp_le_set_adv_set_rand_addr cp;

		/* Check if random address need to be updated */
		if (adv_instance) {
			if (!bacmp(&random_addr, &adv_instance->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		memset(&cp, 0, sizeof(cp));

		cp.handle = instance;
		bacpy(&cp.bdaddr, &random_addr);

		hci_req_add(req,
			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
			    sizeof(cp), &cp);
	}

	return 0;
}

int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
	struct adv_info *adv_instance;

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	cp = (void *) data;
	adv_set = (void *) cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(adv_set, 0, sizeof(*adv_set));

	adv_set->handle = instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv_instance && adv_instance->duration) {
		u16 duration = adv_instance->duration * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		adv_set->duration = cpu_to_le16(duration / 10);
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
		    sizeof(*cp) +
		    sizeof(*adv_set) * cp->num_of_sets,
		    data);

	return 0;
}

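/* Worked example for the conversion above: an instance duration of 2
 * seconds gives 2 * MSEC_PER_SEC = 2000 ms, so the controller field is set
 * to 2000 / 10 = 200, since the spec defines this field as N * 10 ms.
 */
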
int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	err = __hci_req_setup_ext_adv_instance(req, instance);
	if (err < 0)
		return err;

	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_ext_advertising(req, instance);

	return 0;
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	/* Only use work for scheduling instances with legacy advertising */
	if (!ext_adv_capable(hdev)) {
		hdev->adv_instance_timeout = timeout;
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->adv_instance_expire,
				   msecs_to_jiffies(timeout * 1000));
	}

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	if (ext_adv_capable(hdev)) {
		__hci_req_start_ext_adv(req, instance);
	} else {
		__hci_req_update_adv_data(req, instance);
		__hci_req_update_scan_rsp_data(req, instance);
		__hci_req_enable_advertising(req);
	}

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

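/* For reference: the two most significant bits of a random address encode
 * its sub-type (00 = non-resolvable private, 01 = resolvable private,
 * 11 = static random), which is why the NRPA generation above clears the
 * top two bits with nrpa.b[5] &= 0x3f.
 */
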
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hdev->scanning_paused)
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
		else
			__hci_req_enable_advertising(req);
	}

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

2353 static void write_iac(struct hci_request *req)
2355 struct hci_dev *hdev = req->hdev;
2356 struct hci_cp_write_current_iac_lap cp;
2358 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2361 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2362 /* Limited discoverable mode */
2363 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2364 cp.iac_lap[0] = 0x00; /* LIAC */
2365 cp.iac_lap[1] = 0x8b;
2366 cp.iac_lap[2] = 0x9e;
2367 cp.iac_lap[3] = 0x33; /* GIAC */
2368 cp.iac_lap[4] = 0x8b;
2369 cp.iac_lap[5] = 0x9e;
} else {
2371 /* General discoverable mode */
cp.num_iac = 1;
2373 cp.iac_lap[0] = 0x33; /* GIAC */
2374 cp.iac_lap[1] = 0x8b;
2375 cp.iac_lap[2] = 0x9e;
}
2378 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2379 (cp.num_iac * 3) + 1, &cp);
}
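/* Added note (illustrative, not from the original file): each inquiry
 * access code is a 3-byte LAP stored little-endian, so {0x00, 0x8b, 0x9e}
 * is the Limited Inquiry Access Code 0x9e8b00 and {0x33, 0x8b, 0x9e} is
 * the General Inquiry Access Code 0x9e8b33. The command payload is one
 * num_iac byte followed by num_iac * 3 LAP bytes, which is where the
 * (cp.num_iac * 3) + 1 length above comes from.
 */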
2382 static int discoverable_update(struct hci_request *req, unsigned long opt)
2384 struct hci_dev *hdev = req->hdev;

hci_dev_lock(hdev);
2388 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2390 __hci_req_update_scan(req);
2391 __hci_req_update_class(req);
}
2394 /* Advertising instances don't use the global discoverable setting, so
2395 * only update AD if advertising was enabled using Set Advertising.
*/
2397 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2398 __hci_req_update_adv_data(req, 0x00);
2400 /* Discoverable mode affects the local advertising
2401 * address in limited privacy mode.
*/
2403 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2404 if (ext_adv_capable(hdev))
2405 __hci_req_start_ext_adv(req, 0x00);
else
2407 __hci_req_enable_advertising(req);
}
}

2411 hci_dev_unlock(hdev);

return 0;
}
2416 static void discoverable_update_work(struct work_struct *work)
2418 struct hci_dev *hdev = container_of(work, struct hci_dev,
2419 discoverable_update);
u8 status;
2422 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2423 mgmt_set_discoverable_complete(hdev, status);
2426 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
u8 reason)
{
2429 switch (conn->state) {
case BT_CONNECTED:
case BT_CONFIG:
2432 if (conn->type == AMP_LINK) {
2433 struct hci_cp_disconn_phy_link cp;
2435 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2437 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
&cp);
} else {
2440 struct hci_cp_disconnect dc;

2442 dc.handle = cpu_to_le16(conn->handle);
dc.reason = reason;
2444 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
}

2447 conn->state = BT_DISCONN;
break;
case BT_CONNECT:
2451 if (conn->type == LE_LINK) {
2452 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
break;

2454 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
0, NULL);
2456 } else if (conn->type == ACL_LINK) {
2457 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
break;

2459 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
6, &conn->dst);
}
break;
case BT_CONNECT2:
2464 if (conn->type == ACL_LINK) {
2465 struct hci_cp_reject_conn_req rej;
2467 bacpy(&rej.bdaddr, &conn->dst);
2468 rej.reason = reason;
2470 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
sizeof(rej), &rej);
2472 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2473 struct hci_cp_reject_sync_conn_req rej;
2475 bacpy(&rej.bdaddr, &conn->dst);
2477 /* SCO rejection has its own limited set of
2478 * allowed error values (0x0D-0x0F) which isn't
2479 * compatible with most values passed to this
2480 * function. To be safe hard-code one of the
2481 * values that's suitable for SCO.
*/
2483 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2485 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
sizeof(rej), &rej);
}
break;
default:
2490 conn->state = BT_CLOSED;
break;
}
}
2495 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
if (status)
2498 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}
2501 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2503 struct hci_request req;
int err;
2506 hci_req_init(&req, conn->hdev);
2508 __hci_abort_conn(&req, conn, reason);
2510 err = hci_req_run(&req, abort_conn_complete);
2511 if (err && err != -ENODATA) {
2512 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
return err;
}

return 0;
}
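/* Added usage sketch (illustrative, not part of the original file): the
 * request machinery used by hci_abort_conn() above follows a fixed
 * pattern - build a request on the stack, queue one or more commands,
 * then run it with an optional completion callback. The helper name
 * below is hypothetical; the block is kept under #if 0 so it serves as
 * documentation only.
 */
#if 0
static int example_write_scan_enable(struct hci_dev *hdev, u8 scan)
{
struct hci_request req;

hci_req_init(&req, hdev);
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

/* hci_req_run() returns -ENODATA if nothing was queued */
return hci_req_run(&req, NULL);
}
#endif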
2519 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2521 hci_dev_lock(req->hdev);
2522 __hci_update_background_scan(req);
2523 hci_dev_unlock(req->hdev);

return 0;
}
2527 static void bg_scan_update(struct work_struct *work)
2529 struct hci_dev *hdev = container_of(work, struct hci_dev,
bg_scan_update);
2531 struct hci_conn *conn;
u8 status;
int err;

2535 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
if (!err)
return;

hci_dev_lock(hdev);

2541 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
if (conn)
2543 hci_le_conn_failed(conn, status);
2545 hci_dev_unlock(hdev);
2548 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2550 hci_req_add_le_scan_disable(req);

return 0;
}
2554 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2557 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2558 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2559 struct hci_cp_inquiry cp;
2561 BT_DBG("%s", req->hdev->name);
2563 hci_dev_lock(req->hdev);
2564 hci_inquiry_cache_flush(req->hdev);
2565 hci_dev_unlock(req->hdev);
2567 memset(&cp, 0, sizeof(cp));
2569 if (req->hdev->discovery.limited)
2570 memcpy(&cp.lap, liac, sizeof(cp.lap));
else
2572 memcpy(&cp.lap, giac, sizeof(cp.lap));

cp.length = opt;

2576 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

return 0;
}
2581 static void le_scan_disable_work(struct work_struct *work)
2583 struct hci_dev *hdev = container_of(work, struct hci_dev,
2584 le_scan_disable.work);
u8 status;
2587 BT_DBG("%s", hdev->name);
2589 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
return;
2592 cancel_delayed_work(&hdev->le_scan_restart);
2594 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
if (status) {
2596 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
status);
return;
}
2601 hdev->discovery.scan_start = 0;
2603 /* If we were running LE only scan, change discovery state. If
2604 * we were running both LE and BR/EDR inquiry simultaneously,
2605 * and BR/EDR inquiry is already finished, stop discovery,
2606 * otherwise BR/EDR inquiry will stop discovery when finished.
2607 * If we will resolve remote device name, do not change
* discovery state.
*/
2611 if (hdev->discovery.type == DISCOV_TYPE_LE)
2612 goto discov_stopped;
2614 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
goto discov_stopped;
2617 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2618 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2619 hdev->discovery.state != DISCOVERY_RESOLVING)
2620 goto discov_stopped;

return;
}
2625 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2626 HCI_CMD_TIMEOUT, &status);
if (status) {
2628 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2629 goto discov_stopped;
}

return;

discov_stopped:
hci_dev_lock(hdev);
2636 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2637 hci_dev_unlock(hdev);
}
2640 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2642 struct hci_dev *hdev = req->hdev;
2644 /* If controller is not scanning we are done. */
2645 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
return 0;
2648 hci_req_add_le_scan_disable(req);
2650 if (use_ext_scan(hdev)) {
2651 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2653 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2654 ext_enable_cp.enable = LE_SCAN_ENABLE;
2655 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2657 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2658 sizeof(ext_enable_cp), &ext_enable_cp);
} else {
2660 struct hci_cp_le_set_scan_enable cp;
2662 memset(&cp, 0, sizeof(cp));
2663 cp.enable = LE_SCAN_ENABLE;
2664 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2665 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

return 0;
}
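/* Added note (illustrative, not from the original file): le_scan_restart
 * queues a scan-disable followed immediately by a scan-enable, using the
 * extended command set when use_ext_scan() reports the controller
 * supports it and the legacy LE Set Scan Enable command otherwise.
 * Re-enabling with LE_SCAN_FILTER_DUP_ENABLE gives the controller a
 * fresh duplicate filter, which is the point of the restart on
 * controllers with HCI_QUIRK_STRICT_DUPLICATE_FILTER (see
 * le_scan_restart_work() below).
 */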
2671 static void le_scan_restart_work(struct work_struct *work)
2673 struct hci_dev *hdev = container_of(work, struct hci_dev,
2674 le_scan_restart.work);
2675 unsigned long timeout, duration, scan_start, now;
u8 status;
2678 BT_DBG("%s", hdev->name);
2680 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
if (status) {
2682 bt_dev_err(hdev, "failed to restart LE scan: status %d",
status);
return;
}
hci_dev_lock(hdev);

2689 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2690 !hdev->discovery.scan_start)
goto unlock;
2693 /* When the scan was started, hdev->le_scan_disable has been queued
2694 * after duration from scan_start. During scan restart this job
2695 * has been canceled, and we need to queue it again after proper
2696 * timeout, to make sure that scan does not run indefinitely.
*/
2698 duration = hdev->discovery.scan_duration;
2699 scan_start = hdev->discovery.scan_start;
now = jiffies;
2701 if (now - scan_start <= duration) {
int elapsed;

2704 if (now >= scan_start)
2705 elapsed = now - scan_start;
else
2707 elapsed = ULONG_MAX - scan_start + now;

2709 timeout = duration - elapsed;
} else {
timeout = 0;
}
2714 queue_delayed_work(hdev->req_workqueue,
2715 &hdev->le_scan_disable, timeout);
unlock:
2718 hci_dev_unlock(hdev);
}
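/* Added worked example (illustrative, not from the original file): the
 * elapsed computation above guards against the jiffies counter wrapping
 * between scan_start and now. With scan_start == ULONG_MAX - 100 and a
 * wrapped now == 50, the else branch computes
 * ULONG_MAX - scan_start + now == 100 + 50 == 150 jiffies elapsed, so
 * the remaining timeout queued for le_scan_disable stays sensible
 * instead of becoming a huge bogus value.
 */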
2721 static int active_scan(struct hci_request *req, unsigned long opt)
2723 uint16_t interval = opt;
2724 struct hci_dev *hdev = req->hdev;
u8 own_addr_type;
2726 /* White list is not used for discovery */
2727 u8 filter_policy = 0x00;
int err;
2730 BT_DBG("%s", hdev->name);
2732 /* If controller is scanning, it means the background scanning is
2733 * running. Thus, we should temporarily stop it in order to set the
2734 * discovery scanning parameters.
*/
2736 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2737 hci_req_add_le_scan_disable(req);
2739 /* All active scans will be done with either a resolvable private
2740 * address (when privacy feature has been enabled) or non-resolvable
* private address.
*/
2743 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
&own_addr_type);
if (err < 0)
2746 own_addr_type = ADDR_LE_DEV_PUBLIC;
2748 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2749 own_addr_type, filter_policy);

return 0;
}
2753 static int interleaved_discov(struct hci_request *req, unsigned long opt)
int err;

2757 BT_DBG("%s", req->hdev->name);

2759 err = active_scan(req, opt);
if (err)
return err;

2763 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}
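/* Added note (illustrative, not from the original file): for interleaved
 * discovery the LE scan commands from active_scan() and the BR/EDR
 * inquiry from bredr_inquiry() are queued on the same request, with the
 * opt argument of bredr_inquiry() (DISCOV_BREDR_INQUIRY_LEN here)
 * bounding the inquiry length while the overall LE phase is timed by
 * start_discovery() below.
 */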
2766 static void start_discovery(struct hci_dev *hdev, u8 *status)
2768 unsigned long timeout;
2770 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2772 switch (hdev->discovery.type) {
2773 case DISCOV_TYPE_BREDR:
2774 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2775 hci_req_sync(hdev, bredr_inquiry,
2776 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
status);
return;
2779 case DISCOV_TYPE_INTERLEAVED:
2780 /* When running simultaneous discovery, the LE scanning time
2781 * should occupy the whole discovery time since BR/EDR inquiry
2782 * and LE scanning are scheduled by the controller.
2784 * For interleaving discovery in comparison, BR/EDR inquiry
2785 * and LE scanning are done sequentially with separate
* timeouts.
*/
2788 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
&hdev->quirks)) {
2790 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2791 /* During simultaneous discovery, we double LE scan
2792 * interval. We must leave some time for the controller
2793 * to do BR/EDR inquiry.
*/
2795 hci_req_sync(hdev, interleaved_discov,
2796 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
status);
break;
}
2801 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2802 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2803 HCI_CMD_TIMEOUT, status);
break;
2805 case DISCOV_TYPE_LE:
2806 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2807 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2808 HCI_CMD_TIMEOUT, status);
break;
default:
2811 *status = HCI_ERROR_UNSPECIFIED;
return;
}

if (*status)
return;
2818 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2820 /* When service discovery is used and the controller has a
2821 * strict duplicate filter, it is important to remember the
2822 * start and duration of the scan. This is required for
2823 * restarting scanning during the discovery phase.
*/
2825 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2826 hdev->discovery.result_filtering) {
2827 hdev->discovery.scan_start = jiffies;
2828 hdev->discovery.scan_duration = timeout;
}

2831 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
timeout);
}
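/* Added note (illustrative, not from the original file): the scan_start
 * and scan_duration recorded above are exactly what
 * le_scan_restart_work() uses to re-arm the le_scan_disable timer after
 * a mid-discovery scan restart, which is why they are only tracked when
 * the controller has the strict duplicate filter quirk and result
 * filtering is active.
 */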
2835 bool hci_req_stop_discovery(struct hci_request *req)
2837 struct hci_dev *hdev = req->hdev;
2838 struct discovery_state *d = &hdev->discovery;
2839 struct hci_cp_remote_name_req_cancel cp;
2840 struct inquiry_entry *e;
bool ret = false;
2843 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2845 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2846 if (test_bit(HCI_INQUIRY, &hdev->flags))
2847 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2849 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2850 cancel_delayed_work(&hdev->le_scan_disable);
2851 hci_req_add_le_scan_disable(req);
}

ret = true;
} else {
2856 /* Passive scanning */
2857 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2858 hci_req_add_le_scan_disable(req);
ret = true;
}
}
2863 /* No further actions needed for LE-only discovery */
2864 if (d->type == DISCOV_TYPE_LE)
return ret;
2867 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2868 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
NAME_PENDING);
if (!e)
return ret;

2873 bacpy(&cp.bdaddr, &e->data.bdaddr);
2874 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
&cp);
ret = true;
}

return ret;
}
2882 static int stop_discovery(struct hci_request *req, unsigned long opt)
2884 hci_dev_lock(req->hdev);
2885 hci_req_stop_discovery(req);
2886 hci_dev_unlock(req->hdev);

return 0;
}
2891 static void discov_update(struct work_struct *work)
2893 struct hci_dev *hdev = container_of(work, struct hci_dev,
discov_update);
u8 status = 0;
2897 switch (hdev->discovery.state) {
2898 case DISCOVERY_STARTING:
2899 start_discovery(hdev, &status);
2900 mgmt_start_discovery_complete(hdev, status);
if (status)
2902 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
else
2904 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
break;
2906 case DISCOVERY_STOPPING:
2907 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2908 mgmt_stop_discovery_complete(hdev, status);
if (!status)
2910 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
break;
2912 case DISCOVERY_STOPPED:
default:
return;
}
}
2918 static void discov_off(struct work_struct *work)
2920 struct hci_dev *hdev = container_of(work, struct hci_dev,
discov_off.work);

2923 BT_DBG("%s", hdev->name);

hci_dev_lock(hdev);
2927 /* When discoverable timeout triggers, then just make sure
2928 * the limited discoverable flag is cleared. Even in the case
2929 * of a timeout triggered from general discoverable, it is
2930 * safe to unconditionally clear the flag.
*/
2932 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2933 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2934 hdev->discov_timeout = 0;
2936 hci_dev_unlock(hdev);
2938 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2939 mgmt_new_settings(hdev);
}
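/* Added note (illustrative, not from the original file): once the
 * discoverable timeout clears the flags above, re-running
 * discoverable_update() pushes the new scan mode, class and advertising
 * state down to the controller, and mgmt_new_settings() notifies
 * userspace of the changed settings without an explicit management
 * command having been issued.
 */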
2942 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2944 struct hci_dev *hdev = req->hdev;
u8 link_sec;

hci_dev_lock(hdev);
2949 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2950 !lmp_host_ssp_capable(hdev)) {
u8 mode = 0x01;

2953 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2955 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
u8 support = 0x01;
2958 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2959 sizeof(support), &support);
}
}
2963 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2964 lmp_bredr_capable(hdev)) {
2965 struct hci_cp_write_le_host_supported cp;

cp.le = 0x01;
cp.simul = 0x00;
2970 /* Check first if we already have the right
2971 * host state (host features set)
*/
2973 if (cp.le != lmp_host_le_capable(hdev) ||
2974 cp.simul != lmp_host_le_br_capable(hdev))
2975 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
sizeof(cp), &cp);
}
2979 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2980 /* Make sure the controller has a good default for
2981 * advertising data. This also applies to the case
2982 * where BR/EDR was toggled during the AUTO_OFF phase.
*/
2984 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2985 list_empty(&hdev->adv_instances)) {
int err;

2988 if (ext_adv_capable(hdev)) {
2989 err = __hci_req_setup_ext_adv_instance(req,
0x00);
if (!err)
2992 __hci_req_update_scan_rsp_data(req,
0x00);
} else {
err = 0;
2996 __hci_req_update_adv_data(req, 0x00);
2997 __hci_req_update_scan_rsp_data(req, 0x00);
}
3000 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3001 if (!ext_adv_capable(hdev))
3002 __hci_req_enable_advertising(req);
else if (!err)
3004 __hci_req_enable_ext_advertising(req,
0x00);
}
3007 } else if (!list_empty(&hdev->adv_instances)) {
3008 struct adv_info *adv_instance;
3010 adv_instance = list_first_entry(&hdev->adv_instances,
3011 struct adv_info, list);
3012 __hci_req_schedule_adv_instance(req,
3013 adv_instance->instance,
true);
}
}
3018 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3019 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3020 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3021 sizeof(link_sec), &link_sec);
3023 if (lmp_bredr_capable(hdev)) {
3024 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3025 __hci_req_write_fast_connectable(req, true);
else
3027 __hci_req_write_fast_connectable(req, false);
3028 __hci_req_update_scan(req);
3029 __hci_req_update_class(req);
3030 __hci_req_update_name(req);
3031 __hci_req_update_eir(req);
}

3034 hci_dev_unlock(hdev);

return 0;
}
3038 int __hci_req_hci_power_on(struct hci_dev *hdev)
3040 /* Register the available SMP channels (BR/EDR and LE) only when
3041 * successfully powering on the controller. This late
3042 * registration is required so that LE SMP can clearly decide if
3043 * the public address or static address is used.
*/
smp_register(hdev);

3047 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
NULL);
}
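/* Added usage sketch (illustrative, not part of the original file): every
 * builder callback passed to hci_req_sync()/__hci_req_sync() in this
 * file has the same shape - it queues commands on @req and returns 0,
 * with @opt carrying an optional caller argument. The name below is
 * hypothetical and the block is kept under #if 0 as documentation only.
 */
#if 0
static int example_builder(struct hci_request *req, unsigned long opt)
{
u8 scan = (u8)opt;

/* Queue one command; hci_req_sync() runs it and waits for completion */
hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

return 0;
}
#endif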
3051 void hci_request_setup(struct hci_dev *hdev)
3053 INIT_WORK(&hdev->discov_update, discov_update);
3054 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3055 INIT_WORK(&hdev->scan_update, scan_update_work);
3056 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3057 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3058 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3059 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3060 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3061 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3064 void hci_request_cancel_all(struct hci_dev *hdev)
3066 hci_req_sync_cancel(hdev, ENODEV);
3068 cancel_work_sync(&hdev->discov_update);
3069 cancel_work_sync(&hdev->bg_scan_update);
3070 cancel_work_sync(&hdev->scan_update);
3071 cancel_work_sync(&hdev->connectable_update);
3072 cancel_work_sync(&hdev->discoverable_update);
3073 cancel_delayed_work_sync(&hdev->discov_off);
3074 cancel_delayed_work_sync(&hdev->le_scan_disable);
3075 cancel_delayed_work_sync(&hdev->le_scan_restart);
3077 if (hdev->adv_instance_timeout) {
3078 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3079 hdev->adv_instance_timeout = 0;
}
}