2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2014 Intel Corporation
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
24 #include <linux/sched/signal.h>
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
31 #include "hci_request.h"
33 #define HCI_REQ_DONE 0
34 #define HCI_REQ_PEND 1
35 #define HCI_REQ_CANCELED 2
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
39 skb_queue_head_init(&req->cmd_q);
44 void hci_req_purge(struct hci_request *req)
46 skb_queue_purge(&req->cmd_q);
49 bool hci_req_status_pend(struct hci_dev *hdev)
51 return hdev->req_status == HCI_REQ_PEND;
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
57 struct hci_dev *hdev = req->hdev;
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
67 skb_queue_purge(&req->cmd_q);
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
75 skb = skb_peek_tail(&req->cmd_q);
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
87 queue_work(hdev->workqueue, &hdev->cmd_work);
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
94 return req_run(req, complete, NULL);
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
99 return req_run(req, NULL, complete);
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
111 hdev->req_skb = skb_get(skb);
112 wake_up_interruptible(&hdev->req_wait_q);
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 const void *param, u8 event, u32 timeout)
130 struct hci_request req;
134 BT_DBG("%s", hdev->name);
136 hci_req_init(&req, hdev);
138 hci_req_add_ev(&req, opcode, plen, param, event);
140 hdev->req_status = HCI_REQ_PEND;
142 err = hci_req_run_skb(&req, hci_req_sync_complete);
146 err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 hdev->req_status != HCI_REQ_PEND, timeout);
149 if (err == -ERESTARTSYS)
150 return ERR_PTR(-EINTR);
152 switch (hdev->req_status) {
154 err = -bt_to_errno(hdev->req_result);
157 case HCI_REQ_CANCELED:
158 err = -hdev->req_result;
166 hdev->req_status = hdev->req_result = 0;
168 hdev->req_skb = NULL;
170 BT_DBG("%s end: err %d", hdev->name, err);
178 return ERR_PTR(-ENODATA);
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 const void *param, u32 timeout)
187 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
189 EXPORT_SYMBOL(__hci_cmd_sync);
191 /* Execute request and wait for completion. */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
194 unsigned long opt, u32 timeout, u8 *hci_status)
196 struct hci_request req;
199 BT_DBG("%s start", hdev->name);
201 hci_req_init(&req, hdev);
203 hdev->req_status = HCI_REQ_PEND;
205 err = func(&req, opt);
208 *hci_status = HCI_ERROR_UNSPECIFIED;
212 err = hci_req_run_skb(&req, hci_req_sync_complete);
214 hdev->req_status = 0;
216 /* ENODATA means the HCI request command queue is empty.
217 * This can happen when a request with conditionals doesn't
218 * trigger any commands to be sent. This is normal behavior
219 * and should not trigger an error return.
221 if (err == -ENODATA) {
228 *hci_status = HCI_ERROR_UNSPECIFIED;
233 err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 hdev->req_status != HCI_REQ_PEND, timeout);
236 if (err == -ERESTARTSYS)
239 switch (hdev->req_status) {
241 err = -bt_to_errno(hdev->req_result);
243 *hci_status = hdev->req_result;
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
249 *hci_status = HCI_ERROR_UNSPECIFIED;
255 *hci_status = HCI_ERROR_UNSPECIFIED;
259 kfree_skb(hdev->req_skb);
260 hdev->req_skb = NULL;
261 hdev->req_status = hdev->req_result = 0;
263 BT_DBG("%s end: err %d", hdev->name, err);
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
270 unsigned long opt, u32 timeout, u8 *hci_status)
274 if (!test_bit(HCI_UP, &hdev->flags))
277 /* Serialize all requests */
278 hci_req_sync_lock(hdev);
279 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
280 hci_req_sync_unlock(hdev);
285 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
288 int len = HCI_COMMAND_HDR_SIZE + plen;
289 struct hci_command_hdr *hdr;
292 skb = bt_skb_alloc(len, GFP_ATOMIC);
296 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
297 hdr->opcode = cpu_to_le16(opcode);
301 skb_put_data(skb, param, plen);
303 BT_DBG("skb len %d", skb->len);
305 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
306 hci_skb_opcode(skb) = opcode;
311 /* Queue a command to an asynchronous HCI request */
312 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
313 const void *param, u8 event)
315 struct hci_dev *hdev = req->hdev;
318 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
320 /* If an error occurred during request building, there is no point in
321 * queueing the HCI command. We can simply return.
326 skb = hci_prepare_cmd(hdev, opcode, plen, param);
328 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
334 if (skb_queue_empty(&req->cmd_q))
335 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
337 bt_cb(skb)->hci.req_event = event;
339 skb_queue_tail(&req->cmd_q, skb);
342 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
345 hci_req_add_ev(req, opcode, plen, param, 0);
348 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
350 struct hci_dev *hdev = req->hdev;
351 struct hci_cp_write_page_scan_activity acp;
354 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
357 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
361 type = PAGE_SCAN_TYPE_INTERLACED;
363 /* 160 msec page scan interval */
364 acp.interval = cpu_to_le16(0x0100);
366 type = hdev->def_page_scan_type;
367 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
370 acp.window = cpu_to_le16(hdev->def_page_scan_window);
372 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
373 __cpu_to_le16(hdev->page_scan_window) != acp.window)
374 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
377 if (hdev->page_scan_type != type)
378 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
381 /* This function controls the background scanning based on hdev->pend_le_conns
382 * list. If there are pending LE connection we start the background scanning,
383 * otherwise we stop it.
385 * This function requires the caller holds hdev->lock.
387 static void __hci_update_background_scan(struct hci_request *req)
389 struct hci_dev *hdev = req->hdev;
391 if (!test_bit(HCI_UP, &hdev->flags) ||
392 test_bit(HCI_INIT, &hdev->flags) ||
393 hci_dev_test_flag(hdev, HCI_SETUP) ||
394 hci_dev_test_flag(hdev, HCI_CONFIG) ||
395 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
396 hci_dev_test_flag(hdev, HCI_UNREGISTER))
399 /* No point in doing scanning if LE support hasn't been enabled */
400 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
403 /* If discovery is active don't interfere with it */
404 if (hdev->discovery.state != DISCOVERY_STOPPED)
407 /* Reset RSSI and UUID filters when starting background scanning
408 * since these filters are meant for service discovery only.
410 * The Start Discovery and Start Service Discovery operations
411 * ensure to set proper values for RSSI threshold and UUID
412 * filter list. So it is safe to just reset them here.
414 hci_discovery_filter_clear(hdev);
416 BT_DBG("%s ADV monitoring is %s", hdev->name,
417 hci_is_adv_monitoring(hdev) ? "on" : "off");
419 if (list_empty(&hdev->pend_le_conns) &&
420 list_empty(&hdev->pend_le_reports) &&
421 !hci_is_adv_monitoring(hdev)) {
422 /* If there is no pending LE connections or devices
423 * to be scanned for or no ADV monitors, we should stop the
424 * background scanning.
427 /* If controller is not scanning we are done. */
428 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
431 hci_req_add_le_scan_disable(req);
433 BT_DBG("%s stopping background scanning", hdev->name);
435 /* If there is at least one pending LE connection, we should
436 * keep the background scan running.
439 /* If controller is connecting, we should not start scanning
440 * since some controllers are not able to scan and connect at
443 if (hci_lookup_le_connect(hdev))
446 /* If controller is currently scanning, we stop it to ensure we
447 * don't miss any advertising (due to duplicates filter).
449 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
450 hci_req_add_le_scan_disable(req);
452 hci_req_add_le_passive_scan(req);
454 BT_DBG("%s starting background scanning", hdev->name);
458 void __hci_req_update_name(struct hci_request *req)
460 struct hci_dev *hdev = req->hdev;
461 struct hci_cp_write_local_name cp;
463 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
465 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
468 #define PNP_INFO_SVCLASS_ID 0x1200
470 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
472 u8 *ptr = data, *uuids_start = NULL;
473 struct bt_uuid *uuid;
478 list_for_each_entry(uuid, &hdev->uuids, list) {
481 if (uuid->size != 16)
484 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
488 if (uuid16 == PNP_INFO_SVCLASS_ID)
494 uuids_start[1] = EIR_UUID16_ALL;
498 /* Stop if not enough space to put next UUID */
499 if ((ptr - data) + sizeof(u16) > len) {
500 uuids_start[1] = EIR_UUID16_SOME;
504 *ptr++ = (uuid16 & 0x00ff);
505 *ptr++ = (uuid16 & 0xff00) >> 8;
506 uuids_start[0] += sizeof(uuid16);
512 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
514 u8 *ptr = data, *uuids_start = NULL;
515 struct bt_uuid *uuid;
520 list_for_each_entry(uuid, &hdev->uuids, list) {
521 if (uuid->size != 32)
527 uuids_start[1] = EIR_UUID32_ALL;
531 /* Stop if not enough space to put next UUID */
532 if ((ptr - data) + sizeof(u32) > len) {
533 uuids_start[1] = EIR_UUID32_SOME;
537 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
539 uuids_start[0] += sizeof(u32);
545 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
547 u8 *ptr = data, *uuids_start = NULL;
548 struct bt_uuid *uuid;
553 list_for_each_entry(uuid, &hdev->uuids, list) {
554 if (uuid->size != 128)
560 uuids_start[1] = EIR_UUID128_ALL;
564 /* Stop if not enough space to put next UUID */
565 if ((ptr - data) + 16 > len) {
566 uuids_start[1] = EIR_UUID128_SOME;
570 memcpy(ptr, uuid->uuid, 16);
572 uuids_start[0] += 16;
578 static void create_eir(struct hci_dev *hdev, u8 *data)
583 name_len = strlen(hdev->dev_name);
589 ptr[1] = EIR_NAME_SHORT;
591 ptr[1] = EIR_NAME_COMPLETE;
593 /* EIR Data length */
594 ptr[0] = name_len + 1;
596 memcpy(ptr + 2, hdev->dev_name, name_len);
598 ptr += (name_len + 2);
601 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
603 ptr[1] = EIR_TX_POWER;
604 ptr[2] = (u8) hdev->inq_tx_power;
609 if (hdev->devid_source > 0) {
611 ptr[1] = EIR_DEVICE_ID;
613 put_unaligned_le16(hdev->devid_source, ptr + 2);
614 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
615 put_unaligned_le16(hdev->devid_product, ptr + 6);
616 put_unaligned_le16(hdev->devid_version, ptr + 8);
621 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
622 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
623 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
626 void __hci_req_update_eir(struct hci_request *req)
628 struct hci_dev *hdev = req->hdev;
629 struct hci_cp_write_eir cp;
631 if (!hdev_is_powered(hdev))
634 if (!lmp_ext_inq_capable(hdev))
637 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
640 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
643 memset(&cp, 0, sizeof(cp));
645 create_eir(hdev, cp.data);
647 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
650 memcpy(hdev->eir, cp.data, sizeof(cp.data));
652 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
655 void hci_req_add_le_scan_disable(struct hci_request *req)
657 struct hci_dev *hdev = req->hdev;
659 if (hdev->scanning_paused) {
660 bt_dev_dbg(hdev, "Scanning is paused for suspend");
664 if (use_ext_scan(hdev)) {
665 struct hci_cp_le_set_ext_scan_enable cp;
667 memset(&cp, 0, sizeof(cp));
668 cp.enable = LE_SCAN_DISABLE;
669 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
672 struct hci_cp_le_set_scan_enable cp;
674 memset(&cp, 0, sizeof(cp));
675 cp.enable = LE_SCAN_DISABLE;
676 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
680 static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
683 struct hci_cp_le_del_from_white_list cp;
685 cp.bdaddr_type = bdaddr_type;
686 bacpy(&cp.bdaddr, bdaddr);
688 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
690 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
693 /* Adds connection to white list if needed. On error, returns -1. */
694 static int add_to_white_list(struct hci_request *req,
695 struct hci_conn_params *params, u8 *num_entries,
698 struct hci_cp_le_add_to_white_list cp;
699 struct hci_dev *hdev = req->hdev;
701 /* Already in white list */
702 if (hci_bdaddr_list_lookup(&hdev->le_white_list, ¶ms->addr,
706 /* Select filter policy to accept all advertising */
707 if (*num_entries >= hdev->le_white_list_size)
710 /* White list can not be used with RPAs */
712 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) {
716 /* During suspend, only wakeable devices can be in whitelist */
717 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
718 params->current_flags))
722 cp.bdaddr_type = params->addr_type;
723 bacpy(&cp.bdaddr, ¶ms->addr);
725 bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
727 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
732 static u8 update_white_list(struct hci_request *req)
734 struct hci_dev *hdev = req->hdev;
735 struct hci_conn_params *params;
736 struct bdaddr_list *b;
738 bool pend_conn, pend_report;
739 /* We allow whitelisting even with RPAs in suspend. In the worst case,
740 * we won't be able to wake from devices that use the privacy1.2
741 * features. Additionally, once we support privacy1.2 and IRK
742 * offloading, we can update this to also check for those conditions.
744 bool allow_rpa = hdev->suspended;
746 /* Go through the current white list programmed into the
747 * controller one by one and check if that address is still
748 * in the list of pending connections or list of devices to
749 * report. If not present in either list, then queue the
750 * command to remove it from the controller.
752 list_for_each_entry(b, &hdev->le_white_list, list) {
753 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
756 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
760 /* If the device is not likely to connect or report,
761 * remove it from the whitelist.
763 if (!pend_conn && !pend_report) {
764 del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
768 /* White list can not be used with RPAs */
770 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
777 /* Since all no longer valid white list entries have been
778 * removed, walk through the list of pending connections
779 * and ensure that any new device gets programmed into
782 * If the list of the devices is larger than the list of
783 * available white list entries in the controller, then
784 * just abort and return filer policy value to not use the
787 list_for_each_entry(params, &hdev->pend_le_conns, action) {
788 if (add_to_white_list(req, params, &num_entries, allow_rpa))
792 /* After adding all new pending connections, walk through
793 * the list of pending reports and also add these to the
794 * white list if there is still space. Abort if space runs out.
796 list_for_each_entry(params, &hdev->pend_le_reports, action) {
797 if (add_to_white_list(req, params, &num_entries, allow_rpa))
801 /* Once the controller offloading of advertisement monitor is in place,
802 * the if condition should include the support of MSFT extension
805 if (!idr_is_empty(&hdev->adv_monitors_idr))
808 /* Select filter policy to use white list */
812 static bool scan_use_rpa(struct hci_dev *hdev)
814 return hci_dev_test_flag(hdev, HCI_PRIVACY);
817 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
818 u16 window, u8 own_addr_type, u8 filter_policy)
820 struct hci_dev *hdev = req->hdev;
822 if (hdev->scanning_paused) {
823 bt_dev_dbg(hdev, "Scanning is paused for suspend");
827 /* Use ext scanning if set ext scan param and ext scan enable is
830 if (use_ext_scan(hdev)) {
831 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
832 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
833 struct hci_cp_le_scan_phy_params *phy_params;
834 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
837 ext_param_cp = (void *)data;
838 phy_params = (void *)ext_param_cp->data;
840 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
841 ext_param_cp->own_addr_type = own_addr_type;
842 ext_param_cp->filter_policy = filter_policy;
844 plen = sizeof(*ext_param_cp);
846 if (scan_1m(hdev) || scan_2m(hdev)) {
847 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
849 memset(phy_params, 0, sizeof(*phy_params));
850 phy_params->type = type;
851 phy_params->interval = cpu_to_le16(interval);
852 phy_params->window = cpu_to_le16(window);
854 plen += sizeof(*phy_params);
858 if (scan_coded(hdev)) {
859 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
861 memset(phy_params, 0, sizeof(*phy_params));
862 phy_params->type = type;
863 phy_params->interval = cpu_to_le16(interval);
864 phy_params->window = cpu_to_le16(window);
866 plen += sizeof(*phy_params);
870 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
873 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
874 ext_enable_cp.enable = LE_SCAN_ENABLE;
875 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
877 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
878 sizeof(ext_enable_cp), &ext_enable_cp);
880 struct hci_cp_le_set_scan_param param_cp;
881 struct hci_cp_le_set_scan_enable enable_cp;
883 memset(¶m_cp, 0, sizeof(param_cp));
884 param_cp.type = type;
885 param_cp.interval = cpu_to_le16(interval);
886 param_cp.window = cpu_to_le16(window);
887 param_cp.own_address_type = own_addr_type;
888 param_cp.filter_policy = filter_policy;
889 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
892 memset(&enable_cp, 0, sizeof(enable_cp));
893 enable_cp.enable = LE_SCAN_ENABLE;
894 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
895 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
900 void hci_req_add_le_passive_scan(struct hci_request *req)
902 struct hci_dev *hdev = req->hdev;
905 u16 window, interval;
907 if (hdev->scanning_paused) {
908 bt_dev_dbg(hdev, "Scanning is paused for suspend");
912 /* Set require_privacy to false since no SCAN_REQ are send
913 * during passive scanning. Not using an non-resolvable address
914 * here is important so that peer devices using direct
915 * advertising with our address will be correctly reported
918 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
922 /* Adding or removing entries from the white list must
923 * happen before enabling scanning. The controller does
924 * not allow white list modification while scanning.
926 filter_policy = update_white_list(req);
928 /* When the controller is using random resolvable addresses and
929 * with that having LE privacy enabled, then controllers with
930 * Extended Scanner Filter Policies support can now enable support
931 * for handling directed advertising.
933 * So instead of using filter polices 0x00 (no whitelist)
934 * and 0x01 (whitelist enabled) use the new filter policies
935 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
937 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
938 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
939 filter_policy |= 0x02;
941 if (hdev->suspended) {
942 window = hdev->le_scan_window_suspend;
943 interval = hdev->le_scan_int_suspend;
945 window = hdev->le_scan_window;
946 interval = hdev->le_scan_interval;
949 bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
950 hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
951 own_addr_type, filter_policy);
954 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
956 struct adv_info *adv_instance;
958 /* Instance 0x00 always set local name */
959 if (instance == 0x00)
962 adv_instance = hci_find_adv_instance(hdev, instance);
966 /* TODO: Take into account the "appearance" and "local-name" flags here.
967 * These are currently being ignored as they are not supported.
969 return adv_instance->scan_rsp_len;
972 static void hci_req_clear_event_filter(struct hci_request *req)
974 struct hci_cp_set_event_filter f;
976 memset(&f, 0, sizeof(f));
977 f.flt_type = HCI_FLT_CLEAR_ALL;
978 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
980 /* Update page scan state (since we may have modified it when setting
983 __hci_req_update_scan(req);
986 static void hci_req_set_event_filter(struct hci_request *req)
988 struct bdaddr_list_with_flags *b;
989 struct hci_cp_set_event_filter f;
990 struct hci_dev *hdev = req->hdev;
991 u8 scan = SCAN_DISABLED;
993 /* Always clear event filter when starting */
994 hci_req_clear_event_filter(req);
996 list_for_each_entry(b, &hdev->whitelist, list) {
997 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1001 memset(&f, 0, sizeof(f));
1002 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1003 f.flt_type = HCI_FLT_CONN_SETUP;
1004 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1005 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1007 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1008 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1012 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1015 static void hci_req_config_le_suspend_scan(struct hci_request *req)
1017 /* Before changing params disable scan if enabled */
1018 if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
1019 hci_req_add_le_scan_disable(req);
1021 /* Configure params and enable scanning */
1022 hci_req_add_le_passive_scan(req);
1024 /* Block suspend notifier on response */
1025 set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
1028 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1030 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1032 if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1033 test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1034 wake_up(&hdev->suspend_wait_q);
/* NOTE(review): this view of the function is elided by extraction (braces,
 * blank lines and a few statements are missing), so the annotations below
 * only describe the statements that are visible.  Drives the three suspend
 * phases: DISCONNECT (quiesce + drop links), CONFIGURE_WAKE (wake filters +
 * low-duty scan) and resume (undo everything).
 */
1038 /* Call with hci_dev_lock */
1039 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1042 struct hci_conn *conn;
1043 struct hci_request req;
1045 int disconnect_counter;
/* Nothing to do when the requested state equals the current one. */
1047 if (next == hdev->suspend_state) {
1048 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1052 hdev->suspend_state = next;
1053 hci_req_init(&req, hdev);
/* Phase 1: entering suspend — pause discovery/advertising, disable all
 * scanning, then soft-disconnect every link.
 */
1055 if (next == BT_SUSPEND_DISCONNECT) {
1056 /* Mark device as suspended */
1057 hdev->suspended = true;
1059 /* Pause discovery if not already stopped */
1060 old_state = hdev->discovery.state;
1061 if (old_state != DISCOVERY_STOPPED) {
1062 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1063 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1064 queue_work(hdev->req_workqueue, &hdev->discov_update);
1067 hdev->discovery_paused = true;
1068 hdev->discovery_old_state = old_state;
1070 /* Stop advertising */
1071 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1073 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1074 cancel_delayed_work(&hdev->discov_off);
1075 queue_delayed_work(hdev->req_workqueue,
1076 &hdev->discov_off, 0);
1079 hdev->advertising_paused = true;
1080 hdev->advertising_old_state = old_state;
1081 /* Disable page scan */
1082 page_scan = SCAN_DISABLED;
1083 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1085 /* Disable LE passive scan if enabled */
1086 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1087 hci_req_add_le_scan_disable(&req);
1089 /* Mark task needing completion */
1090 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1092 /* Prevent disconnects from causing scanning to be re-enabled */
1093 hdev->scanning_paused = true;
1095 /* Run commands before disconnecting */
1096 hci_req_run(&req, suspend_req_complete);
1098 disconnect_counter = 0;
1099 /* Soft disconnect everything (power off) */
1100 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1101 hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1102 disconnect_counter++;
/* Only wait for disconnect completions when at least one was issued. */
1105 if (disconnect_counter > 0) {
1107 "Had %d disconnects. Will wait on them",
1108 disconnect_counter);
1109 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
/* Phase 2: configure wake sources — event filters for wakeable peers and
 * a lower-duty-cycle passive scan.
 */
1111 } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1112 /* Unpause to take care of updating scanning params */
1113 hdev->scanning_paused = false;
1114 /* Enable event filter for paired devices */
1115 hci_req_set_event_filter(&req);
1116 /* Enable passive scan at lower duty cycle */
1117 hci_req_config_le_suspend_scan(&req);
1118 /* Pause scan changes again. */
1119 hdev->scanning_paused = true;
1120 hci_req_run(&req, suspend_req_complete);
/* Resume path: clear filters, restore scanning, and un-pause advertising
 * and discovery to their pre-suspend states.
 */
1122 hdev->suspended = false;
1123 hdev->scanning_paused = false;
1125 hci_req_clear_event_filter(&req);
1126 /* Reset passive/background scanning to normal */
1127 hci_req_config_le_suspend_scan(&req);
1129 /* Unpause advertising */
1130 hdev->advertising_paused = false;
1131 if (hdev->advertising_old_state) {
1132 set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1133 hdev->suspend_tasks);
1134 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1135 queue_work(hdev->req_workqueue,
1136 &hdev->discoverable_update);
1137 hdev->advertising_old_state = 0;
1140 /* Unpause discovery */
1141 hdev->discovery_paused = false;
1142 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1143 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1144 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1145 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1146 queue_work(hdev->req_workqueue, &hdev->discov_update);
1149 hci_req_run(&req, suspend_req_complete);
1152 hdev->suspend_state = next;
/* Notify the suspend notifier that preparation finished. */
1155 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1156 wake_up(&hdev->suspend_wait_q);
1159 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
1161 u8 instance = hdev->cur_adv_instance;
1162 struct adv_info *adv_instance;
1164 /* Instance 0x00 always set local name */
1165 if (instance == 0x00)
1168 adv_instance = hci_find_adv_instance(hdev, instance);
1172 /* TODO: Take into account the "appearance" and "local-name" flags here.
1173 * These are currently being ignored as they are not supported.
1175 return adv_instance->scan_rsp_len;
1178 void __hci_req_disable_advertising(struct hci_request *req)
1180 if (ext_adv_capable(req->hdev)) {
1181 struct hci_cp_le_set_ext_adv_enable cp;
1184 /* Disable all sets since we only support one set at the moment */
1185 cp.num_of_sets = 0x00;
1187 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
1191 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1195 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1198 struct adv_info *adv_instance;
1200 if (instance == 0x00) {
1201 /* Instance 0 always manages the "Tx Power" and "Flags"
1204 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1206 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1207 * corresponds to the "connectable" instance flag.
1209 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1210 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1212 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1213 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1214 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1215 flags |= MGMT_ADV_FLAG_DISCOV;
1220 adv_instance = hci_find_adv_instance(hdev, instance);
1222 /* Return 0 when we got an invalid instance identifier. */
1226 return adv_instance->flags;
1229 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1231 /* If privacy is not enabled don't use RPA */
1232 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1235 /* If basic privacy mode is enabled use RPA */
1236 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1239 /* If limited privacy mode is enabled don't use RPA if we're
1240 * both discoverable and bondable.
1242 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1243 hci_dev_test_flag(hdev, HCI_BONDABLE))
1246 /* We're neither bondable nor discoverable in the limited
1247 * privacy mode, therefore use RPA.
1252 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1254 /* If there is no connection we are OK to advertise. */
1255 if (hci_conn_num(hdev, LE_LINK) == 0)
1258 /* Check le_states if there is any connection in slave role. */
1259 if (hdev->conn_hash.le_num_slave > 0) {
1260 /* Slave connection state and non connectable mode bit 20. */
1261 if (!connectable && !(hdev->le_states[2] & 0x10))
1264 /* Slave connection state and connectable mode bit 38
1265 * and scannable bit 21.
1267 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1268 !(hdev->le_states[2] & 0x20)))
1272 /* Check le_states if there is any connection in master role. */
1273 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1274 /* Master connection state and non connectable mode bit 18. */
1275 if (!connectable && !(hdev->le_states[2] & 0x02))
1278 /* Master connection state and connectable mode bit 35 and
1281 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1282 !(hdev->le_states[2] & 0x08)))
/* Queue the legacy-advertising enable sequence on @req:
 * LE Set Advertising Parameters followed by LE Set Advertising Enable.
 * Connectability comes from the current instance flags or the global
 * mgmt connectable setting; the advertising type (ADV_IND /
 * ADV_SCAN_IND / ADV_NONCONN_IND) and the interval pair are derived
 * from that plus the discoverable state.
 */
1289 void __hci_req_enable_advertising(struct hci_request *req)
1291 struct hci_dev *hdev = req->hdev;
1292 struct hci_cp_le_set_adv_param cp;
1293 u8 own_addr_type, enable = 0x01;
1295 u16 adv_min_interval, adv_max_interval;
1298 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1300 /* If the "connectable" instance flag was not set, then choose between
1301 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1303 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1304 mgmt_get_connectable(hdev);
1306 if (!is_advertising_allowed(hdev, connectable))
1309 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1310 __hci_req_disable_advertising(req);
1312 /* Clear the HCI_LE_ADV bit temporarily so that the
1313 * hci_update_random_address knows that it's safe to go ahead
1314 * and write a new random address. The flag will be set back on
1315 * as soon as the SET_ADV_ENABLE HCI command completes.
1317 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1319 /* Set require_privacy to true only when non-connectable
1320 * advertising is used. In that case it is fine to use a
1321 * non-resolvable private address.
1323 if (hci_update_random_address(req, !connectable,
1324 adv_use_rpa(hdev, flags),
1325 &own_addr_type) < 0)
1328 memset(&cp, 0, sizeof(cp));
1331 cp.type = LE_ADV_IND;
/* Connectable path: use the configured default interval range. */
1333 adv_min_interval = hdev->le_adv_min_interval;
1334 adv_max_interval = hdev->le_adv_max_interval;
1336 if (get_cur_adv_instance_scan_rsp_len(hdev))
1337 cp.type = LE_ADV_SCAN_IND;
1339 cp.type = LE_ADV_NONCONN_IND;
/* Non-connectable and not (generally) discoverable: advertise with
 * the faster discovery interval range; otherwise fall back to the
 * configured defaults (assignments below are the else-arm).
 */
1341 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1342 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1343 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1344 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1346 adv_min_interval = hdev->le_adv_min_interval;
1347 adv_max_interval = hdev->le_adv_max_interval;
1351 cp.min_interval = cpu_to_le16(adv_min_interval);
1352 cp.max_interval = cpu_to_le16(adv_max_interval);
1353 cp.own_address_type = own_addr_type;
1354 cp.channel_map = hdev->le_adv_channel_map;
1356 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1358 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Append the local device name to the AD buffer at @ptr (currently
 * @ad_len bytes used) and return the new total length. Preference
 * order: complete name (if it fits in a short-name slot), configured
 * short name, then a truncated copy of the complete name. The +1 on
 * the lengths passed to eir_append_data presumably accounts for the
 * NUL/terminator byte counted by the space check above — TODO confirm
 * against eir_append_data's contract.
 */
1361 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1364 size_t complete_len;
1366 /* no space left for name (+ NULL + type + len) */
1367 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1370 /* use complete name if present and fits */
1371 complete_len = strlen(hdev->dev_name);
1372 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1373 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1374 hdev->dev_name, complete_len + 1);
1376 /* use short name if present */
1377 short_len = strlen(hdev->short_name);
1379 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1380 hdev->short_name, short_len + 1);
1382 /* use shortened full name if present, we already know that name
1383 * is longer then HCI_MAX_SHORT_NAME_LENGTH
1386 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1388 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1389 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1391 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
/* Append the EIR_APPEARANCE field (16-bit value) and return the new
 * AD length.
 */
1398 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1400 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
/* Build the default (instance-less) scan response: optional appearance
 * followed by the local name; returns the total length written.
 */
1403 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1405 u8 scan_rsp_len = 0;
1407 if (hdev->appearance) {
1408 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1411 return append_local_name(hdev, ptr, scan_rsp_len);
/* Build scan response data for a specific advertising instance:
 * optional appearance (MGMT_ADV_FLAG_APPEARANCE), the instance's own
 * scan_rsp_data, and optionally the local name
 * (MGMT_ADV_FLAG_LOCAL_NAME). Returns the total length written.
 */
1414 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1417 struct adv_info *adv_instance;
1419 u8 scan_rsp_len = 0;
1421 adv_instance = hci_find_adv_instance(hdev, instance);
1425 instance_flags = adv_instance->flags;
1427 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1428 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1431 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1432 adv_instance->scan_rsp_len);
1434 scan_rsp_len += adv_instance->scan_rsp_len;
1436 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1437 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1439 return scan_rsp_len;
/* Queue an update of the controller's scan response data for
 * @instance (0x00 selects the default data). The new payload is built
 * into the command parameter, compared against the cached copy in
 * hdev->scan_rsp_data, and the HCI command is only queued when it
 * actually changed. Extended and legacy controllers use different
 * opcodes/parameter structs but the same flow.
 */
1442 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1444 struct hci_dev *hdev = req->hdev;
1447 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1450 if (ext_adv_capable(hdev)) {
1451 struct hci_cp_le_set_ext_scan_rsp_data cp;
1453 memset(&cp, 0, sizeof(cp));
1456 len = create_instance_scan_rsp_data(hdev, instance,
1459 len = create_default_scan_rsp_data(hdev, cp.data);
/* Skip if identical to what was last programmed. */
1461 if (hdev->scan_rsp_data_len == len &&
1462 !memcmp(cp.data, hdev->scan_rsp_data, len))
1465 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1466 hdev->scan_rsp_data_len = len;
1468 cp.handle = instance;
1470 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1471 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1473 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1476 struct hci_cp_le_set_scan_rsp_data cp;
1478 memset(&cp, 0, sizeof(cp));
1481 len = create_instance_scan_rsp_data(hdev, instance,
1484 len = create_default_scan_rsp_data(hdev, cp.data);
1486 if (hdev->scan_rsp_data_len == len &&
1487 !memcmp(cp.data, hdev->scan_rsp_data, len))
1490 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1491 hdev->scan_rsp_data_len = len;
1495 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Build the advertising data payload for @instance into @ptr and
 * return its length: an optional Flags field (unless the instance data
 * already carries EIR_FLAGS), the instance's raw adv_data, and an
 * optional TX-power field. (Extraction note: several lines, including
 * the flags emission around original lines 1543-1555, are missing from
 * this listing.)
 */
1499 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1501 struct adv_info *adv_instance = NULL;
1502 u8 ad_len = 0, flags = 0;
1505 /* Return 0 when the current instance identifier is invalid. */
1507 adv_instance = hci_find_adv_instance(hdev, instance);
1512 instance_flags = get_adv_instance_flags(hdev, instance);
1514 /* If instance already has the flags set skip adding it once
1517 if (adv_instance && eir_get_data(adv_instance->adv_data,
1518 adv_instance->adv_data_len, EIR_FLAGS,
1522 /* The Add Advertising command allows userspace to set both the general
1523 * and limited discoverable flags.
1525 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1526 flags |= LE_AD_GENERAL;
1528 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1529 flags |= LE_AD_LIMITED;
1531 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1532 flags |= LE_AD_NO_BREDR;
1534 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1535 /* If a discovery flag wasn't provided, simply use the global
1539 flags |= mgmt_get_adv_discov_flags(hdev);
1541 /* If flags would still be empty, then there is no need to
1542 * include the "Flags" AD field".
/* Copy the instance's raw advertising data after the flags. */
1556 memcpy(ptr, adv_instance->adv_data,
1557 adv_instance->adv_data_len);
1558 ad_len += adv_instance->adv_data_len;
1559 ptr += adv_instance->adv_data_len;
1562 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
/* Extended advertising reports per-instance TX power; legacy
 * controllers only have the global hdev->adv_tx_power.
 */
1565 if (ext_adv_capable(hdev)) {
1567 adv_tx_power = adv_instance->tx_power;
1569 adv_tx_power = hdev->adv_tx_power;
1571 adv_tx_power = hdev->adv_tx_power;
1574 /* Provide Tx Power only if we can provide a valid value for it */
1575 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1577 ptr[1] = EIR_TX_POWER;
1578 ptr[2] = (u8)adv_tx_power;
/* Queue an update of the controller's advertising data for @instance.
 * Mirrors __hci_req_update_scan_rsp_data: build the payload, compare
 * with the cached hdev->adv_data, and only queue the (extended or
 * legacy) HCI command when the data changed.
 */
1588 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1590 struct hci_dev *hdev = req->hdev;
1593 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1596 if (ext_adv_capable(hdev)) {
1597 struct hci_cp_le_set_ext_adv_data cp;
1599 memset(&cp, 0, sizeof(cp));
1601 len = create_instance_adv_data(hdev, instance, cp.data);
1603 /* There's nothing to do if the data hasn't changed */
1604 if (hdev->adv_data_len == len &&
1605 memcmp(cp.data, hdev->adv_data, len) == 0)
1608 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1609 hdev->adv_data_len = len;
1612 cp.handle = instance;
1613 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1614 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1616 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1618 struct hci_cp_le_set_adv_data cp;
1620 memset(&cp, 0, sizeof(cp));
1622 len = create_instance_adv_data(hdev, instance, cp.data);
1624 /* There's nothing to do if the data hasn't changed */
1625 if (hdev->adv_data_len == len &&
1626 memcmp(cp.data, hdev->adv_data, len) == 0)
1629 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1630 hdev->adv_data_len = len;
1634 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Convenience wrapper: build a one-off request that updates the
 * advertising data for @instance and run it immediately.
 */
1638 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1640 struct hci_request req;
1642 hci_req_init(&req, hdev);
1643 __hci_req_update_adv_data(&req, instance);
1645 return hci_req_run(&req, NULL);
/* Request-complete callback for re-enabling advertising; only logs. */
1648 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1650 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable advertising after something turned it off (e.g. a
 * connection). If a specific instance is current, reschedule it;
 * otherwise fall back to the default instance 0x00 via either the
 * extended or the legacy enable path.
 */
1653 void hci_req_reenable_advertising(struct hci_dev *hdev)
1655 struct hci_request req;
1657 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1658 list_empty(&hdev->adv_instances))
1661 hci_req_init(&req, hdev);
1663 if (hdev->cur_adv_instance) {
1664 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1667 if (ext_adv_capable(hdev)) {
1668 __hci_req_start_ext_adv(&req, 0x00);
1670 __hci_req_update_adv_data(&req, 0x00);
1671 __hci_req_update_scan_rsp_data(&req, 0x00);
1672 __hci_req_enable_advertising(&req);
1676 hci_req_run(&req, adv_enable_complete);
/* Delayed-work handler fired when the current advertising instance's
 * timeout elapses: clear the pending timeout, remove/deactivate the
 * instance (hci_req_clear_adv_instance also schedules the next one),
 * and disable advertising if no instances remain. Runs under
 * hci_dev_lock (the matching lock line is missing from this extract;
 * see hci_dev_unlock below).
 */
1679 static void adv_timeout_expire(struct work_struct *work)
1681 struct hci_dev *hdev = container_of(work, struct hci_dev,
1682 adv_instance_expire.work);
1684 struct hci_request req;
1687 BT_DBG("%s", hdev->name);
1691 hdev->adv_instance_timeout = 0;
1693 instance = hdev->cur_adv_instance;
1694 if (instance == 0x00)
1697 hci_req_init(&req, hdev);
1699 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1701 if (list_empty(&hdev->adv_instances))
1702 __hci_req_disable_advertising(&req);
1704 hci_req_run(&req, NULL);
1707 hci_dev_unlock(hdev);
/* Pick the own-address type (and random address, if any) for extended
 * advertising, without queueing HCI commands: RPA when privacy is on
 * (regenerating it if expired, and arming either the per-instance or
 * the global rpa_expired delayed work), a freshly generated
 * non-resolvable private address when privacy is required but RPA is
 * not in use, or the public address otherwise. @rand_addr is set to
 * BDADDR_ANY when no random address needs to be programmed.
 */
1710 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1711 bool use_rpa, struct adv_info *adv_instance,
1712 u8 *own_addr_type, bdaddr_t *rand_addr)
1716 bacpy(rand_addr, BDADDR_ANY);
1718 /* If privacy is enabled use a resolvable private address. If
1719 * current RPA has expired then generate a new one.
1724 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Per-instance RPA still valid: reuse the current one. */
1727 if (!adv_instance->rpa_expired &&
1728 !bacmp(&adv_instance->random_addr, &hdev->rpa))
1731 adv_instance->rpa_expired = false;
1733 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1734 !bacmp(&hdev->random_addr, &hdev->rpa))
1738 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1740 bt_dev_err(hdev, "failed to generate new RPA");
1744 bacpy(rand_addr, &hdev->rpa);
1746 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
/* Arm the expiry work on the instance if one was given,
 * otherwise on the device-global RPA timer.
 */
1748 queue_delayed_work(hdev->workqueue,
1749 &adv_instance->rpa_expired_cb, to);
1751 queue_delayed_work(hdev->workqueue,
1752 &hdev->rpa_expired, to);
1757 /* In case of required privacy without resolvable private address,
1758 * use an non-resolvable private address. This is useful for
1759 * non-connectable advertising.
1761 if (require_privacy) {
1765 /* The non-resolvable private address is generated
1766 * from random six bytes with the two most significant
1769 get_random_bytes(&nrpa, 6);
1772 /* The non-resolvable private address shall not be
1773 * equal to the public address.
1775 if (bacmp(&hdev->bdaddr, &nrpa))
1779 *own_addr_type = ADDR_LE_DEV_RANDOM;
1780 bacpy(rand_addr, &nrpa);
1785 /* No privacy so use a public address. */
1786 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Queue LE Clear Advertising Sets (removes all ext adv sets). */
1791 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1793 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
/* Queue LE Set Extended Advertising Parameters for @instance, picking
 * event properties (connectable/scannable/non-connectable, legacy vs
 * extended PDUs based on the MGMT_ADV_FLAG_SEC_* mask), PHYs, and the
 * own-address type. If a random address must be used and differs from
 * what is already programmed, also queues LE Set Advertising Set
 * Random Address. Returns negative errno on failure.
 */
1796 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1798 struct hci_cp_le_set_ext_adv_params cp;
1799 struct hci_dev *hdev = req->hdev;
1802 bdaddr_t random_addr;
1805 struct adv_info *adv_instance;
1809 adv_instance = hci_find_adv_instance(hdev, instance);
1813 adv_instance = NULL;
1816 flags = get_adv_instance_flags(hdev, instance);
1818 /* If the "connectable" instance flag was not set, then choose between
1819 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1821 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1822 mgmt_get_connectable(hdev);
1824 if (!is_advertising_allowed(hdev, connectable))
1827 /* Set require_privacy to true only when non-connectable
1828 * advertising is used. In that case it is fine to use a
1829 * non-resolvable private address.
1831 err = hci_get_random_address(hdev, !connectable,
1832 adv_use_rpa(hdev, flags), adv_instance,
1833 &own_addr_type, &random_addr);
1837 memset(&cp, 0, sizeof(cp));
1839 /* In ext adv set param interval is 3 octets */
1840 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1841 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1843 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
/* Legacy PDU variants are used when no secondary-PHY flag is set. */
1847 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1849 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1850 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1852 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1854 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1857 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1859 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1862 cp.own_addr_type = own_addr_type;
1863 cp.channel_map = hdev->le_adv_channel_map;
1865 cp.handle = instance;
1867 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1868 cp.primary_phy = HCI_ADV_PHY_1M;
1869 cp.secondary_phy = HCI_ADV_PHY_2M;
1870 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1871 cp.primary_phy = HCI_ADV_PHY_CODED;
1872 cp.secondary_phy = HCI_ADV_PHY_CODED;
1874 /* In all other cases use 1M */
1875 cp.primary_phy = HCI_ADV_PHY_1M;
1876 cp.secondary_phy = HCI_ADV_PHY_1M;
1879 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1881 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1882 bacmp(&random_addr, BDADDR_ANY)) {
1883 struct hci_cp_le_set_adv_set_rand_addr cp;
1885 /* Check if random address need to be updated */
1887 if (!bacmp(&random_addr, &adv_instance->random_addr))
1890 if (!bacmp(&random_addr, &hdev->random_addr))
1894 memset(&cp, 0, sizeof(cp));
1896 cp.handle = instance;
1897 bacpy(&cp.bdaddr, &random_addr);
1900 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
/* Queue LE Set Extended Advertising Enable for a single set
 * (@instance), including the per-set duration so the controller stops
 * the instance itself.
 */
1907 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1909 struct hci_dev *hdev = req->hdev;
1910 struct hci_cp_le_set_ext_adv_enable *cp;
1911 struct hci_cp_ext_adv_set *adv_set;
1912 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1913 struct adv_info *adv_instance;
1916 adv_instance = hci_find_adv_instance(hdev, instance);
1920 adv_instance = NULL;
1924 adv_set = (void *) cp->data;
1926 memset(cp, 0, sizeof(*cp));
1929 cp->num_of_sets = 0x01;
1931 memset(adv_set, 0, sizeof(*adv_set));
1933 adv_set->handle = instance;
1935 /* Set duration per instance since controller is responsible for
1938 if (adv_instance && adv_instance->duration) {
/* NOTE(review): the guard tests adv_instance->duration but the
 * value used below is adv_instance->timeout (seconds) scaled by
 * MSEC_PER_SEC. Upstream later changed this to use ->duration,
 * which is already in milliseconds; the u16 can also overflow for
 * large timeouts. Confirm against the current kernel tree.
 */
1939 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
1941 /* Time = N * 10 ms */
1942 adv_set->duration = cpu_to_le16(duration / 10);
1945 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1946 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
/* Start (or restart) extended advertising for @instance: disable any
 * running advertising, set up the instance parameters, then queue the
 * scan-response update and the enable command.
 */
1952 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1954 struct hci_dev *hdev = req->hdev;
1957 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1958 __hci_req_disable_advertising(req);
1960 err = __hci_req_setup_ext_adv_instance(req, instance);
1964 __hci_req_update_scan_rsp_data(req, instance);
1965 __hci_req_enable_ext_advertising(req, instance);
/* Make @instance the current advertising instance and queue the
 * commands to start it. Computes this round's timeout from the
 * instance's duration and remaining lifetime; for legacy controllers
 * the timeout is driven by the adv_instance_expire delayed work, while
 * extended controllers time the set themselves. When @force is false
 * and the same instance is already advertising, nothing is queued.
 */
1970 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1973 struct hci_dev *hdev = req->hdev;
1974 struct adv_info *adv_instance = NULL;
1977 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1978 list_empty(&hdev->adv_instances))
1981 if (hdev->adv_instance_timeout)
1984 adv_instance = hci_find_adv_instance(hdev, instance)
1988 /* A zero timeout means unlimited advertising. As long as there is
1989 * only one instance, duration should be ignored. We still set a timeout
1990 * in case further instances are being added later on.
1992 * If the remaining lifetime of the instance is more than the duration
1993 * then the timeout corresponds to the duration, otherwise it will be
1994 * reduced to the remaining instance lifetime.
1996 if (adv_instance->timeout == 0 ||
1997 adv_instance->duration <= adv_instance->remaining_time)
1998 timeout = adv_instance->duration;
2000 timeout = adv_instance->remaining_time;
2002 /* The remaining time is being reduced unless the instance is being
2003 * advertised without time limit.
2005 if (adv_instance->timeout)
2006 adv_instance->remaining_time =
2007 adv_instance->remaining_time - timeout;
2009 /* Only use work for scheduling instances with legacy advertising */
2010 if (!ext_adv_capable(hdev)) {
2011 hdev->adv_instance_timeout = timeout;
2012 queue_delayed_work(hdev->req_workqueue,
2013 &hdev->adv_instance_expire,
2014 msecs_to_jiffies(timeout * 1000));
2017 /* If we're just re-scheduling the same instance again then do not
2018 * execute any HCI commands. This happens when a single instance is
2021 if (!force && hdev->cur_adv_instance == instance &&
2022 hci_dev_test_flag(hdev, HCI_LE_ADV))
2025 hdev->cur_adv_instance = instance;
2026 if (ext_adv_capable(hdev)) {
2027 __hci_req_start_ext_adv(req, instance);
2029 __hci_req_update_adv_data(req, instance);
2030 __hci_req_update_scan_rsp_data(req, instance);
2031 __hci_req_enable_advertising(req);
/* Cancel a pending advertising-instance expiry and clear its record. */
2037 static void cancel_adv_timeout(struct hci_dev *hdev)
2039 if (hdev->adv_instance_timeout) {
2040 hdev->adv_instance_timeout = 0;
2041 cancel_delayed_work(&hdev->adv_instance_expire);
2045 /* For a single instance:
2046 * - force == true: The instance will be removed even when its remaining
2047 * lifetime is not zero.
2048 * - force == false: the instance will be deactivated but kept stored unless
2049 * the remaining lifetime is zero.
2051 * For instance == 0x00:
2052 * - force == true: All instances will be removed regardless of their timeout
2054 * - force == false: Only instances that have a timeout will be removed.
2056 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2057 struct hci_request *req, u8 instance,
2060 struct adv_info *adv_instance, *n, *next_instance = NULL;
2064 /* Cancel any timeout concerning the removed instance(s). */
2065 if (!instance || hdev->cur_adv_instance == instance)
2066 cancel_adv_timeout(hdev);
2068 /* Get the next instance to advertise BEFORE we remove
2069 * the current one. This can be the same instance again
2070 * if there is only one instance.
2072 if (instance && hdev->cur_adv_instance == instance)
2073 next_instance = hci_get_next_instance(hdev, instance);
2075 if (instance == 0x00) {
2076 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2078 if (!(force || adv_instance->timeout))
/* Remove and notify mgmt listeners for each instance. */
2081 rem_inst = adv_instance->instance;
2082 err = hci_remove_adv_instance(hdev, rem_inst);
2084 mgmt_advertising_removed(sk, hdev, rem_inst);
2087 adv_instance = hci_find_adv_instance(hdev, instance);
2089 if (force || (adv_instance && adv_instance->timeout &&
2090 !adv_instance->remaining_time)) {
2091 /* Don't advertise a removed instance. */
2092 if (next_instance &&
2093 next_instance->instance == instance)
2094 next_instance = NULL;
2096 err = hci_remove_adv_instance(hdev, instance);
2098 mgmt_advertising_removed(sk, hdev, instance);
/* Only reschedule the next instance when a request context was
 * given, the controller is powered, and advertising is not being
 * managed through the global HCI_ADVERTISING setting.
 */
2102 if (!req || !hdev_is_powered(hdev) ||
2103 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2107 __hci_req_schedule_adv_instance(req, next_instance->instance,
/* Queue LE Set Random Address with @rpa, unless advertising or an LE
 * connection attempt is in progress — in that case defer by marking
 * the RPA expired so the next cycle retries.
 */
2111 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2113 struct hci_dev *hdev = req->hdev;
2115 /* If we're advertising or initiating an LE connection we can't
2116 * go ahead and change the random address at this time. This is
2117 * because the eventual initiator address used for the
2118 * subsequently created connection will be undefined (some
2119 * controllers use the new address and others the one we had
2120 * when the operation started).
2122 * In this kind of scenario skip the update and let the random
2123 * address be updated at the next cycle.
2125 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2126 hci_lookup_le_connect(hdev)) {
2127 BT_DBG("Deferring random address update");
2128 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2132 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Choose and (if needed) queue programming of the own address for
 * legacy advertising/scanning/connecting. Sets *own_addr_type and, in
 * the random-address cases, queues LE Set Random Address via
 * set_random_addr(). Order of preference: RPA (privacy), NRPA
 * (privacy required without RPA), static address (forced, or no public
 * address, or BR/EDR disabled with static configured), else public.
 */
2135 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2136 bool use_rpa, u8 *own_addr_type)
2138 struct hci_dev *hdev = req->hdev;
2141 /* If privacy is enabled use a resolvable private address. If
2142 * current RPA has expired or there is something else than
2143 * the current RPA in use, then generate a new one.
2148 *own_addr_type = ADDR_LE_DEV_RANDOM;
2150 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2151 !bacmp(&hdev->random_addr, &hdev->rpa))
2154 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2156 bt_dev_err(hdev, "failed to generate new RPA");
2160 set_random_addr(req, &hdev->rpa);
2162 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2163 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2168 /* In case of required privacy without resolvable private address,
2169 * use an non-resolvable private address. This is useful for active
2170 * scanning and non-connectable advertising.
2172 if (require_privacy) {
2176 /* The non-resolvable private address is generated
2177 * from random six bytes with the two most significant
2180 get_random_bytes(&nrpa, 6);
2183 /* The non-resolvable private address shall not be
2184 * equal to the public address.
2186 if (bacmp(&hdev->bdaddr, &nrpa))
2190 *own_addr_type = ADDR_LE_DEV_RANDOM;
2191 set_random_addr(req, &nrpa);
2195 /* If forcing static address is in use or there is no public
2196 * address use the static address as random address (but skip
2197 * the HCI command if the current random address is already the
2200 * In case BR/EDR has been disabled on a dual-mode controller
2201 * and a static address has been configured, then use that
2202 * address instead of the public BR/EDR address.
2204 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2205 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2206 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2207 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2208 *own_addr_type = ADDR_LE_DEV_RANDOM;
2209 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2210 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2211 &hdev->static_addr);
2215 /* Neither privacy nor static address is being used so use a
2218 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Return true if any whitelist (accept-list) entry has no active
 * BR/EDR connection — such entries need page scan enabled so the
 * remote can reconnect.
 */
2223 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2225 struct bdaddr_list *b;
2227 list_for_each_entry(b, &hdev->whitelist, list) {
2228 struct hci_conn *conn;
2230 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2234 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
/* Queue Write Scan Enable to match the current connectable /
 * discoverable settings, skipping the command when the HCI_PSCAN /
 * HCI_ISCAN flags already reflect the desired value.
 */
2241 void __hci_req_update_scan(struct hci_request *req)
2243 struct hci_dev *hdev = req->hdev;
2246 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2249 if (!hdev_is_powered(hdev))
2252 if (mgmt_powering_down(hdev))
2255 if (hdev->scanning_paused)
2258 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2259 disconnected_whitelist_entries(hdev))
2262 scan = SCAN_DISABLED;
2264 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2265 scan |= SCAN_INQUIRY;
/* No change versus the currently programmed scan mode: skip. */
2267 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2268 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2271 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* hci_req_sync callback: update scan mode under hci_dev_lock. */
2274 static int update_scan(struct hci_request *req, unsigned long opt)
2276 hci_dev_lock(req->hdev);
2277 __hci_req_update_scan(req);
2278 hci_dev_unlock(req->hdev);
/* Work item driving update_scan() through hci_req_sync. */
2282 static void scan_update_work(struct work_struct *work)
2284 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2286 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
/* hci_req_sync callback run when the connectable setting changed:
 * refresh scan mode, advertising data/parameters, and the background
 * scan to match the new state.
 */
2289 static int connectable_update(struct hci_request *req, unsigned long opt)
2291 struct hci_dev *hdev = req->hdev;
2295 __hci_req_update_scan(req);
2297 /* If BR/EDR is not enabled and we disable advertising as a
2298 * by-product of disabling connectable, we need to update the
2299 * advertising flags.
2301 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2302 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2304 /* Update the advertising parameters if necessary */
2305 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2306 !list_empty(&hdev->adv_instances)) {
2307 if (ext_adv_capable(hdev))
2308 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2310 __hci_req_enable_advertising(req);
2313 __hci_update_background_scan(req);
2315 hci_dev_unlock(hdev);
/* Work item: run connectable_update and report status to mgmt. */
2320 static void connectable_update_work(struct work_struct *work)
2322 struct hci_dev *hdev = container_of(work, struct hci_dev,
2323 connectable_update);
2326 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2327 mgmt_set_connectable_complete(hdev, status);
/* OR together the service-class hints of all registered UUIDs; used
 * as the service byte of the Class of Device.
 */
2330 static u8 get_service_classes(struct hci_dev *hdev)
2332 struct bt_uuid *uuid;
2335 list_for_each_entry(uuid, &hdev->uuids, list)
2336 val |= uuid->svc_hint;
/* Queue Write Class of Device built from minor/major class and the
 * UUID service hints; skipped while the service cache is active or
 * when the class is unchanged.
 */
2341 void __hci_req_update_class(struct hci_request *req)
2343 struct hci_dev *hdev = req->hdev;
2346 BT_DBG("%s", hdev->name);
2348 if (!hdev_is_powered(hdev))
2351 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2354 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2357 cod[0] = hdev->minor_class;
2358 cod[1] = hdev->major_class;
2359 cod[2] = get_service_classes(hdev);
2361 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2364 if (memcmp(cod, hdev->dev_class, 3) == 0)
2367 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Queue Write Current IAC LAP: LIAC+GIAC (0x9e8b00 / 0x9e8b33, LAPs
 * stored little-endian) in limited discoverable mode, GIAC only
 * otherwise.
 */
2370 static void write_iac(struct hci_request *req)
2372 struct hci_dev *hdev = req->hdev;
2373 struct hci_cp_write_current_iac_lap cp;
2375 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2378 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2379 /* Limited discoverable mode */
2380 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2381 cp.iac_lap[0] = 0x00; /* LIAC */
2382 cp.iac_lap[1] = 0x8b;
2383 cp.iac_lap[2] = 0x9e;
2384 cp.iac_lap[3] = 0x33; /* GIAC */
2385 cp.iac_lap[4] = 0x8b;
2386 cp.iac_lap[5] = 0x9e;
2388 /* General discoverable mode */
2390 cp.iac_lap[0] = 0x33; /* GIAC */
2391 cp.iac_lap[1] = 0x8b;
2392 cp.iac_lap[2] = 0x9e;
/* Parameter length: one LAP is 3 bytes, plus the num_iac byte. */
2395 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2396 (cp.num_iac * 3) + 1, &cp);
/* hci_req_sync callback run when the discoverable setting changed:
 * refresh IAC, scan mode, class, and — when advertising was enabled
 * via Set Advertising — the advertising data (and parameters in
 * limited privacy mode, where discoverability affects the local
 * advertising address).
 */
2399 static int discoverable_update(struct hci_request *req, unsigned long opt)
2401 struct hci_dev *hdev = req->hdev;
2405 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2407 __hci_req_update_scan(req);
2408 __hci_req_update_class(req);
2411 /* Advertising instances don't use the global discoverable setting, so
2412 * only update AD if advertising was enabled using Set Advertising.
2414 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2415 __hci_req_update_adv_data(req, 0x00);
2417 /* Discoverable mode affects the local advertising
2418 * address in limited privacy mode.
2420 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2421 if (ext_adv_capable(hdev))
2422 __hci_req_start_ext_adv(req, 0x00);
2424 __hci_req_enable_advertising(req);
2428 hci_dev_unlock(hdev);
/* Work item: run discoverable_update and report status to mgmt. */
2433 static void discoverable_update_work(struct work_struct *work)
2435 struct hci_dev *hdev = container_of(work, struct hci_dev,
2436 discoverable_update);
2439 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2440 mgmt_set_discoverable_complete(hdev, status);
/* Queue the HCI command appropriate for tearing down @conn given its
 * current state: Disconnect (or Disconnect Physical Link for AMP) for
 * established links, Create Connection Cancel / LE Create Connection
 * Cancel for outgoing attempts, Reject for incoming requests, and
 * just mark BT_CLOSED otherwise.
 */
2443 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2446 switch (conn->state) {
2449 if (conn->type == AMP_LINK) {
2450 struct hci_cp_disconn_phy_link cp;
2452 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2454 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2457 struct hci_cp_disconnect dc;
2459 dc.handle = cpu_to_le16(conn->handle);
2461 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2464 conn->state = BT_DISCONN;
2468 if (conn->type == LE_LINK) {
/* A connection still in the scanning phase has no HCI
 * command outstanding to cancel.
 */
2469 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2471 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2473 } else if (conn->type == ACL_LINK) {
2474 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2476 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2481 if (conn->type == ACL_LINK) {
2482 struct hci_cp_reject_conn_req rej;
2484 bacpy(&rej.bdaddr, &conn->dst);
2485 rej.reason = reason;
2487 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2489 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2490 struct hci_cp_reject_sync_conn_req rej;
2492 bacpy(&rej.bdaddr, &conn->dst);
2494 /* SCO rejection has its own limited set of
2495 * allowed error values (0x0D-0x0F) which isn't
2496 * compatible with most values passed to this
2497 * function. To be safe hard-code one of the
2498 * values that's suitable for SCO.
2500 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2502 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2507 conn->state = BT_CLOSED;
/* Completion callback for hci_abort_conn; only logs failures. */
2512 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2515 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
/* Build and run a request aborting @conn with @reason. -ENODATA from
 * hci_req_run means no command was queued (nothing to abort) and is
 * not treated as an error.
 */
2518 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2520 struct hci_request req;
2523 hci_req_init(&req, conn->hdev);
2525 __hci_abort_conn(&req, conn, reason);
2527 err = hci_req_run(&req, abort_conn_complete);
2528 if (err && err != -ENODATA) {
2529 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
/* hci_req_sync callback: refresh background scan under hci_dev_lock. */
2536 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2538 hci_dev_lock(req->hdev);
2539 __hci_update_background_scan(req);
2540 hci_dev_unlock(req->hdev);
/* Work item: run update_bg_scan; on failure, fail any pending LE
 * connection attempt so it does not hang.
 */
2544 static void bg_scan_update(struct work_struct *work)
2546 struct hci_dev *hdev = container_of(work, struct hci_dev,
2548 struct hci_conn *conn;
2552 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2558 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2560 hci_le_conn_failed(conn, status);
2562 hci_dev_unlock(hdev);
/* hci_req_sync callback: queue the LE scan disable command(s). */
2565 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2567 hci_req_add_le_scan_disable(req);
/* hci_req_sync callback: flush the inquiry cache and start a BR/EDR
 * inquiry using the LIAC when limited discovery was requested,
 * otherwise the GIAC.
 */
2571 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2574 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2575 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2576 struct hci_cp_inquiry cp;
2578 BT_DBG("%s", req->hdev->name);
2580 hci_dev_lock(req->hdev);
2581 hci_inquiry_cache_flush(req->hdev);
2582 hci_dev_unlock(req->hdev);
2584 memset(&cp, 0, sizeof(cp));
2586 if (req->hdev->discovery.limited)
2587 memcpy(&cp.lap, liac, sizeof(cp.lap));
2589 memcpy(&cp.lap, giac, sizeof(cp.lap));
2593 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Delayed-work handler ending a timed LE scan: disable scanning, then
 * either stop discovery (LE-only), or continue with / wait for the
 * BR/EDR inquiry phase of interleaved discovery.
 */
2598 static void le_scan_disable_work(struct work_struct *work)
2600 struct hci_dev *hdev = container_of(work, struct hci_dev,
2601 le_scan_disable.work);
2604 BT_DBG("%s", hdev->name);
2606 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2609 cancel_delayed_work(&hdev->le_scan_restart);
2611 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2613 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2618 hdev->discovery.scan_start = 0;
2620 /* If we were running LE only scan, change discovery state. If
2621 * we were running both LE and BR/EDR inquiry simultaneously,
2622 * and BR/EDR inquiry is already finished, stop discovery,
2623 * otherwise BR/EDR inquiry will stop discovery when finished.
2624 * If we will resolve remote device name, do not change
2628 if (hdev->discovery.type == DISCOV_TYPE_LE)
2629 goto discov_stopped;
2631 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
/* Controllers with the simultaneous-discovery quirk ran inquiry
 * in parallel; if it already finished, stop discovery here.
 */
2634 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2635 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2636 hdev->discovery.state != DISCOVERY_RESOLVING)
2637 goto discov_stopped;
2642 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2643 HCI_CMD_TIMEOUT, &status);
2645 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2646 goto discov_stopped;
2653 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2654 hci_dev_unlock(hdev);
/* hci_req_sync callback: restart an ongoing LE scan (disable then
 * re-enable) so controllers with strict duplicate filtering report
 * devices again; no-op when not scanning or scanning is paused.
 */
2657 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2659 struct hci_dev *hdev = req->hdev;
2661 /* If controller is not scanning we are done. */
2662 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2665 if (hdev->scanning_paused) {
2666 bt_dev_dbg(hdev, "Scanning is paused for suspend")
2670 hci_req_add_le_scan_disable(req);
2672 if (use_ext_scan(hdev)) {
2673 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2675 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2676 ext_enable_cp.enable = LE_SCAN_ENABLE;
2677 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2679 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2680 sizeof(ext_enable_cp), &ext_enable_cp);
2682 struct hci_cp_le_set_scan_enable cp;
2684 memset(&cp, 0, sizeof(cp));
2685 cp.enable = LE_SCAN_ENABLE;
2686 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2687 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Work item restarting the LE scan, then re-arming le_scan_disable
 * with the time remaining from the original scan window (with
 * explicit handling of jiffies wrap-around) so the scan still ends on
 * schedule.
 */
2693 static void le_scan_restart_work(struct work_struct *work)
2695 struct hci_dev *hdev = container_of(work, struct hci_dev,
2696 le_scan_restart.work);
2697 unsigned long timeout, duration, scan_start, now;
2700 BT_DBG("%s", hdev->name);
2702 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2704 bt_dev_err(hdev, "failed to restart LE scan: status %d",
/* Only timed scans on strict-duplicate-filter controllers need
 * the disable work requeued.
 */
2711 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2712 !hdev->discovery.scan_start)
2715 /* When the scan was started, hdev->le_scan_disable has been queued
2716 * after duration from scan_start. During scan restart this job
2717 * has been canceled, and we need to queue it again after proper
2718 * timeout, to make sure that scan does not run indefinitely.
2720 duration = hdev->discovery.scan_duration;
2721 scan_start = hdev->discovery.scan_start;
2723 if (now - scan_start <= duration) {
2726 if (now >= scan_start)
2727 elapsed = now - scan_start;
/* jiffies counter wrapped between scan_start and now. */
2729 elapsed = ULONG_MAX - scan_start + now;
2731 timeout = duration - elapsed;
2736 queue_delayed_work(hdev->req_workqueue,
2737 &hdev->le_scan_disable, timeout);
2740 hci_dev_unlock(hdev);
/* HCI request builder: start an active LE scan for device discovery.
 *
 * @opt carries the scan interval.  Any currently running (background)
 * scan is disabled first so the discovery scan parameters can take
 * effect.  The own address type comes from hci_update_random_address()
 * (an RPA when privacy is in use); on its failure path the public
 * address is used.  filter_policy 0x00 means the white list is not
 * consulted, so all advertisers are reported.
 */
2743 static int active_scan(struct hci_request *req, unsigned long opt)
2745 uint16_t interval = opt;
2746 struct hci_dev *hdev = req->hdev;
2748 /* White list is not used for discovery */
2749 u8 filter_policy = 0x00;
2752 BT_DBG("%s", hdev->name);
2754 /* If controller is scanning, it means the background scanning is
2755 * running. Thus, we should temporarily stop it in order to set the
2756 * discovery scanning parameters.
2758 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2759 hci_req_add_le_scan_disable(req);
2761 /* All active scans will be done with either a resolvable private
2762 * address (when privacy feature has been enabled) or non-resolvable
2765 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
/* NOTE(review): public address appears to be the fallback when the
 * random address could not be set — confirm against the error path.
 */
2768 own_addr_type = ADDR_LE_DEV_PUBLIC;
2770 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2771 hdev->le_scan_window_discovery, own_addr_type,
/* HCI request builder for interleaved discovery: kick off the LE active
 * scan first, then run BR/EDR inquiry for DISCOV_BREDR_INQUIRY_LEN.
 */
2776 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2780 BT_DBG("%s", req->hdev->name);
2782 err = active_scan(req, opt);
2786 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
/* Dispatch the synchronous HCI requests that start discovery for the
 * configured discovery type (BR/EDR only, LE only, or interleaved),
 * then — for the LE-scanning types — schedule the le_scan_disable work
 * after the chosen timeout.  *status reports the HCI outcome to the
 * caller (discov_update); unknown types yield HCI_ERROR_UNSPECIFIED.
 */
2789 static void start_discovery(struct hci_dev *hdev, u8 *status)
2791 unsigned long timeout;
2793 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2795 switch (hdev->discovery.type) {
2796 case DISCOV_TYPE_BREDR:
/* Skip the inquiry if one is already in progress. */
2797 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2798 hci_req_sync(hdev, bredr_inquiry,
2799 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2802 case DISCOV_TYPE_INTERLEAVED:
2803 /* When running simultaneous discovery, the LE scanning time
2804 * should occupy the whole discovery time sine BR/EDR inquiry
2805 * and LE scanning are scheduled by the controller.
2807 * For interleaving discovery in comparison, BR/EDR inquiry
2808 * and LE scanning are done sequentially with separate
2811 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2813 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2814 /* During simultaneous discovery, we double LE scan
2815 * interval. We must leave some time for the controller
2816 * to do BR/EDR inquiry.
2818 hci_req_sync(hdev, interleaved_discov,
2819 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
/* Non-simultaneous interleaving: LE scan now, BR/EDR inquiry follows
 * after the (shorter) interleaved timeout.
 */
2824 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2825 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2826 HCI_CMD_TIMEOUT, status);
2828 case DISCOV_TYPE_LE:
2829 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2830 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2831 HCI_CMD_TIMEOUT, status);
/* Unknown discovery type: report an unspecified error. */
2834 *status = HCI_ERROR_UNSPECIFIED;
2841 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2843 /* When service discovery is used and the controller has a
2844 * strict duplicate filter, it is important to remember the
2845 * start and duration of the scan. This is required for
2846 * restarting scanning during the discovery phase.
2848 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2849 hdev->discovery.result_filtering) {
2850 hdev->discovery.scan_start = jiffies;
2851 hdev->discovery.scan_duration = timeout;
2854 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
/* Queue the HCI commands needed to stop the current discovery:
 * cancel an ongoing inquiry, disable LE scanning (cancelling the
 * pending le_scan_disable work when discovery was active), and cancel
 * an in-flight remote-name request for non-LE-only discovery.  The
 * bool return signals whether any command was queued — TODO confirm;
 * the return statements fall outside the visible lines.
 */
2858 bool hci_req_stop_discovery(struct hci_request *req)
2860 struct hci_dev *hdev = req->hdev;
2861 struct discovery_state *d = &hdev->discovery;
2862 struct hci_cp_remote_name_req_cancel cp;
2863 struct inquiry_entry *e;
2866 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2868 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2869 if (test_bit(HCI_INQUIRY, &hdev->flags))
2870 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2872 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
/* Discovery owned the scan: stop the pending auto-disable too. */
2873 cancel_delayed_work(&hdev->le_scan_disable);
2874 hci_req_add_le_scan_disable(req);
2879 /* Passive scanning */
2880 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2881 hci_req_add_le_scan_disable(req);
2886 /* No further actions needed for LE-only discovery */
2887 if (d->type == DISCOV_TYPE_LE)
2890 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2891 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
/* Cancel the name resolution for the entry currently resolving. */
2896 bacpy(&cp.bdaddr, &e->data.bdaddr);
2897 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
/* hci_req_sync() callback wrapper around hci_req_stop_discovery(),
 * taking the hdev lock for the duration of the request build.
 */
2905 static int stop_discovery(struct hci_request *req, unsigned long opt)
2907 hci_dev_lock(req->hdev);
2908 hci_req_stop_discovery(req);
2909 hci_dev_unlock(req->hdev);
/* Work handler driving discovery state transitions: on STARTING, run
 * start_discovery() and report the result via mgmt, moving to FINDING
 * on success or STOPPED on failure; on STOPPING, run stop_discovery()
 * synchronously, report via mgmt, and move to STOPPED.
 */
2914 static void discov_update(struct work_struct *work)
2916 struct hci_dev *hdev = container_of(work, struct hci_dev,
2920 switch (hdev->discovery.state) {
2921 case DISCOVERY_STARTING:
2922 start_discovery(hdev, &status);
2923 mgmt_start_discovery_complete(hdev, status);
2925 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2927 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2929 case DISCOVERY_STOPPING:
2930 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2931 mgmt_stop_discovery_complete(hdev, status);
2933 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2935 case DISCOVERY_STOPPED:
/* Delayed-work handler for the discoverable timeout: clear the
 * discoverable flags and the timeout, push the new mode to the
 * controller via discoverable_update, and notify mgmt listeners of
 * the settings change.
 */
2941 static void discov_off(struct work_struct *work)
2943 struct hci_dev *hdev = container_of(work, struct hci_dev,
2946 BT_DBG("%s", hdev->name);
2950 /* When discoverable timeout triggers, then just make sure
2951 * the limited discoverable flag is cleared. Even in the case
2952 * of a timeout triggered from general discoverable, it is
2953 * safe to unconditionally clear the flag.
2955 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2956 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2957 hdev->discov_timeout = 0;
2959 hci_dev_unlock(hdev);
/* Sync the controller outside the lock, then broadcast new settings. */
2961 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2962 mgmt_new_settings(hdev);
/* Bring the controller's HCI state in line with the host's configured
 * settings after power-on: SSP and Secure Connections modes, LE host
 * support, default advertising data and (ext) advertising enablement,
 * link security, and — for BR/EDR-capable controllers — fast
 * connectable, scan mode, class, name and EIR.  Commands are only
 * queued when the controller state differs from the host flags.
 */
2965 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2967 struct hci_dev *hdev = req->hdev;
/* Enable SSP on the controller if the host wants it but the
 * controller does not yet report it.
 */
2972 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2973 !lmp_host_ssp_capable(hdev)) {
2976 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2978 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2981 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2982 sizeof(support), &support);
/* LE host support only needs writing on dual-mode controllers. */
2986 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2987 lmp_bredr_capable(hdev)) {
2988 struct hci_cp_write_le_host_supported cp;
2993 /* Check first if we already have the right
2994 * host state (host features set)
2996 if (cp.le != lmp_host_le_capable(hdev) ||
2997 cp.simul != lmp_host_le_br_capable(hdev))
2998 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3002 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3003 /* Make sure the controller has a good default for
3004 * advertising data. This also applies to the case
3005 * where BR/EDR was toggled during the AUTO_OFF phase.
3007 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3008 list_empty(&hdev->adv_instances)) {
3011 if (ext_adv_capable(hdev)) {
3012 err = __hci_req_setup_ext_adv_instance(req,
3015 __hci_req_update_scan_rsp_data(req,
/* Legacy advertising: refresh default adv + scan-rsp data. */
3019 __hci_req_update_adv_data(req, 0x00);
3020 __hci_req_update_scan_rsp_data(req, 0x00);
3023 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3024 if (!ext_adv_capable(hdev))
3025 __hci_req_enable_advertising(req);
3027 __hci_req_enable_ext_advertising(req,
/* Not globally advertising, but instances exist: schedule the
 * first configured advertising instance.
 */
3030 } else if (!list_empty(&hdev->adv_instances)) {
3031 struct adv_info *adv_instance;
3033 adv_instance = list_first_entry(&hdev->adv_instances,
3034 struct adv_info, list);
3035 __hci_req_schedule_adv_instance(req,
3036 adv_instance->instance,
/* Sync link-level security (auth enable) with the host flag. */
3041 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3042 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3043 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3044 sizeof(link_sec), &link_sec);
3046 if (lmp_bredr_capable(hdev)) {
3047 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3048 __hci_req_write_fast_connectable(req, true);
3050 __hci_req_write_fast_connectable(req, false);
3051 __hci_req_update_scan(req);
3052 __hci_req_update_class(req);
3053 __hci_req_update_name(req);
3054 __hci_req_update_eir(req);
3057 hci_dev_unlock(hdev);
/* Run powered_update_hci() as a synchronous HCI request during
 * power-on, returning its status to the caller.
 */
3061 int __hci_req_hci_power_on(struct hci_dev *hdev)
3063 /* Register the available SMP channels (BR/EDR and LE) only when
3064 * successfully powering on the controller. This late
3065 * registration is required so that LE SMP can clearly decide if
3066 * the public address or static address is used.
3070 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
/* Initialize all work and delayed-work items used by the request
 * machinery; called once per hci_dev during setup.  Each item pairs
 * with a cancel_*_sync() in hci_request_cancel_all().
 */
3074 void hci_request_setup(struct hci_dev *hdev)
3076 INIT_WORK(&hdev->discov_update, discov_update);
3077 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3078 INIT_WORK(&hdev->scan_update, scan_update_work);
3079 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3080 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3081 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3082 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3083 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3084 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3087 void hci_request_cancel_all(struct hci_dev *hdev)
3089 hci_req_sync_cancel(hdev, ENODEV);
3091 cancel_work_sync(&hdev->discov_update);
3092 cancel_work_sync(&hdev->bg_scan_update);
3093 cancel_work_sync(&hdev->scan_update);
3094 cancel_work_sync(&hdev->connectable_update);
3095 cancel_work_sync(&hdev->discoverable_update);
3096 cancel_delayed_work_sync(&hdev->discov_off);
3097 cancel_delayed_work_sync(&hdev->le_scan_disable);
3098 cancel_delayed_work_sync(&hdev->le_scan_restart);
3100 if (hdev->adv_instance_timeout) {
3101 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3102 hdev->adv_instance_timeout = 0;