/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24 #include <linux/sched/signal.h>
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
31 #include "hci_request.h"
/* States of a synchronous HCI request as tracked in hdev->req_status */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
39 skb_queue_head_init(&req->cmd_q);
44 void hci_req_purge(struct hci_request *req)
46 skb_queue_purge(&req->cmd_q);
49 static int req_run(struct hci_request *req, hci_req_complete_t complete,
50 hci_req_complete_skb_t complete_skb)
52 struct hci_dev *hdev = req->hdev;
56 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
58 /* If an error occurred during request building, remove all HCI
59 * commands queued on the HCI request queue.
62 skb_queue_purge(&req->cmd_q);
66 /* Do not allow empty requests */
67 if (skb_queue_empty(&req->cmd_q))
70 skb = skb_peek_tail(&req->cmd_q);
72 bt_cb(skb)->hci.req_complete = complete;
73 } else if (complete_skb) {
74 bt_cb(skb)->hci.req_complete_skb = complete_skb;
75 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
78 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
79 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
80 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
82 queue_work(hdev->workqueue, &hdev->cmd_work);
87 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
89 return req_run(req, complete, NULL);
92 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
94 return req_run(req, NULL, complete);
97 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
100 BT_DBG("%s result 0x%2.2x", hdev->name, result);
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = result;
104 hdev->req_status = HCI_REQ_DONE;
106 hdev->req_skb = skb_get(skb);
107 wake_up_interruptible(&hdev->req_wait_q);
111 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
122 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
123 const void *param, u8 event, u32 timeout)
125 struct hci_request req;
129 BT_DBG("%s", hdev->name);
131 hci_req_init(&req, hdev);
133 hci_req_add_ev(&req, opcode, plen, param, event);
135 hdev->req_status = HCI_REQ_PEND;
137 err = hci_req_run_skb(&req, hci_req_sync_complete);
141 err = wait_event_interruptible_timeout(hdev->req_wait_q,
142 hdev->req_status != HCI_REQ_PEND, timeout);
144 if (err == -ERESTARTSYS)
145 return ERR_PTR(-EINTR);
147 switch (hdev->req_status) {
149 err = -bt_to_errno(hdev->req_result);
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
161 hdev->req_status = hdev->req_result = 0;
163 hdev->req_skb = NULL;
165 BT_DBG("%s end: err %d", hdev->name, err);
173 return ERR_PTR(-ENODATA);
177 EXPORT_SYMBOL(__hci_cmd_sync_ev);
179 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
180 const void *param, u32 timeout)
182 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
184 EXPORT_SYMBOL(__hci_cmd_sync);
186 /* Execute request and wait for completion. */
187 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
189 unsigned long opt, u32 timeout, u8 *hci_status)
191 struct hci_request req;
194 BT_DBG("%s start", hdev->name);
196 hci_req_init(&req, hdev);
198 hdev->req_status = HCI_REQ_PEND;
200 err = func(&req, opt);
203 *hci_status = HCI_ERROR_UNSPECIFIED;
207 err = hci_req_run_skb(&req, hci_req_sync_complete);
209 hdev->req_status = 0;
211 /* ENODATA means the HCI request command queue is empty.
212 * This can happen when a request with conditionals doesn't
213 * trigger any commands to be sent. This is normal behavior
214 * and should not trigger an error return.
216 if (err == -ENODATA) {
223 *hci_status = HCI_ERROR_UNSPECIFIED;
228 err = wait_event_interruptible_timeout(hdev->req_wait_q,
229 hdev->req_status != HCI_REQ_PEND, timeout);
231 if (err == -ERESTARTSYS)
234 switch (hdev->req_status) {
236 err = -bt_to_errno(hdev->req_result);
238 *hci_status = hdev->req_result;
241 case HCI_REQ_CANCELED:
242 err = -hdev->req_result;
244 *hci_status = HCI_ERROR_UNSPECIFIED;
250 *hci_status = HCI_ERROR_UNSPECIFIED;
254 kfree_skb(hdev->req_skb);
255 hdev->req_skb = NULL;
256 hdev->req_status = hdev->req_result = 0;
258 BT_DBG("%s end: err %d", hdev->name, err);
263 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
265 unsigned long opt, u32 timeout, u8 *hci_status)
269 if (!test_bit(HCI_UP, &hdev->flags))
272 /* Serialize all requests */
273 hci_req_sync_lock(hdev);
274 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
275 hci_req_sync_unlock(hdev);
280 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
283 int len = HCI_COMMAND_HDR_SIZE + plen;
284 struct hci_command_hdr *hdr;
287 skb = bt_skb_alloc(len, GFP_ATOMIC);
291 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
292 hdr->opcode = cpu_to_le16(opcode);
296 skb_put_data(skb, param, plen);
298 BT_DBG("skb len %d", skb->len);
300 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
301 hci_skb_opcode(skb) = opcode;
306 /* Queue a command to an asynchronous HCI request */
307 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
308 const void *param, u8 event)
310 struct hci_dev *hdev = req->hdev;
313 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
315 /* If an error occurred during request building, there is no point in
316 * queueing the HCI command. We can simply return.
321 skb = hci_prepare_cmd(hdev, opcode, plen, param);
323 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
329 if (skb_queue_empty(&req->cmd_q))
330 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
332 bt_cb(skb)->hci.req_event = event;
334 skb_queue_tail(&req->cmd_q, skb);
337 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
340 hci_req_add_ev(req, opcode, plen, param, 0);
343 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
345 struct hci_dev *hdev = req->hdev;
346 struct hci_cp_write_page_scan_activity acp;
349 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
352 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
356 type = PAGE_SCAN_TYPE_INTERLACED;
358 /* 160 msec page scan interval */
359 acp.interval = cpu_to_le16(0x0100);
361 type = PAGE_SCAN_TYPE_STANDARD; /* default */
363 /* default 1.28 sec page scan */
364 acp.interval = cpu_to_le16(0x0800);
367 acp.window = cpu_to_le16(0x0012);
369 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
370 __cpu_to_le16(hdev->page_scan_window) != acp.window)
371 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
374 if (hdev->page_scan_type != type)
375 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
378 /* This function controls the background scanning based on hdev->pend_le_conns
379 * list. If there are pending LE connection we start the background scanning,
380 * otherwise we stop it.
382 * This function requires the caller holds hdev->lock.
384 static void __hci_update_background_scan(struct hci_request *req)
386 struct hci_dev *hdev = req->hdev;
388 if (!test_bit(HCI_UP, &hdev->flags) ||
389 test_bit(HCI_INIT, &hdev->flags) ||
390 hci_dev_test_flag(hdev, HCI_SETUP) ||
391 hci_dev_test_flag(hdev, HCI_CONFIG) ||
392 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
393 hci_dev_test_flag(hdev, HCI_UNREGISTER))
396 /* No point in doing scanning if LE support hasn't been enabled */
397 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
400 /* If discovery is active don't interfere with it */
401 if (hdev->discovery.state != DISCOVERY_STOPPED)
404 /* Reset RSSI and UUID filters when starting background scanning
405 * since these filters are meant for service discovery only.
407 * The Start Discovery and Start Service Discovery operations
408 * ensure to set proper values for RSSI threshold and UUID
409 * filter list. So it is safe to just reset them here.
411 hci_discovery_filter_clear(hdev);
413 if (list_empty(&hdev->pend_le_conns) &&
414 list_empty(&hdev->pend_le_reports)) {
415 /* If there is no pending LE connections or devices
416 * to be scanned for, we should stop the background
420 /* If controller is not scanning we are done. */
421 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
424 hci_req_add_le_scan_disable(req);
426 BT_DBG("%s stopping background scanning", hdev->name);
428 /* If there is at least one pending LE connection, we should
429 * keep the background scan running.
432 /* If controller is connecting, we should not start scanning
433 * since some controllers are not able to scan and connect at
436 if (hci_lookup_le_connect(hdev))
439 /* If controller is currently scanning, we stop it to ensure we
440 * don't miss any advertising (due to duplicates filter).
442 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
443 hci_req_add_le_scan_disable(req);
445 hci_req_add_le_passive_scan(req);
447 BT_DBG("%s starting background scanning", hdev->name);
451 void __hci_req_update_name(struct hci_request *req)
453 struct hci_dev *hdev = req->hdev;
454 struct hci_cp_write_local_name cp;
456 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
458 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* PnP Information service class UUID16 — excluded from the EIR UUID list */
#define PNP_INFO_SVCLASS_ID		0x1200
463 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
465 u8 *ptr = data, *uuids_start = NULL;
466 struct bt_uuid *uuid;
471 list_for_each_entry(uuid, &hdev->uuids, list) {
474 if (uuid->size != 16)
477 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
481 if (uuid16 == PNP_INFO_SVCLASS_ID)
487 uuids_start[1] = EIR_UUID16_ALL;
491 /* Stop if not enough space to put next UUID */
492 if ((ptr - data) + sizeof(u16) > len) {
493 uuids_start[1] = EIR_UUID16_SOME;
497 *ptr++ = (uuid16 & 0x00ff);
498 *ptr++ = (uuid16 & 0xff00) >> 8;
499 uuids_start[0] += sizeof(uuid16);
505 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
507 u8 *ptr = data, *uuids_start = NULL;
508 struct bt_uuid *uuid;
513 list_for_each_entry(uuid, &hdev->uuids, list) {
514 if (uuid->size != 32)
520 uuids_start[1] = EIR_UUID32_ALL;
524 /* Stop if not enough space to put next UUID */
525 if ((ptr - data) + sizeof(u32) > len) {
526 uuids_start[1] = EIR_UUID32_SOME;
530 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
532 uuids_start[0] += sizeof(u32);
538 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
540 u8 *ptr = data, *uuids_start = NULL;
541 struct bt_uuid *uuid;
546 list_for_each_entry(uuid, &hdev->uuids, list) {
547 if (uuid->size != 128)
553 uuids_start[1] = EIR_UUID128_ALL;
557 /* Stop if not enough space to put next UUID */
558 if ((ptr - data) + 16 > len) {
559 uuids_start[1] = EIR_UUID128_SOME;
563 memcpy(ptr, uuid->uuid, 16);
565 uuids_start[0] += 16;
571 static void create_eir(struct hci_dev *hdev, u8 *data)
576 name_len = strlen(hdev->dev_name);
582 ptr[1] = EIR_NAME_SHORT;
584 ptr[1] = EIR_NAME_COMPLETE;
586 /* EIR Data length */
587 ptr[0] = name_len + 1;
589 memcpy(ptr + 2, hdev->dev_name, name_len);
591 ptr += (name_len + 2);
594 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
596 ptr[1] = EIR_TX_POWER;
597 ptr[2] = (u8) hdev->inq_tx_power;
602 if (hdev->devid_source > 0) {
604 ptr[1] = EIR_DEVICE_ID;
606 put_unaligned_le16(hdev->devid_source, ptr + 2);
607 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
608 put_unaligned_le16(hdev->devid_product, ptr + 6);
609 put_unaligned_le16(hdev->devid_version, ptr + 8);
614 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
615 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
616 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
619 void __hci_req_update_eir(struct hci_request *req)
621 struct hci_dev *hdev = req->hdev;
622 struct hci_cp_write_eir cp;
624 if (!hdev_is_powered(hdev))
627 if (!lmp_ext_inq_capable(hdev))
630 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
633 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
636 memset(&cp, 0, sizeof(cp));
638 create_eir(hdev, cp.data);
640 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
643 memcpy(hdev->eir, cp.data, sizeof(cp.data));
645 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
648 void hci_req_add_le_scan_disable(struct hci_request *req)
650 struct hci_cp_le_set_scan_enable cp;
652 memset(&cp, 0, sizeof(cp));
653 cp.enable = LE_SCAN_DISABLE;
654 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
657 static void add_to_white_list(struct hci_request *req,
658 struct hci_conn_params *params)
660 struct hci_cp_le_add_to_white_list cp;
662 cp.bdaddr_type = params->addr_type;
663 bacpy(&cp.bdaddr, ¶ms->addr);
665 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
668 static u8 update_white_list(struct hci_request *req)
670 struct hci_dev *hdev = req->hdev;
671 struct hci_conn_params *params;
672 struct bdaddr_list *b;
673 uint8_t white_list_entries = 0;
675 /* Go through the current white list programmed into the
676 * controller one by one and check if that address is still
677 * in the list of pending connections or list of devices to
678 * report. If not present in either list, then queue the
679 * command to remove it from the controller.
681 list_for_each_entry(b, &hdev->le_white_list, list) {
682 /* If the device is neither in pend_le_conns nor
683 * pend_le_reports then remove it from the whitelist.
685 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
686 &b->bdaddr, b->bdaddr_type) &&
687 !hci_pend_le_action_lookup(&hdev->pend_le_reports,
688 &b->bdaddr, b->bdaddr_type)) {
689 struct hci_cp_le_del_from_white_list cp;
691 cp.bdaddr_type = b->bdaddr_type;
692 bacpy(&cp.bdaddr, &b->bdaddr);
694 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
699 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
700 /* White list can not be used with RPAs */
704 white_list_entries++;
707 /* Since all no longer valid white list entries have been
708 * removed, walk through the list of pending connections
709 * and ensure that any new device gets programmed into
712 * If the list of the devices is larger than the list of
713 * available white list entries in the controller, then
714 * just abort and return filer policy value to not use the
717 list_for_each_entry(params, &hdev->pend_le_conns, action) {
718 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
719 ¶ms->addr, params->addr_type))
722 if (white_list_entries >= hdev->le_white_list_size) {
723 /* Select filter policy to accept all advertising */
727 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
728 params->addr_type)) {
729 /* White list can not be used with RPAs */
733 white_list_entries++;
734 add_to_white_list(req, params);
737 /* After adding all new pending connections, walk through
738 * the list of pending reports and also add these to the
739 * white list if there is still space.
741 list_for_each_entry(params, &hdev->pend_le_reports, action) {
742 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
743 ¶ms->addr, params->addr_type))
746 if (white_list_entries >= hdev->le_white_list_size) {
747 /* Select filter policy to accept all advertising */
751 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
752 params->addr_type)) {
753 /* White list can not be used with RPAs */
757 white_list_entries++;
758 add_to_white_list(req, params);
761 /* Select filter policy to use white list */
765 static bool scan_use_rpa(struct hci_dev *hdev)
767 return hci_dev_test_flag(hdev, HCI_PRIVACY);
770 void hci_req_add_le_passive_scan(struct hci_request *req)
772 struct hci_cp_le_set_scan_param param_cp;
773 struct hci_cp_le_set_scan_enable enable_cp;
774 struct hci_dev *hdev = req->hdev;
778 /* Set require_privacy to false since no SCAN_REQ are send
779 * during passive scanning. Not using an non-resolvable address
780 * here is important so that peer devices using direct
781 * advertising with our address will be correctly reported
784 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
788 /* Adding or removing entries from the white list must
789 * happen before enabling scanning. The controller does
790 * not allow white list modification while scanning.
792 filter_policy = update_white_list(req);
794 /* When the controller is using random resolvable addresses and
795 * with that having LE privacy enabled, then controllers with
796 * Extended Scanner Filter Policies support can now enable support
797 * for handling directed advertising.
799 * So instead of using filter polices 0x00 (no whitelist)
800 * and 0x01 (whitelist enabled) use the new filter policies
801 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
803 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
804 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
805 filter_policy |= 0x02;
807 memset(¶m_cp, 0, sizeof(param_cp));
808 param_cp.type = LE_SCAN_PASSIVE;
809 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
810 param_cp.window = cpu_to_le16(hdev->le_scan_window);
811 param_cp.own_address_type = own_addr_type;
812 param_cp.filter_policy = filter_policy;
813 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
816 memset(&enable_cp, 0, sizeof(enable_cp));
817 enable_cp.enable = LE_SCAN_ENABLE;
818 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
819 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
823 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
825 u8 instance = hdev->cur_adv_instance;
826 struct adv_info *adv_instance;
828 /* Ignore instance 0 */
829 if (instance == 0x00)
832 adv_instance = hci_find_adv_instance(hdev, instance);
836 /* TODO: Take into account the "appearance" and "local-name" flags here.
837 * These are currently being ignored as they are not supported.
839 return adv_instance->scan_rsp_len;
842 void __hci_req_disable_advertising(struct hci_request *req)
846 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
849 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
852 struct adv_info *adv_instance;
854 if (instance == 0x00) {
855 /* Instance 0 always manages the "Tx Power" and "Flags"
858 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
860 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
861 * corresponds to the "connectable" instance flag.
863 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
864 flags |= MGMT_ADV_FLAG_CONNECTABLE;
866 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
867 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
868 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
869 flags |= MGMT_ADV_FLAG_DISCOV;
874 adv_instance = hci_find_adv_instance(hdev, instance);
876 /* Return 0 when we got an invalid instance identifier. */
880 return adv_instance->flags;
883 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
885 /* If privacy is not enabled don't use RPA */
886 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
889 /* If basic privacy mode is enabled use RPA */
890 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
893 /* If limited privacy mode is enabled don't use RPA if we're
894 * both discoverable and bondable.
896 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
897 hci_dev_test_flag(hdev, HCI_BONDABLE))
900 /* We're neither bondable nor discoverable in the limited
901 * privacy mode, therefore use RPA.
906 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
908 /* If there is no connection we are OK to advertise. */
909 if (hci_conn_num(hdev, LE_LINK) == 0)
912 /* Check le_states if there is any connection in slave role. */
913 if (hdev->conn_hash.le_num_slave > 0) {
914 /* Slave connection state and non connectable mode bit 20. */
915 if (!connectable && !(hdev->le_states[2] & 0x10))
918 /* Slave connection state and connectable mode bit 38
919 * and scannable bit 21.
921 if (connectable && (!(hdev->le_states[4] & 0x40) ||
922 !(hdev->le_states[2] & 0x20)))
926 /* Check le_states if there is any connection in master role. */
927 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
928 /* Master connection state and non connectable mode bit 18. */
929 if (!connectable && !(hdev->le_states[2] & 0x02))
932 /* Master connection state and connectable mode bit 35 and
935 if (connectable && (!(hdev->le_states[4] & 0x08) ||
936 !(hdev->le_states[2] & 0x08)))
943 void __hci_req_enable_advertising(struct hci_request *req)
945 struct hci_dev *hdev = req->hdev;
946 struct hci_cp_le_set_adv_param cp;
947 u8 own_addr_type, enable = 0x01;
951 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
953 /* If the "connectable" instance flag was not set, then choose between
954 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
956 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
957 mgmt_get_connectable(hdev);
959 if (!is_advertising_allowed(hdev, connectable))
962 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
963 __hci_req_disable_advertising(req);
965 /* Clear the HCI_LE_ADV bit temporarily so that the
966 * hci_update_random_address knows that it's safe to go ahead
967 * and write a new random address. The flag will be set back on
968 * as soon as the SET_ADV_ENABLE HCI command completes.
970 hci_dev_clear_flag(hdev, HCI_LE_ADV);
972 /* Set require_privacy to true only when non-connectable
973 * advertising is used. In that case it is fine to use a
974 * non-resolvable private address.
976 if (hci_update_random_address(req, !connectable,
977 adv_use_rpa(hdev, flags),
981 memset(&cp, 0, sizeof(cp));
982 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
983 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
986 cp.type = LE_ADV_IND;
987 else if (get_cur_adv_instance_scan_rsp_len(hdev))
988 cp.type = LE_ADV_SCAN_IND;
990 cp.type = LE_ADV_NONCONN_IND;
992 cp.own_address_type = own_addr_type;
993 cp.channel_map = hdev->le_adv_channel_map;
995 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
997 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1000 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1003 size_t complete_len;
1005 /* no space left for name (+ NULL + type + len) */
1006 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1009 /* use complete name if present and fits */
1010 complete_len = strlen(hdev->dev_name);
1011 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1012 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1013 hdev->dev_name, complete_len + 1);
1015 /* use short name if present */
1016 short_len = strlen(hdev->short_name);
1018 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1019 hdev->short_name, short_len + 1);
1021 /* use shortened full name if present, we already know that name
1022 * is longer then HCI_MAX_SHORT_NAME_LENGTH
1025 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1027 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1028 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1030 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1037 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1039 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1042 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1044 u8 scan_rsp_len = 0;
1046 if (hdev->appearance) {
1047 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1050 return append_local_name(hdev, ptr, scan_rsp_len);
1053 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1056 struct adv_info *adv_instance;
1058 u8 scan_rsp_len = 0;
1060 adv_instance = hci_find_adv_instance(hdev, instance);
1064 instance_flags = adv_instance->flags;
1066 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1067 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1070 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1071 adv_instance->scan_rsp_len);
1073 scan_rsp_len += adv_instance->scan_rsp_len;
1075 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1076 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1078 return scan_rsp_len;
1081 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1083 struct hci_dev *hdev = req->hdev;
1084 struct hci_cp_le_set_scan_rsp_data cp;
1087 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1090 memset(&cp, 0, sizeof(cp));
1093 len = create_instance_scan_rsp_data(hdev, instance, cp.data);
1095 len = create_default_scan_rsp_data(hdev, cp.data);
1097 if (hdev->scan_rsp_data_len == len &&
1098 !memcmp(cp.data, hdev->scan_rsp_data, len))
1101 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1102 hdev->scan_rsp_data_len = len;
1106 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1109 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1111 struct adv_info *adv_instance = NULL;
1112 u8 ad_len = 0, flags = 0;
1115 /* Return 0 when the current instance identifier is invalid. */
1117 adv_instance = hci_find_adv_instance(hdev, instance);
1122 instance_flags = get_adv_instance_flags(hdev, instance);
1124 /* The Add Advertising command allows userspace to set both the general
1125 * and limited discoverable flags.
1127 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1128 flags |= LE_AD_GENERAL;
1130 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1131 flags |= LE_AD_LIMITED;
1133 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1134 flags |= LE_AD_NO_BREDR;
1136 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1137 /* If a discovery flag wasn't provided, simply use the global
1141 flags |= mgmt_get_adv_discov_flags(hdev);
1143 /* If flags would still be empty, then there is no need to
1144 * include the "Flags" AD field".
1157 memcpy(ptr, adv_instance->adv_data,
1158 adv_instance->adv_data_len);
1159 ad_len += adv_instance->adv_data_len;
1160 ptr += adv_instance->adv_data_len;
1163 /* Provide Tx Power only if we can provide a valid value for it */
1164 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1165 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1167 ptr[1] = EIR_TX_POWER;
1168 ptr[2] = (u8)hdev->adv_tx_power;
1177 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1179 struct hci_dev *hdev = req->hdev;
1180 struct hci_cp_le_set_adv_data cp;
1183 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1186 memset(&cp, 0, sizeof(cp));
1188 len = create_instance_adv_data(hdev, instance, cp.data);
1190 /* There's nothing to do if the data hasn't changed */
1191 if (hdev->adv_data_len == len &&
1192 memcmp(cp.data, hdev->adv_data, len) == 0)
1195 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1196 hdev->adv_data_len = len;
1200 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1203 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1205 struct hci_request req;
1207 hci_req_init(&req, hdev);
1208 __hci_req_update_adv_data(&req, instance);
1210 return hci_req_run(&req, NULL);
1213 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1215 BT_DBG("%s status %u", hdev->name, status);
1218 void hci_req_reenable_advertising(struct hci_dev *hdev)
1220 struct hci_request req;
1222 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1223 list_empty(&hdev->adv_instances))
1226 hci_req_init(&req, hdev);
1228 if (hdev->cur_adv_instance) {
1229 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1232 __hci_req_update_adv_data(&req, 0x00);
1233 __hci_req_update_scan_rsp_data(&req, 0x00);
1234 __hci_req_enable_advertising(&req);
1237 hci_req_run(&req, adv_enable_complete);
1240 static void adv_timeout_expire(struct work_struct *work)
1242 struct hci_dev *hdev = container_of(work, struct hci_dev,
1243 adv_instance_expire.work);
1245 struct hci_request req;
1248 BT_DBG("%s", hdev->name);
1252 hdev->adv_instance_timeout = 0;
1254 instance = hdev->cur_adv_instance;
1255 if (instance == 0x00)
1258 hci_req_init(&req, hdev);
1260 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1262 if (list_empty(&hdev->adv_instances))
1263 __hci_req_disable_advertising(&req);
1265 hci_req_run(&req, NULL);
1268 hci_dev_unlock(hdev);
1271 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1274 struct hci_dev *hdev = req->hdev;
1275 struct adv_info *adv_instance = NULL;
1278 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1279 list_empty(&hdev->adv_instances))
1282 if (hdev->adv_instance_timeout)
1285 adv_instance = hci_find_adv_instance(hdev, instance);
1289 /* A zero timeout means unlimited advertising. As long as there is
1290 * only one instance, duration should be ignored. We still set a timeout
1291 * in case further instances are being added later on.
1293 * If the remaining lifetime of the instance is more than the duration
1294 * then the timeout corresponds to the duration, otherwise it will be
1295 * reduced to the remaining instance lifetime.
1297 if (adv_instance->timeout == 0 ||
1298 adv_instance->duration <= adv_instance->remaining_time)
1299 timeout = adv_instance->duration;
1301 timeout = adv_instance->remaining_time;
1303 /* The remaining time is being reduced unless the instance is being
1304 * advertised without time limit.
1306 if (adv_instance->timeout)
1307 adv_instance->remaining_time =
1308 adv_instance->remaining_time - timeout;
1310 hdev->adv_instance_timeout = timeout;
1311 queue_delayed_work(hdev->req_workqueue,
1312 &hdev->adv_instance_expire,
1313 msecs_to_jiffies(timeout * 1000));
1315 /* If we're just re-scheduling the same instance again then do not
1316 * execute any HCI commands. This happens when a single instance is
1319 if (!force && hdev->cur_adv_instance == instance &&
1320 hci_dev_test_flag(hdev, HCI_LE_ADV))
1323 hdev->cur_adv_instance = instance;
1324 __hci_req_update_adv_data(req, instance);
1325 __hci_req_update_scan_rsp_data(req, instance);
1326 __hci_req_enable_advertising(req);
1331 static void cancel_adv_timeout(struct hci_dev *hdev)
1333 if (hdev->adv_instance_timeout) {
1334 hdev->adv_instance_timeout = 0;
1335 cancel_delayed_work(&hdev->adv_instance_expire);
1339 /* For a single instance:
1340 * - force == true: The instance will be removed even when its remaining
1341 * lifetime is not zero.
1342 * - force == false: the instance will be deactivated but kept stored unless
1343 * the remaining lifetime is zero.
1345 * For instance == 0x00:
1346 * - force == true: All instances will be removed regardless of their timeout
1348 * - force == false: Only instances that have a timeout will be removed.
1350 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1351 struct hci_request *req, u8 instance,
1354 struct adv_info *adv_instance, *n, *next_instance = NULL;
1358 /* Cancel any timeout concerning the removed instance(s). */
1359 if (!instance || hdev->cur_adv_instance == instance)
1360 cancel_adv_timeout(hdev);
1362 /* Get the next instance to advertise BEFORE we remove
1363 * the current one. This can be the same instance again
1364 * if there is only one instance.
1366 if (instance && hdev->cur_adv_instance == instance)
1367 next_instance = hci_get_next_instance(hdev, instance);
1369 if (instance == 0x00) {
1370 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1372 if (!(force || adv_instance->timeout))
1375 rem_inst = adv_instance->instance;
1376 err = hci_remove_adv_instance(hdev, rem_inst);
1378 mgmt_advertising_removed(sk, hdev, rem_inst);
1381 adv_instance = hci_find_adv_instance(hdev, instance);
1383 if (force || (adv_instance && adv_instance->timeout &&
1384 !adv_instance->remaining_time)) {
1385 /* Don't advertise a removed instance. */
1386 if (next_instance &&
1387 next_instance->instance == instance)
1388 next_instance = NULL;
1390 err = hci_remove_adv_instance(hdev, instance);
1392 mgmt_advertising_removed(sk, hdev, instance);
1396 if (!req || !hdev_is_powered(hdev) ||
1397 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1401 __hci_req_schedule_adv_instance(req, next_instance->instance,
1405 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1407 struct hci_dev *hdev = req->hdev;
1409 /* If we're advertising or initiating an LE connection we can't
1410 * go ahead and change the random address at this time. This is
1411 * because the eventual initiator address used for the
1412 * subsequently created connection will be undefined (some
1413 * controllers use the new address and others the one we had
1414 * when the operation started).
1416 * In this kind of scenario skip the update and let the random
1417 * address be updated at the next cycle.
1419 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1420 hci_lookup_le_connect(hdev)) {
1421 BT_DBG("Deferring random address update");
1422 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1426 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1429 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1430 bool use_rpa, u8 *own_addr_type)
1432 struct hci_dev *hdev = req->hdev;
1435 /* If privacy is enabled use a resolvable private address. If
1436 * current RPA has expired or there is something else than
1437 * the current RPA in use, then generate a new one.
1442 *own_addr_type = ADDR_LE_DEV_RANDOM;
1444 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1445 !bacmp(&hdev->random_addr, &hdev->rpa))
1448 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1450 bt_dev_err(hdev, "failed to generate new RPA");
1454 set_random_addr(req, &hdev->rpa);
1456 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1457 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1462 /* In case of required privacy without resolvable private address,
1463 * use an non-resolvable private address. This is useful for active
1464 * scanning and non-connectable advertising.
1466 if (require_privacy) {
1470 /* The non-resolvable private address is generated
1471 * from random six bytes with the two most significant
1474 get_random_bytes(&nrpa, 6);
1477 /* The non-resolvable private address shall not be
1478 * equal to the public address.
1480 if (bacmp(&hdev->bdaddr, &nrpa))
1484 *own_addr_type = ADDR_LE_DEV_RANDOM;
1485 set_random_addr(req, &nrpa);
1489 /* If forcing static address is in use or there is no public
1490 * address use the static address as random address (but skip
1491 * the HCI command if the current random address is already the
1494 * In case BR/EDR has been disabled on a dual-mode controller
1495 * and a static address has been configured, then use that
1496 * address instead of the public BR/EDR address.
1498 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1499 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1500 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1501 bacmp(&hdev->static_addr, BDADDR_ANY))) {
1502 *own_addr_type = ADDR_LE_DEV_RANDOM;
1503 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1504 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1505 &hdev->static_addr);
1509 /* Neither privacy nor static address is being used so use a
1512 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1517 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1519 struct bdaddr_list *b;
1521 list_for_each_entry(b, &hdev->whitelist, list) {
1522 struct hci_conn *conn;
1524 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1528 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1535 void __hci_req_update_scan(struct hci_request *req)
1537 struct hci_dev *hdev = req->hdev;
1540 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1543 if (!hdev_is_powered(hdev))
1546 if (mgmt_powering_down(hdev))
1549 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1550 disconnected_whitelist_entries(hdev))
1553 scan = SCAN_DISABLED;
1555 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1556 scan |= SCAN_INQUIRY;
1558 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1559 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1562 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1565 static int update_scan(struct hci_request *req, unsigned long opt)
1567 hci_dev_lock(req->hdev);
1568 __hci_req_update_scan(req);
1569 hci_dev_unlock(req->hdev);
1573 static void scan_update_work(struct work_struct *work)
1575 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1577 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1580 static int connectable_update(struct hci_request *req, unsigned long opt)
1582 struct hci_dev *hdev = req->hdev;
1586 __hci_req_update_scan(req);
1588 /* If BR/EDR is not enabled and we disable advertising as a
1589 * by-product of disabling connectable, we need to update the
1590 * advertising flags.
1592 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1593 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1595 /* Update the advertising parameters if necessary */
1596 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1597 !list_empty(&hdev->adv_instances))
1598 __hci_req_enable_advertising(req);
1600 __hci_update_background_scan(req);
1602 hci_dev_unlock(hdev);
1607 static void connectable_update_work(struct work_struct *work)
1609 struct hci_dev *hdev = container_of(work, struct hci_dev,
1610 connectable_update);
1613 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1614 mgmt_set_connectable_complete(hdev, status);
1617 static u8 get_service_classes(struct hci_dev *hdev)
1619 struct bt_uuid *uuid;
1622 list_for_each_entry(uuid, &hdev->uuids, list)
1623 val |= uuid->svc_hint;
1628 void __hci_req_update_class(struct hci_request *req)
1630 struct hci_dev *hdev = req->hdev;
1633 BT_DBG("%s", hdev->name);
1635 if (!hdev_is_powered(hdev))
1638 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1641 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1644 cod[0] = hdev->minor_class;
1645 cod[1] = hdev->major_class;
1646 cod[2] = get_service_classes(hdev);
1648 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1651 if (memcmp(cod, hdev->dev_class, 3) == 0)
1654 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1657 static void write_iac(struct hci_request *req)
1659 struct hci_dev *hdev = req->hdev;
1660 struct hci_cp_write_current_iac_lap cp;
1662 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1665 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1666 /* Limited discoverable mode */
1667 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1668 cp.iac_lap[0] = 0x00; /* LIAC */
1669 cp.iac_lap[1] = 0x8b;
1670 cp.iac_lap[2] = 0x9e;
1671 cp.iac_lap[3] = 0x33; /* GIAC */
1672 cp.iac_lap[4] = 0x8b;
1673 cp.iac_lap[5] = 0x9e;
1675 /* General discoverable mode */
1677 cp.iac_lap[0] = 0x33; /* GIAC */
1678 cp.iac_lap[1] = 0x8b;
1679 cp.iac_lap[2] = 0x9e;
1682 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1683 (cp.num_iac * 3) + 1, &cp);
1686 static int discoverable_update(struct hci_request *req, unsigned long opt)
1688 struct hci_dev *hdev = req->hdev;
1692 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1694 __hci_req_update_scan(req);
1695 __hci_req_update_class(req);
1698 /* Advertising instances don't use the global discoverable setting, so
1699 * only update AD if advertising was enabled using Set Advertising.
1701 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1702 __hci_req_update_adv_data(req, 0x00);
1704 /* Discoverable mode affects the local advertising
1705 * address in limited privacy mode.
1707 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1708 __hci_req_enable_advertising(req);
1711 hci_dev_unlock(hdev);
1716 static void discoverable_update_work(struct work_struct *work)
1718 struct hci_dev *hdev = container_of(work, struct hci_dev,
1719 discoverable_update);
1722 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1723 mgmt_set_discoverable_complete(hdev, status);
1726 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1729 switch (conn->state) {
1732 if (conn->type == AMP_LINK) {
1733 struct hci_cp_disconn_phy_link cp;
1735 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1737 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1740 struct hci_cp_disconnect dc;
1742 dc.handle = cpu_to_le16(conn->handle);
1744 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1747 conn->state = BT_DISCONN;
1751 if (conn->type == LE_LINK) {
1752 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1754 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1756 } else if (conn->type == ACL_LINK) {
1757 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1759 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1764 if (conn->type == ACL_LINK) {
1765 struct hci_cp_reject_conn_req rej;
1767 bacpy(&rej.bdaddr, &conn->dst);
1768 rej.reason = reason;
1770 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1772 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1773 struct hci_cp_reject_sync_conn_req rej;
1775 bacpy(&rej.bdaddr, &conn->dst);
1777 /* SCO rejection has its own limited set of
1778 * allowed error values (0x0D-0x0F) which isn't
1779 * compatible with most values passed to this
1780 * function. To be safe hard-code one of the
1781 * values that's suitable for SCO.
1783 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1785 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1790 conn->state = BT_CLOSED;
1795 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1798 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1801 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1803 struct hci_request req;
1806 hci_req_init(&req, conn->hdev);
1808 __hci_abort_conn(&req, conn, reason);
1810 err = hci_req_run(&req, abort_conn_complete);
1811 if (err && err != -ENODATA) {
1812 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
1819 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1821 hci_dev_lock(req->hdev);
1822 __hci_update_background_scan(req);
1823 hci_dev_unlock(req->hdev);
1827 static void bg_scan_update(struct work_struct *work)
1829 struct hci_dev *hdev = container_of(work, struct hci_dev,
1831 struct hci_conn *conn;
1835 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1841 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1843 hci_le_conn_failed(conn, status);
1845 hci_dev_unlock(hdev);
/* hci_req_sync callback: queue the LE scan disable command. */
static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);

	return 0;
}
1854 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1857 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1858 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1859 struct hci_cp_inquiry cp;
1861 BT_DBG("%s", req->hdev->name);
1863 hci_dev_lock(req->hdev);
1864 hci_inquiry_cache_flush(req->hdev);
1865 hci_dev_unlock(req->hdev);
1867 memset(&cp, 0, sizeof(cp));
1869 if (req->hdev->discovery.limited)
1870 memcpy(&cp.lap, liac, sizeof(cp.lap));
1872 memcpy(&cp.lap, giac, sizeof(cp.lap));
1876 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1881 static void le_scan_disable_work(struct work_struct *work)
1883 struct hci_dev *hdev = container_of(work, struct hci_dev,
1884 le_scan_disable.work);
1887 BT_DBG("%s", hdev->name);
1889 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1892 cancel_delayed_work(&hdev->le_scan_restart);
1894 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1896 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
1901 hdev->discovery.scan_start = 0;
1903 /* If we were running LE only scan, change discovery state. If
1904 * we were running both LE and BR/EDR inquiry simultaneously,
1905 * and BR/EDR inquiry is already finished, stop discovery,
1906 * otherwise BR/EDR inquiry will stop discovery when finished.
1907 * If we will resolve remote device name, do not change
1911 if (hdev->discovery.type == DISCOV_TYPE_LE)
1912 goto discov_stopped;
1914 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1917 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1918 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1919 hdev->discovery.state != DISCOVERY_RESOLVING)
1920 goto discov_stopped;
1925 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1926 HCI_CMD_TIMEOUT, &status);
1928 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
1929 goto discov_stopped;
1936 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1937 hci_dev_unlock(hdev);
1940 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1942 struct hci_dev *hdev = req->hdev;
1943 struct hci_cp_le_set_scan_enable cp;
1945 /* If controller is not scanning we are done. */
1946 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1949 hci_req_add_le_scan_disable(req);
1951 memset(&cp, 0, sizeof(cp));
1952 cp.enable = LE_SCAN_ENABLE;
1953 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1954 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1959 static void le_scan_restart_work(struct work_struct *work)
1961 struct hci_dev *hdev = container_of(work, struct hci_dev,
1962 le_scan_restart.work);
1963 unsigned long timeout, duration, scan_start, now;
1966 BT_DBG("%s", hdev->name);
1968 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1970 bt_dev_err(hdev, "failed to restart LE scan: status %d",
1977 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1978 !hdev->discovery.scan_start)
1981 /* When the scan was started, hdev->le_scan_disable has been queued
1982 * after duration from scan_start. During scan restart this job
1983 * has been canceled, and we need to queue it again after proper
1984 * timeout, to make sure that scan does not run indefinitely.
1986 duration = hdev->discovery.scan_duration;
1987 scan_start = hdev->discovery.scan_start;
1989 if (now - scan_start <= duration) {
1992 if (now >= scan_start)
1993 elapsed = now - scan_start;
1995 elapsed = ULONG_MAX - scan_start + now;
1997 timeout = duration - elapsed;
2002 queue_delayed_work(hdev->req_workqueue,
2003 &hdev->le_scan_disable, timeout);
2006 hci_dev_unlock(hdev);
2009 static int active_scan(struct hci_request *req, unsigned long opt)
2011 uint16_t interval = opt;
2012 struct hci_dev *hdev = req->hdev;
2013 struct hci_cp_le_set_scan_param param_cp;
2014 struct hci_cp_le_set_scan_enable enable_cp;
2018 BT_DBG("%s", hdev->name);
2020 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2023 /* Don't let discovery abort an outgoing connection attempt
2024 * that's using directed advertising.
2026 if (hci_lookup_le_connect(hdev)) {
2027 hci_dev_unlock(hdev);
2031 cancel_adv_timeout(hdev);
2032 hci_dev_unlock(hdev);
2034 __hci_req_disable_advertising(req);
2037 /* If controller is scanning, it means the background scanning is
2038 * running. Thus, we should temporarily stop it in order to set the
2039 * discovery scanning parameters.
2041 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2042 hci_req_add_le_scan_disable(req);
2044 /* All active scans will be done with either a resolvable private
2045 * address (when privacy feature has been enabled) or non-resolvable
2048 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2051 own_addr_type = ADDR_LE_DEV_PUBLIC;
2053 memset(¶m_cp, 0, sizeof(param_cp));
2054 param_cp.type = LE_SCAN_ACTIVE;
2055 param_cp.interval = cpu_to_le16(interval);
2056 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2057 param_cp.own_address_type = own_addr_type;
2059 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2062 memset(&enable_cp, 0, sizeof(enable_cp));
2063 enable_cp.enable = LE_SCAN_ENABLE;
2064 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2066 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2072 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2076 BT_DBG("%s", req->hdev->name);
2078 err = active_scan(req, opt);
2082 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2085 static void start_discovery(struct hci_dev *hdev, u8 *status)
2087 unsigned long timeout;
2089 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2091 switch (hdev->discovery.type) {
2092 case DISCOV_TYPE_BREDR:
2093 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2094 hci_req_sync(hdev, bredr_inquiry,
2095 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2098 case DISCOV_TYPE_INTERLEAVED:
2099 /* When running simultaneous discovery, the LE scanning time
2100 * should occupy the whole discovery time sine BR/EDR inquiry
2101 * and LE scanning are scheduled by the controller.
2103 * For interleaving discovery in comparison, BR/EDR inquiry
2104 * and LE scanning are done sequentially with separate
2107 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2109 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2110 /* During simultaneous discovery, we double LE scan
2111 * interval. We must leave some time for the controller
2112 * to do BR/EDR inquiry.
2114 hci_req_sync(hdev, interleaved_discov,
2115 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2120 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2121 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2122 HCI_CMD_TIMEOUT, status);
2124 case DISCOV_TYPE_LE:
2125 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2126 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2127 HCI_CMD_TIMEOUT, status);
2130 *status = HCI_ERROR_UNSPECIFIED;
2137 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2139 /* When service discovery is used and the controller has a
2140 * strict duplicate filter, it is important to remember the
2141 * start and duration of the scan. This is required for
2142 * restarting scanning during the discovery phase.
2144 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2145 hdev->discovery.result_filtering) {
2146 hdev->discovery.scan_start = jiffies;
2147 hdev->discovery.scan_duration = timeout;
2150 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2154 bool hci_req_stop_discovery(struct hci_request *req)
2156 struct hci_dev *hdev = req->hdev;
2157 struct discovery_state *d = &hdev->discovery;
2158 struct hci_cp_remote_name_req_cancel cp;
2159 struct inquiry_entry *e;
2162 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2164 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2165 if (test_bit(HCI_INQUIRY, &hdev->flags))
2166 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2168 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2169 cancel_delayed_work(&hdev->le_scan_disable);
2170 hci_req_add_le_scan_disable(req);
2175 /* Passive scanning */
2176 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2177 hci_req_add_le_scan_disable(req);
2182 /* No further actions needed for LE-only discovery */
2183 if (d->type == DISCOV_TYPE_LE)
2186 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2187 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2192 bacpy(&cp.bdaddr, &e->data.bdaddr);
2193 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2201 static int stop_discovery(struct hci_request *req, unsigned long opt)
2203 hci_dev_lock(req->hdev);
2204 hci_req_stop_discovery(req);
2205 hci_dev_unlock(req->hdev);
2210 static void discov_update(struct work_struct *work)
2212 struct hci_dev *hdev = container_of(work, struct hci_dev,
2216 switch (hdev->discovery.state) {
2217 case DISCOVERY_STARTING:
2218 start_discovery(hdev, &status);
2219 mgmt_start_discovery_complete(hdev, status);
2221 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2223 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2225 case DISCOVERY_STOPPING:
2226 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2227 mgmt_stop_discovery_complete(hdev, status);
2229 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2231 case DISCOVERY_STOPPED:
2237 static void discov_off(struct work_struct *work)
2239 struct hci_dev *hdev = container_of(work, struct hci_dev,
2242 BT_DBG("%s", hdev->name);
2246 /* When discoverable timeout triggers, then just make sure
2247 * the limited discoverable flag is cleared. Even in the case
2248 * of a timeout triggered from general discoverable, it is
2249 * safe to unconditionally clear the flag.
2251 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2252 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2253 hdev->discov_timeout = 0;
2255 hci_dev_unlock(hdev);
2257 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2258 mgmt_new_settings(hdev);
2261 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2263 struct hci_dev *hdev = req->hdev;
2268 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2269 !lmp_host_ssp_capable(hdev)) {
2272 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2274 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2277 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2278 sizeof(support), &support);
2282 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2283 lmp_bredr_capable(hdev)) {
2284 struct hci_cp_write_le_host_supported cp;
2289 /* Check first if we already have the right
2290 * host state (host features set)
2292 if (cp.le != lmp_host_le_capable(hdev) ||
2293 cp.simul != lmp_host_le_br_capable(hdev))
2294 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2298 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2299 /* Make sure the controller has a good default for
2300 * advertising data. This also applies to the case
2301 * where BR/EDR was toggled during the AUTO_OFF phase.
2303 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2304 list_empty(&hdev->adv_instances)) {
2305 __hci_req_update_adv_data(req, 0x00);
2306 __hci_req_update_scan_rsp_data(req, 0x00);
2308 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2309 __hci_req_enable_advertising(req);
2310 } else if (!list_empty(&hdev->adv_instances)) {
2311 struct adv_info *adv_instance;
2313 adv_instance = list_first_entry(&hdev->adv_instances,
2314 struct adv_info, list);
2315 __hci_req_schedule_adv_instance(req,
2316 adv_instance->instance,
2321 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2322 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2323 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2324 sizeof(link_sec), &link_sec);
2326 if (lmp_bredr_capable(hdev)) {
2327 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2328 __hci_req_write_fast_connectable(req, true);
2330 __hci_req_write_fast_connectable(req, false);
2331 __hci_req_update_scan(req);
2332 __hci_req_update_class(req);
2333 __hci_req_update_name(req);
2334 __hci_req_update_eir(req);
2337 hci_dev_unlock(hdev);
2341 int __hci_req_hci_power_on(struct hci_dev *hdev)
2343 /* Register the available SMP channels (BR/EDR and LE) only when
2344 * successfully powering on the controller. This late
2345 * registration is required so that LE SMP can clearly decide if
2346 * the public address or static address is used.
2350 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2354 void hci_request_setup(struct hci_dev *hdev)
2356 INIT_WORK(&hdev->discov_update, discov_update);
2357 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2358 INIT_WORK(&hdev->scan_update, scan_update_work);
2359 INIT_WORK(&hdev->connectable_update, connectable_update_work);
2360 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2361 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2362 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2363 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2364 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2367 void hci_request_cancel_all(struct hci_dev *hdev)
2369 hci_req_sync_cancel(hdev, ENODEV);
2371 cancel_work_sync(&hdev->discov_update);
2372 cancel_work_sync(&hdev->bg_scan_update);
2373 cancel_work_sync(&hdev->scan_update);
2374 cancel_work_sync(&hdev->connectable_update);
2375 cancel_work_sync(&hdev->discoverable_update);
2376 cancel_delayed_work_sync(&hdev->discov_off);
2377 cancel_delayed_work_sync(&hdev->le_scan_disable);
2378 cancel_delayed_work_sync(&hdev->le_scan_restart);
2380 if (hdev->adv_instance_timeout) {
2381 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2382 hdev->adv_instance_timeout = 0;