/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
/* Deferred work handlers, defined later in this file */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */
65 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
68 struct hci_dev *hdev = file->private_data;
71 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
74 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
77 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
78 size_t count, loff_t *ppos)
80 struct hci_dev *hdev = file->private_data;
85 if (!test_bit(HCI_UP, &hdev->flags))
88 err = kstrtobool_from_user(user_buf, count, &enable);
92 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
95 hci_req_sync_lock(hdev);
97 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
100 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102 hci_req_sync_unlock(hdev);
109 hci_dev_change_flag(hdev, HCI_DUT_MODE);
114 static const struct file_operations dut_mode_fops = {
116 .read = dut_mode_read,
117 .write = dut_mode_write,
118 .llseek = default_llseek,
121 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
122 size_t count, loff_t *ppos)
124 struct hci_dev *hdev = file->private_data;
127 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
130 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
133 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
134 size_t count, loff_t *ppos)
136 struct hci_dev *hdev = file->private_data;
140 err = kstrtobool_from_user(user_buf, count, &enable);
144 /* When the diagnostic flags are not persistent and the transport
145 * is not active or in user channel operation, then there is no need
146 * for the vendor callback. Instead just store the desired value and
147 * the setting will be programmed when the controller gets powered on.
149 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
150 (!test_bit(HCI_RUNNING, &hdev->flags) ||
151 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
154 hci_req_sync_lock(hdev);
155 err = hdev->set_diag(hdev, enable);
156 hci_req_sync_unlock(hdev);
163 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
165 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
170 static const struct file_operations vendor_diag_fops = {
172 .read = vendor_diag_read,
173 .write = vendor_diag_write,
174 .llseek = default_llseek,
177 static void hci_debugfs_create_basic(struct hci_dev *hdev)
179 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
183 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
187 static int hci_reset_req(struct hci_request *req, unsigned long opt)
189 BT_DBG("%s %ld", req->hdev->name, opt);
192 set_bit(HCI_RESET, &req->hdev->flags);
193 hci_req_add(req, HCI_OP_RESET, 0, NULL);
197 static void bredr_init(struct hci_request *req)
199 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201 /* Read Local Supported Features */
202 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
204 /* Read Local Version */
205 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
207 /* Read BD Address */
208 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
211 static void amp_init1(struct hci_request *req)
213 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
215 /* Read Local Version */
216 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
218 /* Read Local Supported Commands */
219 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
221 /* Read Local AMP Info */
222 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
224 /* Read Data Blk size */
225 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
227 /* Read Flow Control Mode */
228 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
230 /* Read Location Data */
231 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
234 static int amp_init2(struct hci_request *req)
236 /* Read Local Supported Features. Not all AMP controllers
237 * support this so it's placed conditionally in the second
240 if (req->hdev->commands[14] & 0x20)
241 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
246 static int hci_init1_req(struct hci_request *req, unsigned long opt)
248 struct hci_dev *hdev = req->hdev;
250 BT_DBG("%s %ld", hdev->name, opt);
253 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
254 hci_reset_req(req, 0);
256 switch (hdev->dev_type) {
264 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
271 static void bredr_setup(struct hci_request *req)
276 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
277 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
279 /* Read Class of Device */
280 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
282 /* Read Local Name */
283 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
285 /* Read Voice Setting */
286 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
288 /* Read Number of Supported IAC */
289 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
291 /* Read Current IAC LAP */
292 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
294 /* Clear Event Filters */
295 flt_type = HCI_FLT_CLEAR_ALL;
296 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
298 /* Connection accept timeout ~20 secs */
299 param = cpu_to_le16(0x7d00);
300 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
303 static void le_setup(struct hci_request *req)
305 struct hci_dev *hdev = req->hdev;
307 /* Read LE Buffer Size */
308 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
310 /* Read LE Local Supported Features */
311 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
313 /* Read LE Supported States */
314 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
316 /* LE-only controllers have LE implicitly enabled */
317 if (!lmp_bredr_capable(hdev))
318 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
321 static void hci_setup_event_mask(struct hci_request *req)
323 struct hci_dev *hdev = req->hdev;
325 /* The second byte is 0xff instead of 0x9f (two reserved bits
326 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
329 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
331 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
332 * any event mask for pre 1.2 devices.
334 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
337 if (lmp_bredr_capable(hdev)) {
338 events[4] |= 0x01; /* Flow Specification Complete */
340 /* Use a different default for LE-only devices */
341 memset(events, 0, sizeof(events));
342 events[1] |= 0x20; /* Command Complete */
343 events[1] |= 0x40; /* Command Status */
344 events[1] |= 0x80; /* Hardware Error */
346 /* If the controller supports the Disconnect command, enable
347 * the corresponding event. In addition enable packet flow
348 * control related events.
350 if (hdev->commands[0] & 0x20) {
351 events[0] |= 0x10; /* Disconnection Complete */
352 events[2] |= 0x04; /* Number of Completed Packets */
353 events[3] |= 0x02; /* Data Buffer Overflow */
356 /* If the controller supports the Read Remote Version
357 * Information command, enable the corresponding event.
359 if (hdev->commands[2] & 0x80)
360 events[1] |= 0x08; /* Read Remote Version Information
364 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
365 events[0] |= 0x80; /* Encryption Change */
366 events[5] |= 0x80; /* Encryption Key Refresh Complete */
370 if (lmp_inq_rssi_capable(hdev) ||
371 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
372 events[4] |= 0x02; /* Inquiry Result with RSSI */
374 if (lmp_ext_feat_capable(hdev))
375 events[4] |= 0x04; /* Read Remote Extended Features Complete */
377 if (lmp_esco_capable(hdev)) {
378 events[5] |= 0x08; /* Synchronous Connection Complete */
379 events[5] |= 0x10; /* Synchronous Connection Changed */
382 if (lmp_sniffsubr_capable(hdev))
383 events[5] |= 0x20; /* Sniff Subrating */
385 if (lmp_pause_enc_capable(hdev))
386 events[5] |= 0x80; /* Encryption Key Refresh Complete */
388 if (lmp_ext_inq_capable(hdev))
389 events[5] |= 0x40; /* Extended Inquiry Result */
391 if (lmp_no_flush_capable(hdev))
392 events[7] |= 0x01; /* Enhanced Flush Complete */
394 if (lmp_lsto_capable(hdev))
395 events[6] |= 0x80; /* Link Supervision Timeout Changed */
397 if (lmp_ssp_capable(hdev)) {
398 events[6] |= 0x01; /* IO Capability Request */
399 events[6] |= 0x02; /* IO Capability Response */
400 events[6] |= 0x04; /* User Confirmation Request */
401 events[6] |= 0x08; /* User Passkey Request */
402 events[6] |= 0x10; /* Remote OOB Data Request */
403 events[6] |= 0x20; /* Simple Pairing Complete */
404 events[7] |= 0x04; /* User Passkey Notification */
405 events[7] |= 0x08; /* Keypress Notification */
406 events[7] |= 0x10; /* Remote Host Supported
407 * Features Notification
411 if (lmp_le_capable(hdev))
412 events[7] |= 0x20; /* LE Meta-Event */
414 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
417 static int hci_init2_req(struct hci_request *req, unsigned long opt)
419 struct hci_dev *hdev = req->hdev;
421 if (hdev->dev_type == HCI_AMP)
422 return amp_init2(req);
424 if (lmp_bredr_capable(hdev))
427 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
429 if (lmp_le_capable(hdev))
432 /* All Bluetooth 1.2 and later controllers should support the
433 * HCI command for reading the local supported commands.
435 * Unfortunately some controllers indicate Bluetooth 1.2 support,
436 * but do not have support for this command. If that is the case,
437 * the driver can quirk the behavior and skip reading the local
438 * supported commands.
440 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
441 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
442 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
444 if (lmp_ssp_capable(hdev)) {
445 /* When SSP is available, then the host features page
446 * should also be available as well. However some
447 * controllers list the max_page as 0 as long as SSP
448 * has not been enabled. To achieve proper debugging
449 * output, force the minimum max_page to 1 at least.
451 hdev->max_page = 0x01;
453 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
456 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
457 sizeof(mode), &mode);
459 struct hci_cp_write_eir cp;
461 memset(hdev->eir, 0, sizeof(hdev->eir));
462 memset(&cp, 0, sizeof(cp));
464 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
468 if (lmp_inq_rssi_capable(hdev) ||
469 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
472 /* If Extended Inquiry Result events are supported, then
473 * they are clearly preferred over Inquiry Result with RSSI
476 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
478 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
481 if (lmp_inq_tx_pwr_capable(hdev))
482 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
484 if (lmp_ext_feat_capable(hdev)) {
485 struct hci_cp_read_local_ext_features cp;
488 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
492 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
494 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
501 static void hci_setup_link_policy(struct hci_request *req)
503 struct hci_dev *hdev = req->hdev;
504 struct hci_cp_write_def_link_policy cp;
507 if (lmp_rswitch_capable(hdev))
508 link_policy |= HCI_LP_RSWITCH;
509 if (lmp_hold_capable(hdev))
510 link_policy |= HCI_LP_HOLD;
511 if (lmp_sniff_capable(hdev))
512 link_policy |= HCI_LP_SNIFF;
513 if (lmp_park_capable(hdev))
514 link_policy |= HCI_LP_PARK;
516 cp.policy = cpu_to_le16(link_policy);
517 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
520 static void hci_set_le_support(struct hci_request *req)
522 struct hci_dev *hdev = req->hdev;
523 struct hci_cp_write_le_host_supported cp;
525 /* LE-only devices do not support explicit enablement */
526 if (!lmp_bredr_capable(hdev))
529 memset(&cp, 0, sizeof(cp));
531 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
536 if (cp.le != lmp_host_le_capable(hdev))
537 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
541 static void hci_set_event_mask_page_2(struct hci_request *req)
543 struct hci_dev *hdev = req->hdev;
544 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
545 bool changed = false;
547 /* If Connectionless Slave Broadcast master role is supported
548 * enable all necessary events for it.
550 if (lmp_csb_master_capable(hdev)) {
551 events[1] |= 0x40; /* Triggered Clock Capture */
552 events[1] |= 0x80; /* Synchronization Train Complete */
553 events[2] |= 0x10; /* Slave Page Response Timeout */
554 events[2] |= 0x20; /* CSB Channel Map Change */
558 /* If Connectionless Slave Broadcast slave role is supported
559 * enable all necessary events for it.
561 if (lmp_csb_slave_capable(hdev)) {
562 events[2] |= 0x01; /* Synchronization Train Received */
563 events[2] |= 0x02; /* CSB Receive */
564 events[2] |= 0x04; /* CSB Timeout */
565 events[2] |= 0x08; /* Truncated Page Complete */
569 /* Enable Authenticated Payload Timeout Expired event if supported */
570 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
575 /* Some Broadcom based controllers indicate support for Set Event
576 * Mask Page 2 command, but then actually do not support it. Since
577 * the default value is all bits set to zero, the command is only
578 * required if the event mask has to be changed. In case no change
579 * to the event mask is needed, skip this command.
582 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
583 sizeof(events), events);
586 static int hci_init3_req(struct hci_request *req, unsigned long opt)
588 struct hci_dev *hdev = req->hdev;
591 hci_setup_event_mask(req);
593 if (hdev->commands[6] & 0x20 &&
594 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
595 struct hci_cp_read_stored_link_key cp;
597 bacpy(&cp.bdaddr, BDADDR_ANY);
599 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
602 if (hdev->commands[5] & 0x10)
603 hci_setup_link_policy(req);
605 if (hdev->commands[8] & 0x01)
606 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
608 if (hdev->commands[18] & 0x04 &&
609 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
610 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
612 /* Some older Broadcom based Bluetooth 1.2 controllers do not
613 * support the Read Page Scan Type command. Check support for
614 * this command in the bit mask of supported commands.
616 if (hdev->commands[13] & 0x01)
617 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
619 if (lmp_le_capable(hdev)) {
622 memset(events, 0, sizeof(events));
624 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
625 events[0] |= 0x10; /* LE Long Term Key Request */
627 /* If controller supports the Connection Parameters Request
628 * Link Layer Procedure, enable the corresponding event.
630 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
631 events[0] |= 0x20; /* LE Remote Connection
635 /* If the controller supports the Data Length Extension
636 * feature, enable the corresponding event.
638 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
639 events[0] |= 0x40; /* LE Data Length Change */
641 /* If the controller supports LL Privacy feature, enable
642 * the corresponding event.
644 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
645 events[1] |= 0x02; /* LE Enhanced Connection
649 /* If the controller supports Extended Scanner Filter
650 * Policies, enable the correspondig event.
652 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
653 events[1] |= 0x04; /* LE Direct Advertising
657 /* If the controller supports Channel Selection Algorithm #2
658 * feature, enable the corresponding event.
660 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
661 events[2] |= 0x08; /* LE Channel Selection
665 /* If the controller supports the LE Set Scan Enable command,
666 * enable the corresponding advertising report event.
668 if (hdev->commands[26] & 0x08)
669 events[0] |= 0x02; /* LE Advertising Report */
671 /* If the controller supports the LE Create Connection
672 * command, enable the corresponding event.
674 if (hdev->commands[26] & 0x10)
675 events[0] |= 0x01; /* LE Connection Complete */
677 /* If the controller supports the LE Connection Update
678 * command, enable the corresponding event.
680 if (hdev->commands[27] & 0x04)
681 events[0] |= 0x04; /* LE Connection Update
685 /* If the controller supports the LE Read Remote Used Features
686 * command, enable the corresponding event.
688 if (hdev->commands[27] & 0x20)
689 events[0] |= 0x08; /* LE Read Remote Used
693 /* If the controller supports the LE Read Local P-256
694 * Public Key command, enable the corresponding event.
696 if (hdev->commands[34] & 0x02)
697 events[0] |= 0x80; /* LE Read Local P-256
698 * Public Key Complete
701 /* If the controller supports the LE Generate DHKey
702 * command, enable the corresponding event.
704 if (hdev->commands[34] & 0x04)
705 events[1] |= 0x01; /* LE Generate DHKey Complete */
707 /* If the controller supports the LE Set Default PHY or
708 * LE Set PHY commands, enable the corresponding event.
710 if (hdev->commands[35] & (0x20 | 0x40))
711 events[1] |= 0x08; /* LE PHY Update Complete */
713 /* If the controller supports LE Set Extended Scan Parameters
714 * and LE Set Extended Scan Enable commands, enable the
715 * corresponding event.
717 if (use_ext_scan(hdev))
718 events[1] |= 0x10; /* LE Extended Advertising
722 /* If the controller supports the LE Extended Advertising
723 * command, enable the corresponding event.
725 if (ext_adv_capable(hdev))
726 events[2] |= 0x02; /* LE Advertising Set
730 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
733 /* Read LE Advertising Channel TX Power */
734 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
735 /* HCI TS spec forbids mixing of legacy and extended
736 * advertising commands wherein READ_ADV_TX_POWER is
737 * also included. So do not call it if extended adv
738 * is supported otherwise controller will return
739 * COMMAND_DISALLOWED for extended commands.
741 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
744 if (hdev->commands[38] & 0x80) {
745 /* Read LE Min/Max Tx Power*/
746 hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
750 if (hdev->commands[26] & 0x40) {
751 /* Read LE White List Size */
752 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
756 if (hdev->commands[26] & 0x80) {
757 /* Clear LE White List */
758 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
761 if (hdev->commands[34] & 0x40) {
762 /* Read LE Resolving List Size */
763 hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
767 if (hdev->commands[34] & 0x20) {
768 /* Clear LE Resolving List */
769 hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
772 if (hdev->commands[35] & 0x04) {
773 __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
775 /* Set RPA timeout */
776 hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
780 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
781 /* Read LE Maximum Data Length */
782 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
784 /* Read LE Suggested Default Data Length */
785 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
788 if (ext_adv_capable(hdev)) {
789 /* Read LE Number of Supported Advertising Sets */
790 hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
794 hci_set_le_support(req);
797 /* Read features beyond page 1 if available */
798 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
799 struct hci_cp_read_local_ext_features cp;
802 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
809 static int hci_init4_req(struct hci_request *req, unsigned long opt)
811 struct hci_dev *hdev = req->hdev;
813 /* Some Broadcom based Bluetooth controllers do not support the
814 * Delete Stored Link Key command. They are clearly indicating its
815 * absence in the bit mask of supported commands.
817 * Check the supported commands and only if the command is marked
818 * as supported send it. If not supported assume that the controller
819 * does not have actual support for stored link keys which makes this
820 * command redundant anyway.
822 * Some controllers indicate that they support handling deleting
823 * stored link keys, but they don't. The quirk lets a driver
824 * just disable this command.
826 if (hdev->commands[6] & 0x80 &&
827 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
828 struct hci_cp_delete_stored_link_key cp;
830 bacpy(&cp.bdaddr, BDADDR_ANY);
831 cp.delete_all = 0x01;
832 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
836 /* Set event mask page 2 if the HCI command for it is supported */
837 if (hdev->commands[22] & 0x04)
838 hci_set_event_mask_page_2(req);
840 /* Read local codec list if the HCI command is supported */
841 if (hdev->commands[29] & 0x20)
842 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
844 /* Read local pairing options if the HCI command is supported */
845 if (hdev->commands[41] & 0x08)
846 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
848 /* Get MWS transport configuration if the HCI command is supported */
849 if (hdev->commands[30] & 0x08)
850 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
852 /* Check for Synchronization Train support */
853 if (lmp_sync_train_capable(hdev))
854 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
856 /* Enable Secure Connections if supported and configured */
857 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
858 bredr_sc_enabled(hdev)) {
861 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
862 sizeof(support), &support);
865 /* Set erroneous data reporting if supported to the wideband speech
868 if (hdev->commands[18] & 0x08 &&
869 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
870 bool enabled = hci_dev_test_flag(hdev,
871 HCI_WIDEBAND_SPEECH_ENABLED);
874 (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
875 struct hci_cp_write_def_err_data_reporting cp;
877 cp.err_data_reporting = enabled ?
878 ERR_DATA_REPORTING_ENABLED :
879 ERR_DATA_REPORTING_DISABLED;
881 hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
886 /* Set Suggested Default Data Length to maximum if supported */
887 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
888 struct hci_cp_le_write_def_data_len cp;
890 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
891 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
892 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
895 /* Set Default PHY parameters if command is supported */
896 if (hdev->commands[35] & 0x20) {
897 struct hci_cp_le_set_default_phy cp;
900 cp.tx_phys = hdev->le_tx_def_phys;
901 cp.rx_phys = hdev->le_rx_def_phys;
903 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
909 static int __hci_init(struct hci_dev *hdev)
913 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
917 if (hci_dev_test_flag(hdev, HCI_SETUP))
918 hci_debugfs_create_basic(hdev);
920 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
924 /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
925 * BR/EDR/LE type controllers. AMP controllers only need the
926 * first two stages of init.
928 if (hdev->dev_type != HCI_PRIMARY)
931 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
935 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
939 /* This function is only called when the controller is actually in
940 * configured state. When the controller is marked as unconfigured,
941 * this initialization procedure is not run.
943 * It means that it is possible that a controller runs through its
944 * setup phase and then discovers missing settings. If that is the
945 * case, then this function will not be called. It then will only
946 * be called during the config phase.
948 * So only when in setup phase or config phase, create the debugfs
949 * entries and register the SMP channels.
951 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
952 !hci_dev_test_flag(hdev, HCI_CONFIG))
955 hci_debugfs_create_common(hdev);
957 if (lmp_bredr_capable(hdev))
958 hci_debugfs_create_bredr(hdev);
960 if (lmp_le_capable(hdev))
961 hci_debugfs_create_le(hdev);
966 static int hci_init0_req(struct hci_request *req, unsigned long opt)
968 struct hci_dev *hdev = req->hdev;
970 BT_DBG("%s %ld", hdev->name, opt);
973 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
974 hci_reset_req(req, 0);
976 /* Read Local Version */
977 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
979 /* Read BD Address */
980 if (hdev->set_bdaddr)
981 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
986 static int __hci_unconf_init(struct hci_dev *hdev)
990 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
993 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
997 if (hci_dev_test_flag(hdev, HCI_SETUP))
998 hci_debugfs_create_basic(hdev);
1003 static int hci_scan_req(struct hci_request *req, unsigned long opt)
1007 BT_DBG("%s %x", req->hdev->name, scan);
1009 /* Inquiry and Page scans */
1010 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1014 static int hci_auth_req(struct hci_request *req, unsigned long opt)
1018 BT_DBG("%s %x", req->hdev->name, auth);
1020 /* Authentication */
1021 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1025 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1029 BT_DBG("%s %x", req->hdev->name, encrypt);
1032 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1036 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
1038 __le16 policy = cpu_to_le16(opt);
1040 BT_DBG("%s %x", req->hdev->name, policy);
1042 /* Default link policy */
1043 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1047 /* Get HCI device by index.
1048 * Device is held on return. */
1049 struct hci_dev *hci_dev_get(int index)
1051 struct hci_dev *hdev = NULL, *d;
1053 BT_DBG("%d", index);
1058 read_lock(&hci_dev_list_lock);
1059 list_for_each_entry(d, &hci_dev_list, list) {
1060 if (d->id == index) {
1061 hdev = hci_dev_hold(d);
1065 read_unlock(&hci_dev_list_lock);
1069 /* ---- Inquiry support ---- */
1071 bool hci_discovery_active(struct hci_dev *hdev)
1073 struct discovery_state *discov = &hdev->discovery;
1075 switch (discov->state) {
1076 case DISCOVERY_FINDING:
1077 case DISCOVERY_RESOLVING:
1085 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1087 int old_state = hdev->discovery.state;
1089 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1091 if (old_state == state)
1094 hdev->discovery.state = state;
1097 case DISCOVERY_STOPPED:
1098 hci_update_background_scan(hdev);
1100 if (old_state != DISCOVERY_STARTING)
1101 mgmt_discovering(hdev, 0);
1103 case DISCOVERY_STARTING:
1105 case DISCOVERY_FINDING:
1106 mgmt_discovering(hdev, 1);
1108 case DISCOVERY_RESOLVING:
1110 case DISCOVERY_STOPPING:
1115 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1117 struct discovery_state *cache = &hdev->discovery;
1118 struct inquiry_entry *p, *n;
1120 list_for_each_entry_safe(p, n, &cache->all, all) {
1125 INIT_LIST_HEAD(&cache->unknown);
1126 INIT_LIST_HEAD(&cache->resolve);
1129 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1132 struct discovery_state *cache = &hdev->discovery;
1133 struct inquiry_entry *e;
1135 BT_DBG("cache %p, %pMR", cache, bdaddr);
1137 list_for_each_entry(e, &cache->all, all) {
1138 if (!bacmp(&e->data.bdaddr, bdaddr))
1145 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1148 struct discovery_state *cache = &hdev->discovery;
1149 struct inquiry_entry *e;
1151 BT_DBG("cache %p, %pMR", cache, bdaddr);
1153 list_for_each_entry(e, &cache->unknown, list) {
1154 if (!bacmp(&e->data.bdaddr, bdaddr))
1161 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1165 struct discovery_state *cache = &hdev->discovery;
1166 struct inquiry_entry *e;
1168 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1170 list_for_each_entry(e, &cache->resolve, list) {
1171 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1173 if (!bacmp(&e->data.bdaddr, bdaddr))
1180 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1181 struct inquiry_entry *ie)
1183 struct discovery_state *cache = &hdev->discovery;
1184 struct list_head *pos = &cache->resolve;
1185 struct inquiry_entry *p;
1187 list_del(&ie->list);
1189 list_for_each_entry(p, &cache->resolve, list) {
1190 if (p->name_state != NAME_PENDING &&
1191 abs(p->data.rssi) >= abs(ie->data.rssi))
1196 list_add(&ie->list, pos);
1199 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1202 struct discovery_state *cache = &hdev->discovery;
1203 struct inquiry_entry *ie;
1206 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1208 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1210 if (!data->ssp_mode)
1211 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1213 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1215 if (!ie->data.ssp_mode)
1216 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1218 if (ie->name_state == NAME_NEEDED &&
1219 data->rssi != ie->data.rssi) {
1220 ie->data.rssi = data->rssi;
1221 hci_inquiry_cache_update_resolve(hdev, ie);
1227 /* Entry not in the cache. Add new one. */
1228 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1230 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1234 list_add(&ie->all, &cache->all);
1237 ie->name_state = NAME_KNOWN;
1239 ie->name_state = NAME_NOT_KNOWN;
1240 list_add(&ie->list, &cache->unknown);
1244 if (name_known && ie->name_state != NAME_KNOWN &&
1245 ie->name_state != NAME_PENDING) {
1246 ie->name_state = NAME_KNOWN;
1247 list_del(&ie->list);
1250 memcpy(&ie->data, data, sizeof(*data));
1251 ie->timestamp = jiffies;
1252 cache->timestamp = jiffies;
1254 if (ie->name_state == NAME_NOT_KNOWN)
1255 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info, returning the number copied.  Used by the
 * HCIINQUIRY ioctl path; must not sleep (caller holds hdev lock).
 */
1261 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1263 struct discovery_state *cache = &hdev->discovery;
1264 struct inquiry_info *info = (struct inquiry_info *) buf;
1265 struct inquiry_entry *e;
1268 list_for_each_entry(e, &cache->all, all) {
1269 struct inquiry_data *data = &e->data;
/* Translate the cached record into the wire-format inquiry_info */
1274 bacpy(&info->bdaddr, &data->bdaddr);
1275 info->pscan_rep_mode = data->pscan_rep_mode;
1276 info->pscan_period_mode = data->pscan_period_mode;
1277 info->pscan_mode = data->pscan_mode;
1278 memcpy(info->dev_class, data->dev_class, 3);
1279 info->clock_offset = data->clock_offset;
1285 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder: queue an HCI Inquiry command using the parameters
 * from the hci_inquiry_req passed via @opt.  Skips the command if an
 * inquiry is already in progress.
 */
1289 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1291 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1292 struct hci_dev *hdev = req->hdev;
1293 struct hci_cp_inquiry cp;
1295 BT_DBG("%s", hdev->name);
1297 if (test_bit(HCI_INQUIRY, &hdev->flags))
/* LAP is 3 bytes (e.g. GIAC/LIAC) */
1301 memcpy(&cp.lap, &ir->lap, 3);
1302 cp.length = ir->length;
1303 cp.num_rsp = ir->num_rsp;
1304 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl handler: optionally flush the inquiry cache, run a
 * synchronous inquiry, then copy the cached results back to userspace.
 * @arg points to a struct hci_inquiry_req followed by result space.
 * NOTE(review): several lines (error labels, unlock/put pairs) are
 * missing from this extraction.
 */
1309 int hci_inquiry(void __user *arg)
1311 __u8 __user *ptr = arg;
1312 struct hci_inquiry_req ir;
1313 struct hci_dev *hdev;
1314 int err = 0, do_inquiry = 0, max_rsp;
1318 if (copy_from_user(&ir, ptr, sizeof(ir)))
1321 hdev = hci_dev_get(ir.dev_id);
/* The ioctl interface is not available for user-channel, unconfigured,
 * non-primary or BR/EDR-disabled controllers.
 */
1325 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1330 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1335 if (hdev->dev_type != HCI_PRIMARY) {
1340 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
/* Re-run the inquiry when the cache is stale, empty, or the caller
 * explicitly asked for a flush.
 */
1346 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1347 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1348 hci_inquiry_cache_flush(hdev);
1351 hci_dev_unlock(hdev);
/* ir.length is in 1.28s units per the HCI spec; ~2000ms here includes
 * margin — presumably intentional slack, confirm against hci_request.
 */
1353 timeo = ir.length * msecs_to_jiffies(2000);
1356 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1361 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1362 * cleared). If it is interrupted by a signal, return -EINTR.
1364 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1365 TASK_INTERRUPTIBLE)) {
1371 /* for unlimited number of responses we will use buffer with
1374 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1376 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1377 * copy it to the user space.
1379 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1386 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1387 hci_dev_unlock(hdev);
1389 BT_DBG("num_rsp %d", ir.num_rsp);
/* First write back the updated request header, then the results */
1391 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1393 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1407 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1408 * (BD_ADDR) for a HCI device from
1409 * a firmware node property.
1410 * @hdev: The HCI device
1412 * Search the firmware node for 'local-bd-address'.
1414 * All-zero BD addresses are rejected, because those could be properties
1415 * that exist in the firmware tables, but were not updated by the firmware. For
1416 * example, the DTS could define 'local-bd-address', with zero BD addresses.
1418 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1420 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1424 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1425 (u8 *)&ba, sizeof(ba));
/* Reject read failures and the all-zero (BDADDR_ANY) address */
1426 if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1429 bacpy(&hdev->public_addr, &ba);
/* Power on and initialize an HCI controller: open the transport, run
 * the driver setup/init sequence and, on success, mark the device UP
 * and notify mgmt.  On failure all queues/works are flushed and the
 * transport is closed again.  Called with no locks held; takes the
 * req_sync lock itself.
 * NOTE(review): this extraction is missing lines (labels, unlocks,
 * some conditions); comments describe only the visible flow.
 */
1432 static int hci_dev_do_open(struct hci_dev *hdev)
1436 BT_DBG("%s %p", hdev->name, hdev);
1438 hci_req_sync_lock(hdev);
/* Never bring up a device that is being unregistered */
1440 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1445 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1446 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1447 /* Check for rfkill but allow the HCI setup stage to
1448 * proceed (which in itself doesn't cause any RF activity).
1450 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1455 /* Check for valid public address or a configured static
1456 * random address, but let the HCI setup proceed to
1457 * be able to determine if there is a public address
1460 * In case of user channel usage, it is not important
1461 * if a public address or static random address is
1464 * This check is only valid for BR/EDR controllers
1465 * since AMP controllers do not have an address.
1467 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1468 hdev->dev_type == HCI_PRIMARY &&
1469 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1470 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1471 ret = -EADDRNOTAVAIL;
/* Already up: nothing to do */
1476 if (test_bit(HCI_UP, &hdev->flags)) {
/* Open the low-level transport (USB/UART/...) */
1481 if (hdev->open(hdev)) {
1486 set_bit(HCI_RUNNING, &hdev->flags);
1487 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1489 atomic_set(&hdev->cmd_cnt, 1);
1490 set_bit(HCI_INIT, &hdev->flags);
1492 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1493 test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1494 bool invalid_bdaddr;
1496 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1499 ret = hdev->setup(hdev);
1501 /* The transport driver can set the quirk to mark the
1502 * BD_ADDR invalid before creating the HCI device or in
1503 * its setup callback.
1505 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1511 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1512 if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1513 hci_dev_get_bd_addr_from_property(hdev);
1515 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1517 ret = hdev->set_bdaddr(hdev,
1518 &hdev->public_addr);
1520 /* If setting of the BD_ADDR from the device
1521 * property succeeds, then treat the address
1522 * as valid even if the invalid BD_ADDR
1523 * quirk indicates otherwise.
1526 invalid_bdaddr = false;
1531 /* The transport driver can set these quirks before
1532 * creating the HCI device or in its setup callback.
1534 * For the invalid BD_ADDR quirk it is possible that
1535 * it becomes a valid address if the bootloader does
1536 * provide it (see above).
1538 * In case any of them is set, the controller has to
1539 * start up as unconfigured.
1541 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1543 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1545 /* For an unconfigured controller it is required to
1546 * read at least the version information provided by
1547 * the Read Local Version Information command.
1549 * If the set_bdaddr driver callback is provided, then
1550 * also the original Bluetooth public device address
1551 * will be read using the Read BD Address command.
1553 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1554 ret = __hci_unconf_init(hdev);
1557 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1558 /* If public address change is configured, ensure that
1559 * the address gets programmed. If the driver does not
1560 * support changing the public address, fail the power
1563 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1565 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1567 ret = -EADDRNOTAVAIL;
/* Full HCI init only for configured, kernel-managed devices */
1571 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1572 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1573 ret = __hci_init(hdev);
1574 if (!ret && hdev->post_init)
1575 ret = hdev->post_init(hdev);
1579 /* If the HCI Reset command is clearing all diagnostic settings,
1580 * then they need to be reprogrammed after the init procedure
1583 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1584 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1585 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1586 ret = hdev->set_diag(hdev, true);
1590 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark up, refresh RPA state and notify listeners */
1594 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1595 hci_adv_instances_set_rpa_expired(hdev, true);
1596 set_bit(HCI_UP, &hdev->flags);
1597 hci_sock_dev_event(hdev, HCI_DEV_UP);
1598 hci_leds_update_powered(hdev, true);
1599 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1600 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1601 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1602 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1603 hci_dev_test_flag(hdev, HCI_MGMT) &&
1604 hdev->dev_type == HCI_PRIMARY) {
1605 ret = __hci_req_hci_power_on(hdev);
1606 mgmt_power_on(hdev, ret);
1609 /* Init failed, cleanup */
1610 flush_work(&hdev->tx_work);
1611 flush_work(&hdev->cmd_work);
1612 flush_work(&hdev->rx_work);
1614 skb_queue_purge(&hdev->cmd_q);
1615 skb_queue_purge(&hdev->rx_q);
/* Drop the last sent command so no stale reference survives */
1620 if (hdev->sent_cmd) {
1621 kfree_skb(hdev->sent_cmd);
1622 hdev->sent_cmd = NULL;
1625 clear_bit(HCI_RUNNING, &hdev->flags);
1626 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
/* Keep only HCI_RAW across the failed open */
1629 hdev->flags &= BIT(HCI_RAW);
1633 hci_req_sync_unlock(hdev);
1637 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl entry point: look up the device by index, apply the
 * user-channel/unconfigured policy checks, then call hci_dev_do_open().
 */
1639 int hci_dev_open(__u16 dev)
1641 struct hci_dev *hdev;
1644 hdev = hci_dev_get(dev);
1648 /* Devices that are marked as unconfigured can only be powered
1649 * up as user channel. Trying to bring them up as normal devices
1650 * will result into a failure. Only user channel operation is
1653 * When this function is called for a user channel, the flag
1654 * HCI_USER_CHANNEL will be set first before attempting to
1657 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1658 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1663 /* We need to ensure that no other power on/off work is pending
1664 * before proceeding to call hci_dev_do_open. This is
1665 * particularly important if the setup procedure has not yet
1668 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1669 cancel_delayed_work(&hdev->power_off);
1671 /* After this call it is guaranteed that the setup procedure
1672 * has finished. This means that error conditions like RFKILL
1673 * or no valid public or static random address apply.
1675 flush_workqueue(hdev->req_workqueue);
1677 /* For controllers not using the management interface and that
1678 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1679 * so that pairing works for them. Once the management interface
1680 * is in use this bit will be cleared again and userspace has
1681 * to explicitly enable it.
1683 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1684 !hci_dev_test_flag(hdev, HCI_MGMT))
1685 hci_dev_set_flag(hdev, HCI_BONDABLE);
1687 err = hci_dev_do_open(hdev);
/* Drop every pending LE connection action: release any connection a
 * parameter entry still holds and take it off the action list.
 * This function requires the caller holds hdev->lock.
 */
1695 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1697 struct hci_conn_params *p;
1699 list_for_each_entry(p, &hdev->le_conn_params, list) {
/* Release the connection reference held by this params entry */
1701 hci_conn_drop(p->conn);
1702 hci_conn_put(p->conn);
1705 list_del_init(&p->action);
1708 BT_DBG("All LE pending actions cleared");
/* Power off an HCI controller: run the vendor shutdown hook, flush all
 * works and queues, tear down discovery/connection state, optionally
 * issue an HCI Reset, and notify mgmt/sockets.  Safe to call when the
 * device is already down (returns after the HCI_UP test-and-clear).
 * NOTE(review): extraction dropped lines (unlocks, hdev->close() call,
 * returns); comments describe only the visible flow.
 */
1711 int hci_dev_do_close(struct hci_dev *hdev)
1715 BT_DBG("%s %p", hdev->name, hdev);
1717 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1718 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1719 test_bit(HCI_UP, &hdev->flags)) {
1720 /* Execute vendor specific shutdown routine */
1722 hdev->shutdown(hdev);
1725 cancel_delayed_work(&hdev->power_off);
1727 hci_request_cancel_all(hdev);
1728 hci_req_sync_lock(hdev);
/* Already down: only stop the command timer and bail out */
1730 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1731 cancel_delayed_work_sync(&hdev->cmd_timer);
1732 hci_req_sync_unlock(hdev);
1736 hci_leds_update_powered(hdev, false);
1738 /* Flush RX and TX works */
1739 flush_work(&hdev->tx_work);
1740 flush_work(&hdev->rx_work);
1742 if (hdev->discov_timeout > 0) {
1743 hdev->discov_timeout = 0;
1744 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1745 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1748 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1749 cancel_delayed_work(&hdev->service_cache);
1751 if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1752 struct adv_info *adv_instance;
1754 cancel_delayed_work_sync(&hdev->rpa_expired);
1756 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1757 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1760 /* Avoid potential lockdep warnings from the *_flush() calls by
1761 * ensuring the workqueue is empty up front.
1763 drain_workqueue(hdev->workqueue);
1767 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1769 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
/* Tell mgmt about the power-off only for kernel-managed primaries
 * that were not auto-off.
 */
1771 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1772 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1773 hci_dev_test_flag(hdev, HCI_MGMT))
1774 __mgmt_power_off(hdev);
1776 hci_inquiry_cache_flush(hdev);
1777 hci_pend_le_actions_clear(hdev);
1778 hci_conn_hash_flush(hdev);
1779 hci_dev_unlock(hdev);
1781 smp_unregister(hdev);
1783 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1785 msft_do_close(hdev);
1791 skb_queue_purge(&hdev->cmd_q);
1792 atomic_set(&hdev->cmd_cnt, 1);
/* Optionally reset the controller before closing the transport */
1793 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1794 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1795 set_bit(HCI_INIT, &hdev->flags);
1796 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1797 clear_bit(HCI_INIT, &hdev->flags);
1800 /* flush cmd work */
1801 flush_work(&hdev->cmd_work);
1804 skb_queue_purge(&hdev->rx_q);
1805 skb_queue_purge(&hdev->cmd_q);
1806 skb_queue_purge(&hdev->raw_q);
1808 /* Drop last sent command */
1809 if (hdev->sent_cmd) {
1810 cancel_delayed_work_sync(&hdev->cmd_timer);
1811 kfree_skb(hdev->sent_cmd);
1812 hdev->sent_cmd = NULL;
1815 clear_bit(HCI_RUNNING, &hdev->flags);
1816 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
/* Unblock a suspend that was waiting for this power-down */
1818 if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1819 wake_up(&hdev->suspend_wait_q);
1821 /* After this point our queues are empty
1822 * and no tasks are scheduled. */
/* Keep only HCI_RAW; all volatile flags are reset */
1826 hdev->flags &= BIT(HCI_RAW);
1827 hci_dev_clear_volatile_flags(hdev);
1829 /* Controller radio is available but is currently powered down */
1830 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1832 memset(hdev->eir, 0, sizeof(hdev->eir));
1833 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1834 bacpy(&hdev->random_addr, BDADDR_ANY);
1836 hci_req_sync_unlock(hdev);
/* HCIDEVDOWN ioctl entry point: reject user-channel devices, cancel a
 * pending auto power-off, then call hci_dev_do_close().
 */
1842 int hci_dev_close(__u16 dev)
1844 struct hci_dev *hdev;
1847 hdev = hci_dev_get(dev);
1851 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1856 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1857 cancel_delayed_work(&hdev->power_off);
1859 err = hci_dev_do_close(hdev);
/* Perform a synchronous HCI Reset: purge queues, flush discovery and
 * connection state, reset flow-control counters and issue the reset
 * command under the req_sync lock.
 */
1866 static int hci_dev_do_reset(struct hci_dev *hdev)
1870 BT_DBG("%s %p", hdev->name, hdev);
1872 hci_req_sync_lock(hdev);
1875 skb_queue_purge(&hdev->rx_q);
1876 skb_queue_purge(&hdev->cmd_q);
1878 /* Avoid potential lockdep warnings from the *_flush() calls by
1879 * ensuring the workqueue is empty up front.
1881 drain_workqueue(hdev->workqueue);
1884 hci_inquiry_cache_flush(hdev);
1885 hci_conn_hash_flush(hdev);
1886 hci_dev_unlock(hdev);
/* Reset packet-based flow control counters to their defaults */
1891 atomic_set(&hdev->cmd_cnt, 1);
1892 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1894 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1896 hci_req_sync_unlock(hdev);
/* HCIDEVRESET ioctl entry point: only allowed on devices that are up
 * and neither user-channel nor unconfigured.
 */
1900 int hci_dev_reset(__u16 dev)
1902 struct hci_dev *hdev;
1905 hdev = hci_dev_get(dev);
1909 if (!test_bit(HCI_UP, &hdev->flags)) {
1914 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1919 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1924 err = hci_dev_do_reset(hdev);
/* HCIDEVRESTAT ioctl entry point: zero the device's byte/packet
 * statistics, with the same policy checks as hci_dev_reset().
 */
1931 int hci_dev_reset_stat(__u16 dev)
1933 struct hci_dev *hdev;
1936 hdev = hci_dev_get(dev);
1940 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1945 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1950 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Mirror a raw HCI scan-enable change (legacy ioctl path) into the
 * mgmt-visible CONNECTABLE/DISCOVERABLE flags and emit a new-settings
 * event if anything changed.
 */
1957 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1959 bool conn_changed, discov_changed;
1961 BT_DBG("%s scan 0x%02x", hdev->name, scan);
/* SCAN_PAGE maps to the connectable setting */
1963 if ((scan & SCAN_PAGE))
1964 conn_changed = !hci_dev_test_and_set_flag(hdev,
1967 conn_changed = hci_dev_test_and_clear_flag(hdev,
/* SCAN_INQUIRY maps to the discoverable setting */
1970 if ((scan & SCAN_INQUIRY)) {
1971 discov_changed = !hci_dev_test_and_set_flag(hdev,
1974 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1975 discov_changed = hci_dev_test_and_clear_flag(hdev,
/* Nothing to report unless mgmt is in use */
1979 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1982 if (conn_changed || discov_changed) {
1983 /* In case this was disabled through mgmt */
1984 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1986 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1987 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1989 mgmt_new_settings(hdev);
/* Legacy HCISET* ioctl dispatcher: copy a hci_dev_req from userspace
 * and apply the requested setting (auth, encrypt, scan, link policy,
 * link mode, packet type, ACL/SCO MTU).
 * NOTE(review): the switch/case labels for several commands are
 * missing from this extraction; only the visible cases are annotated.
 */
1993 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1995 struct hci_dev *hdev;
1996 struct hci_dev_req dr;
1999 if (copy_from_user(&dr, arg, sizeof(dr)))
2002 hdev = hci_dev_get(dr.dev_id);
2006 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2011 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2016 if (hdev->dev_type != HCI_PRIMARY) {
2021 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
/* HCISETAUTH */
2028 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2029 HCI_INIT_TIMEOUT, NULL);
/* HCISETENCRYPT: requires encryption support and auth first */
2033 if (!lmp_encrypt_capable(hdev)) {
2038 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2039 /* Auth must be enabled first */
2040 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2041 HCI_INIT_TIMEOUT, NULL);
2046 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2047 HCI_INIT_TIMEOUT, NULL);
/* HCISETSCAN */
2051 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2052 HCI_INIT_TIMEOUT, NULL);
2054 /* Ensure that the connectable and discoverable states
2055 * get correctly modified as this was a non-mgmt change.
2058 hci_update_scan_state(hdev, dr.dev_opt);
/* HCISETLINKPOL */
2062 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2063 HCI_INIT_TIMEOUT, NULL);
2066 case HCISETLINKMODE:
2067 hdev->link_mode = ((__u16) dr.dev_opt) &
2068 (HCI_LM_MASTER | HCI_LM_ACCEPT);
/* HCISETPTYPE: skip the PHY-changed event if nothing changes */
2072 if (hdev->pkt_type == (__u16) dr.dev_opt)
2075 hdev->pkt_type = (__u16) dr.dev_opt;
2076 mgmt_phy_configuration_changed(hdev, NULL);
/* HCISETACLMTU: dev_opt packs pkts (low word) and mtu (high word) */
2080 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2081 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCISETSCOMTU: same packing as ACL MTU */
2085 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2086 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl handler: return the id and flags of up to
 * dev_num registered controllers to userspace.
 */
2099 int hci_get_dev_list(void __user *arg)
2101 struct hci_dev *hdev;
2102 struct hci_dev_list_req *dl;
2103 struct hci_dev_req *dr;
2104 int n = 0, size, err;
2107 if (get_user(dev_num, (__u16 __user *) arg))
/* Bound the request so the kzalloc below stays modest (two pages) */
2110 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2113 size = sizeof(*dl) + dev_num * sizeof(*dr);
2115 dl = kzalloc(size, GFP_KERNEL);
2121 read_lock(&hci_dev_list_lock);
2122 list_for_each_entry(hdev, &hci_dev_list, list) {
2123 unsigned long flags = hdev->flags;
2125 /* When the auto-off is configured it means the transport
2126 * is running, but in that case still indicate that the
2127 * device is actually down.
2129 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2130 flags &= ~BIT(HCI_UP);
2132 (dr + n)->dev_id = hdev->id;
2133 (dr + n)->dev_opt = flags;
2138 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found */
2141 size = sizeof(*dl) + n * sizeof(*dr);
2143 err = copy_to_user(arg, dl, size);
2146 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl handler: fill a struct hci_dev_info snapshot for
 * one controller and copy it back to userspace.
 */
2149 int hci_get_dev_info(void __user *arg)
2151 struct hci_dev *hdev;
2152 struct hci_dev_info di;
2153 unsigned long flags;
2156 if (copy_from_user(&di, arg, sizeof(di)))
2159 hdev = hci_dev_get(di.dev_id);
2163 /* When the auto-off is configured it means the transport
2164 * is running, but in that case still indicate that the
2165 * device is actually down.
2167 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2168 flags = hdev->flags & ~BIT(HCI_UP);
2170 flags = hdev->flags;
/* NOTE(review): unbounded strcpy — safe only while hdev->name always
 * fits di.name; newer kernels use a bounded copy here. Confirm sizes.
 */
2172 strcpy(di.name, hdev->name);
2173 di.bdaddr = hdev->bdaddr;
2174 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2176 di.pkt_type = hdev->pkt_type;
/* Report LE buffer parameters in the ACL fields for LE-only devices */
2177 if (lmp_bredr_capable(hdev)) {
2178 di.acl_mtu = hdev->acl_mtu;
2179 di.acl_pkts = hdev->acl_pkts;
2180 di.sco_mtu = hdev->sco_mtu;
2181 di.sco_pkts = hdev->sco_pkts;
2183 di.acl_mtu = hdev->le_mtu;
2184 di.acl_pkts = hdev->le_pkts;
2188 di.link_policy = hdev->link_policy;
2189 di.link_mode = hdev->link_mode;
2191 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2192 memcpy(&di.features, &hdev->features, sizeof(di.features));
2194 if (copy_to_user(arg, &di, sizeof(di)))
2202 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: track the RFKILLED flag and power the device down
 * when it gets blocked (unless it is in setup/config, where RF is not
 * yet active).  User-channel devices are left alone.
 */
2204 static int hci_rfkill_set_block(void *data, bool blocked)
2206 struct hci_dev *hdev = data;
2208 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2210 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2214 hci_dev_set_flag(hdev, HCI_RFKILLED);
2215 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2216 !hci_dev_test_flag(hdev, HCI_CONFIG))
2217 hci_dev_do_close(hdev);
2219 hci_dev_clear_flag(hdev, HCI_RFKILLED);
/* rfkill operations registered for each HCI device */
2225 static const struct rfkill_ops hci_rfkill_ops = {
2226 .set_block = hci_rfkill_set_block,
/* Deferred power-on work (hdev->power_on): bring the device up, handle
 * error conditions that were ignored during setup, arm the auto-off
 * timer, and emit the appropriate mgmt index events when leaving the
 * SETUP/CONFIG stages.
 */
2229 static void hci_power_on(struct work_struct *work)
2231 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2234 BT_DBG("%s", hdev->name);
/* Device already up via auto-off path: just (re)run power-on request */
2236 if (test_bit(HCI_UP, &hdev->flags) &&
2237 hci_dev_test_flag(hdev, HCI_MGMT) &&
2238 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2239 cancel_delayed_work(&hdev->power_off);
2240 hci_req_sync_lock(hdev);
2241 err = __hci_req_hci_power_on(hdev);
2242 hci_req_sync_unlock(hdev);
2243 mgmt_power_on(hdev, err);
2247 err = hci_dev_do_open(hdev);
2250 mgmt_set_powered_failed(hdev, err);
2251 hci_dev_unlock(hdev);
2255 /* During the HCI setup phase, a few error conditions are
2256 * ignored and they need to be checked now. If they are still
2257 * valid, it is important to turn the device back off.
2259 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2260 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2261 (hdev->dev_type == HCI_PRIMARY &&
2262 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2263 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2264 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2265 hci_dev_do_close(hdev);
2266 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2267 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2268 HCI_AUTO_OFF_TIMEOUT);
2271 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2272 /* For unconfigured devices, set the HCI_RAW flag
2273 * so that userspace can easily identify them.
2275 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2276 set_bit(HCI_RAW, &hdev->flags);
2278 /* For fully configured devices, this will send
2279 * the Index Added event. For unconfigured devices,
2280 * it will send Unconfigued Index Added event.
2282 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2283 * and no event will be send.
2285 mgmt_index_added(hdev);
2286 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2287 /* When the controller is now configured, then it
2288 * is important to clear the HCI_RAW flag.
2290 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2291 clear_bit(HCI_RAW, &hdev->flags);
2293 /* Powering on the controller with HCI_CONFIG set only
2294 * happens with the transition from unconfigured to
2295 * configured. This will send the Index Added event.
2297 mgmt_index_added(hdev);
/* Deferred power-off work (hdev->power_off): simply close the device */
2301 static void hci_power_off(struct work_struct *work)
2303 struct hci_dev *hdev = container_of(work, struct hci_dev,
2306 BT_DBG("%s", hdev->name);
2308 hci_dev_do_close(hdev);
/* Hardware-error recovery work: give the driver's hw_error hook first
 * shot, otherwise log the error code, then power-cycle the controller
 * (close and, if the close succeeded, reopen).
 */
2311 static void hci_error_reset(struct work_struct *work)
2313 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2315 BT_DBG("%s", hdev->name);
2318 hdev->hw_error(hdev, hdev->hw_error_code);
2320 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2322 if (hci_dev_do_close(hdev))
2325 hci_dev_do_open(hdev);
/* Free every registered UUID entry on hdev->uuids */
2328 void hci_uuids_clear(struct hci_dev *hdev)
2330 struct bt_uuid *uuid, *tmp;
2332 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2333 list_del(&uuid->list);
/* Remove and RCU-free every stored BR/EDR link key.  Plain (non-safe)
 * iteration is OK here because kfree_rcu defers the actual free.
 */
2338 void hci_link_keys_clear(struct hci_dev *hdev)
2340 struct link_key *key;
2342 list_for_each_entry(key, &hdev->link_keys, list) {
2343 list_del_rcu(&key->list);
2344 kfree_rcu(key, rcu);
/* Remove every stored SMP long-term key (RCU list removal) */
2348 void hci_smp_ltks_clear(struct hci_dev *hdev)
2352 list_for_each_entry(k, &hdev->long_term_keys, list) {
2353 list_del_rcu(&k->list);
/* Remove every stored identity resolving key (RCU list removal) */
2358 void hci_smp_irks_clear(struct hci_dev *hdev)
2362 list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2363 list_del_rcu(&k->list);
/* Remove every entry from the blocked-keys list (RCU list removal) */
2368 void hci_blocked_keys_clear(struct hci_dev *hdev)
2370 struct blocked_key *b;
2372 list_for_each_entry(b, &hdev->blocked_keys, list) {
2373 list_del_rcu(&b->list);
/* Return true if the 16-byte key @val of the given blocked-key @type
 * appears on hdev->blocked_keys (RCU-protected lookup).
 */
2378 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2380 bool blocked = false;
2381 struct blocked_key *b;
2384 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2385 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
/* Look up the stored BR/EDR link key for @bdaddr.  A key present on
 * the blocked-key list is reported (rate-limited) and not returned.
 */
2395 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2400 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2401 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2404 if (hci_is_blocked_key(hdev,
2405 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2407 bt_dev_warn_ratelimited(hdev,
2408 "Link key blocked for %pMR",
/* Decide whether a newly created link key should be stored
 * persistently, based on key type and both sides' authentication
 * requirements (see Bluetooth Core Spec security considerations).
 */
2421 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2422 u8 key_type, u8 old_key_type)
/* Legacy key types below 0x03 are always stored */
2425 if (key_type < 0x03)
2428 /* Debug keys are insecure so don't store them persistently */
2429 if (key_type == HCI_LK_DEBUG_COMBINATION)
2432 /* Changed combination key and there's no previous one */
2433 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2436 /* Security mode 3 case */
2440 /* BR/EDR key derived using SC from an LE link */
2441 if (conn->type == LE_LINK)
2444 /* Neither local nor remote side had no-bonding as requirement */
2445 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2448 /* Local side had dedicated bonding as requirement */
2449 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2452 /* Remote side had dedicated bonding as requirement */
2453 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2456 /* If none of the above criteria match, then don't store the key
/* Map an SMP LTK type to the HCI role it authenticates:
 * SMP_LTK (initiator key) -> master, everything else -> slave.
 */
2461 static u8 ltk_role(u8 type)
2463 if (type == SMP_LTK)
2464 return HCI_ROLE_MASTER;
2466 return HCI_ROLE_SLAVE;
/* Look up the stored LTK for @bdaddr/@addr_type that is usable in the
 * given @role (SC keys work for both roles).  Blocked keys are
 * reported (rate-limited) and not returned.
 */
2469 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2470 u8 addr_type, u8 role)
2475 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2476 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2479 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2482 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2484 bt_dev_warn_ratelimited(hdev,
2485 "LTK blocked for %pMR",
/* Resolve a resolvable private address to its IRK.  First try a cached
 * RPA match, then fall back to cryptographically matching every stored
 * IRK (caching the RPA on success).  Blocked IRKs are not returned.
 */
2498 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2500 struct smp_irk *irk_to_return = NULL;
2501 struct smp_irk *irk;
/* Fast path: RPA already cached on an IRK entry */
2504 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2505 if (!bacmp(&irk->rpa, rpa)) {
2506 irk_to_return = irk;
/* Slow path: try to resolve the RPA against each stored IRK */
2511 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2512 if (smp_irk_matches(hdev, irk->val, rpa)) {
2513 bacpy(&irk->rpa, rpa);
2514 irk_to_return = irk;
2520 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2521 irk_to_return->val)) {
2522 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2523 &irk_to_return->bdaddr);
2524 irk_to_return = NULL;
2529 return irk_to_return;
/* Look up the IRK stored for an identity address (@bdaddr/@addr_type).
 * Random addresses must be static random (top two bits 11), per the
 * Core Spec.  Blocked IRKs are reported and not returned.
 */
2532 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2535 struct smp_irk *irk_to_return = NULL;
2536 struct smp_irk *irk;
2538 /* Identity Address must be public or static random */
2539 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2543 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2544 if (addr_type == irk->addr_type &&
2545 bacmp(bdaddr, &irk->bdaddr) == 0) {
2546 irk_to_return = irk;
2553 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2554 irk_to_return->val)) {
2555 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2556 &irk_to_return->bdaddr);
2557 irk_to_return = NULL;
2562 return irk_to_return;
/* Store (or update) a BR/EDR link key for @bdaddr.  On success
 * *persistent reports whether the key should be kept across reboots
 * (see hci_persistent_key()).
 */
2565 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2566 bdaddr_t *bdaddr, u8 *val, u8 type,
2567 u8 pin_len, bool *persistent)
2569 struct link_key *key, *old_key;
2572 old_key = hci_find_link_key(hdev, bdaddr);
2574 old_key_type = old_key->type;
/* No existing key: allocate a fresh entry and link it in */
2577 old_key_type = conn ? conn->key_type : 0xff;
2578 key = kzalloc(sizeof(*key), GFP_KERNEL);
2581 list_add_rcu(&key->list, &hdev->link_keys);
2584 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2586 /* Some buggy controller combinations generate a changed
2587 * combination key for legacy pairing even when there's no
2589 if (type == HCI_LK_CHANGED_COMBINATION &&
2590 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2591 type = HCI_LK_COMBINATION;
2593 conn->key_type = type;
2596 bacpy(&key->bdaddr, bdaddr);
2597 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2598 key->pin_len = pin_len;
/* A changed-combination key keeps the old key's stored type */
2600 if (type == HCI_LK_CHANGED_COMBINATION)
2601 key->type = old_key_type;
2606 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP long-term key for @bdaddr/@addr_type.
 * Role is derived from the key @type; an existing matching key is
 * overwritten in place.
 */
2612 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2613 u8 addr_type, u8 type, u8 authenticated,
2614 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2616 struct smp_ltk *key, *old_key;
2617 u8 role = ltk_role(type);
2619 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
/* No existing key: allocate a fresh entry and link it in */
2623 key = kzalloc(sizeof(*key), GFP_KERNEL);
2626 list_add_rcu(&key->list, &hdev->long_term_keys);
2629 bacpy(&key->bdaddr, bdaddr);
2630 key->bdaddr_type = addr_type;
2631 memcpy(key->val, tk, sizeof(key->val));
2632 key->authenticated = authenticated;
2635 key->enc_size = enc_size;
/* Store (or update) an identity resolving key for the identity address
 * @bdaddr/@addr_type, recording the last seen RPA in @rpa.
 */
2641 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2642 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2644 struct smp_irk *irk;
2646 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
/* No existing IRK: allocate a fresh entry and link it in */
2648 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2652 bacpy(&irk->bdaddr, bdaddr);
2653 irk->addr_type = addr_type;
2655 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2658 memcpy(irk->val, val, 16);
2659 bacpy(&irk->rpa, rpa);
/* Delete the stored BR/EDR link key for @bdaddr, if any (RCU-freed) */
2664 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2666 struct link_key *key;
2668 key = hci_find_link_key(hdev, bdaddr);
2672 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2674 list_del_rcu(&key->list);
2675 kfree_rcu(key, rcu);
/* Delete every stored LTK matching @bdaddr/@bdaddr_type.
 * Returns 0 if at least one key was removed, -ENOENT otherwise.
 */
2680 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2685 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2686 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2689 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2691 list_del_rcu(&k->list);
2696 return removed ? 0 : -ENOENT;
/* Delete every stored IRK matching @bdaddr/@addr_type (RCU removal) */
2699 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2703 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2704 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2707 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2709 list_del_rcu(&k->list);
/* Return true if we have pairing material for @bdaddr: a link key for
 * BR/EDR, or an LTK for LE (after translating an RPA to its identity
 * address via the IRK store).
 */
2714 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2717 struct smp_irk *irk;
2720 if (type == BDADDR_BREDR) {
2721 if (hci_find_link_key(hdev, bdaddr))
2726 /* Convert to HCI addr type which struct smp_ltk uses */
2727 if (type == BDADDR_LE_PUBLIC)
2728 addr_type = ADDR_LE_DEV_PUBLIC;
2730 addr_type = ADDR_LE_DEV_RANDOM;
/* Resolve RPA to the identity address the keys are stored under */
2732 irk = hci_get_irk(hdev, bdaddr, addr_type);
2734 bdaddr = &irk->bdaddr;
2735 addr_type = irk->addr_type;
2739 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2740 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
/* HCI command timer function: fired when a sent command got no
 * response in time.  Logs the stuck opcode (if known), gives the
 * driver's cmd_timeout hook a chance to recover the hardware, then
 * unblocks the command queue.
 */
2751 static void hci_cmd_timeout(struct work_struct *work)
2753 struct hci_dev *hdev = container_of(work, struct hci_dev,
2756 if (hdev->sent_cmd) {
2757 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2758 u16 opcode = __le16_to_cpu(sent->opcode);
2760 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2762 bt_dev_err(hdev, "command tx timeout");
2765 if (hdev->cmd_timeout)
2766 hdev->cmd_timeout(hdev);
/* Allow the next queued command to go out */
2768 atomic_set(&hdev->cmd_cnt, 1);
2769 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data for @bdaddr/@bdaddr_type */
2772 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2773 bdaddr_t *bdaddr, u8 bdaddr_type)
2775 struct oob_data *data;
2777 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2778 if (bacmp(bdaddr, &data->bdaddr) != 0)
2780 if (data->bdaddr_type != bdaddr_type)
/* Delete the stored remote OOB data for @bdaddr/@bdaddr_type, if any */
2788 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2791 struct oob_data *data;
2793 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2797 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2799 list_del(&data->list);
/* Free every stored remote OOB data entry */
2805 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2807 struct oob_data *data, *n;
2809 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2810 list_del(&data->list);
/* Store (or update) remote OOB pairing data for @bdaddr/@bdaddr_type.
 * data->present encodes which hash/rand pairs are valid:
 * 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both, 0x00 = none.
 */
2815 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2816 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2817 u8 *hash256, u8 *rand256)
2819 struct oob_data *data;
2821 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
/* No existing entry: allocate and link a fresh one */
2823 data = kmalloc(sizeof(*data), GFP_KERNEL);
2827 bacpy(&data->bdaddr, bdaddr);
2828 data->bdaddr_type = bdaddr_type;
2829 list_add(&data->list, &hdev->remote_oob_data);
2832 if (hash192 && rand192) {
2833 memcpy(data->hash192, hash192, sizeof(data->hash192));
2834 memcpy(data->rand192, rand192, sizeof(data->rand192));
2835 if (hash256 && rand256)
2836 data->present = 0x03;
2838 memset(data->hash192, 0, sizeof(data->hash192));
2839 memset(data->rand192, 0, sizeof(data->rand192));
2840 if (hash256 && rand256)
2841 data->present = 0x02;
2843 data->present = 0x00;
2846 if (hash256 && rand256) {
2847 memcpy(data->hash256, hash256, sizeof(data->hash256));
2848 memcpy(data->rand256, rand256, sizeof(data->rand256));
2850 memset(data->hash256, 0, sizeof(data->hash256));
2851 memset(data->rand256, 0, sizeof(data->rand256));
2852 if (hash192 && rand192)
2853 data->present = 0x01;
2856 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2861 /* This function requires the caller holds hdev->lock */
/* Return the advertising instance with the given instance ID, or the
 * fall-through result when no entry matches (tail of function elided
 * in this excerpt).
 */
2862 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2864 struct adv_info *adv_instance;
2866 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2867 if (adv_instance->instance == instance)
2868 return adv_instance;
2874 /* This function requires the caller holds hdev->lock */
/* Return the instance that follows @instance in the rotation order,
 * wrapping from the last list entry back to the first.
 */
2875 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2877 struct adv_info *cur_instance;
2879 cur_instance = hci_find_adv_instance(hdev, instance);
/* Wrap around: after the last instance, continue with the first */
2883 if (cur_instance == list_last_entry(&hdev->adv_instances,
2884 struct adv_info, list))
2885 return list_first_entry(&hdev->adv_instances,
2886 struct adv_info, list);
2888 return list_next_entry(cur_instance, list);
2891 /* This function requires the caller holds hdev->lock */
/* Remove and free one advertising instance.  If it is the instance
 * currently being advertised, cancel its expiry timer and reset
 * cur_adv_instance.
 */
2892 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2894 struct adv_info *adv_instance;
2896 adv_instance = hci_find_adv_instance(hdev, instance);
/* NOTE(review): "%dMR" looks like a stray suffix carried over from a
 * "%pMR" format — confirm the intended log text.
 */
2900 BT_DBG("%s removing %dMR", hdev->name, instance);
2902 if (hdev->cur_adv_instance == instance) {
2903 if (hdev->adv_instance_timeout) {
2904 cancel_delayed_work(&hdev->adv_instance_expire);
2905 hdev->adv_instance_timeout = 0;
2907 hdev->cur_adv_instance = 0x00;
/* Make sure the per-instance RPA-expiry work cannot run on freed memory */
2910 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2912 list_del(&adv_instance->list);
2913 kfree(adv_instance);
2915 hdev->adv_instance_cnt--;
/* Mark every advertising instance's RPA as (not) expired in one pass. */
2920 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2922 struct adv_info *adv_instance, *n;
2924 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2925 adv_instance->rpa_expired = rpa_expired;
2928 /* This function requires the caller holds hdev->lock */
/* Tear down all advertising instances: stop the shared expiry timer,
 * flush each instance's RPA work, free the entries and reset counters.
 */
2929 void hci_adv_instances_clear(struct hci_dev *hdev)
2931 struct adv_info *adv_instance, *n;
2933 if (hdev->adv_instance_timeout) {
2934 cancel_delayed_work(&hdev->adv_instance_expire);
2935 hdev->adv_instance_timeout = 0;
2938 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2939 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2940 list_del(&adv_instance->list);
2941 kfree(adv_instance);
2944 hdev->adv_instance_cnt = 0;
2945 hdev->cur_adv_instance = 0x00;
/* Delayed-work callback: flag a single instance's resolvable private
 * address as expired so it gets rotated.
 */
2948 static void adv_instance_rpa_expired(struct work_struct *work)
2950 struct adv_info *adv_instance = container_of(work, struct adv_info,
2951 rpa_expired_cb.work);
2955 adv_instance->rpa_expired = true;
2958 /* This function requires the caller holds hdev->lock */
/* Create a new advertising instance, or overwrite the parameters of an
 * existing one with the same instance ID.  For a new instance the ID
 * must lie in [1, le_num_of_adv_sets] and the controller's instance
 * budget must not be exhausted.  A duration of 0 selects the default
 * multi-advertising rotation duration.
 */
2959 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2960 u16 adv_data_len, u8 *adv_data,
2961 u16 scan_rsp_len, u8 *scan_rsp_data,
2962 u16 timeout, u16 duration, s8 tx_power,
2963 u32 min_interval, u32 max_interval)
2965 struct adv_info *adv_instance;
2967 adv_instance = hci_find_adv_instance(hdev, instance);
/* Existing instance: wipe old payloads before copying the new ones */
2969 memset(adv_instance->adv_data, 0,
2970 sizeof(adv_instance->adv_data));
2971 memset(adv_instance->scan_rsp_data, 0,
2972 sizeof(adv_instance->scan_rsp_data));
/* New instance: enforce the controller's advertising-set limits */
2974 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2975 instance < 1 || instance > hdev->le_num_of_adv_sets)
2978 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
/* pending = true: instance exists but has not been programmed yet */
2982 adv_instance->pending = true;
2983 adv_instance->instance = instance;
2984 list_add(&adv_instance->list, &hdev->adv_instances);
2985 hdev->adv_instance_cnt++;
2988 adv_instance->flags = flags;
2989 adv_instance->adv_data_len = adv_data_len;
2990 adv_instance->scan_rsp_len = scan_rsp_len;
2991 adv_instance->min_interval = min_interval;
2992 adv_instance->max_interval = max_interval;
2993 adv_instance->tx_power = tx_power;
2996 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2999 memcpy(adv_instance->scan_rsp_data,
3000 scan_rsp_data, scan_rsp_len);
3002 adv_instance->timeout = timeout;
3003 adv_instance->remaining_time = timeout;
/* duration == 0 means "use the default rotation duration" */
3006 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3008 adv_instance->duration = duration;
3010 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3011 adv_instance_rpa_expired);
/* NOTE(review): "%dMR" here mirrors the odd format used in
 * hci_remove_adv_instance — confirm intended log text.
 */
3013 BT_DBG("%s for %dMR", hdev->name, instance);
3018 /* This function requires the caller holds hdev->lock */
/* Replace only the advertising / scan-response payloads of an existing
 * instance, leaving its timing and flag parameters untouched.  Each
 * buffer is zeroed first so a shorter payload cannot expose remnants of
 * the previous one.
 */
3019 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3020 u16 adv_data_len, u8 *adv_data,
3021 u16 scan_rsp_len, u8 *scan_rsp_data,
3023 struct adv_info *adv_instance;
3025 adv_instance = hci_find_adv_instance(hdev, instance);
3027 /* If advertisement doesn't exist, we can't modify its data */
3032 memset(adv_instance->adv_data, 0,
3033 sizeof(adv_instance->adv_data));
3034 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3035 adv_instance->adv_data_len = adv_data_len;
3039 memset(adv_instance->scan_rsp_data, 0,
3040 sizeof(adv_instance->scan_rsp_data));
3041 memcpy(adv_instance->scan_rsp_data,
3042 scan_rsp_data, scan_rsp_len);
3043 adv_instance->scan_rsp_len = scan_rsp_len;
3049 /* This function requires the caller holds hdev->lock */
/* Free every registered advertisement monitor and destroy the IDR that
 * indexed them.
 */
3050 void hci_adv_monitors_clear(struct hci_dev *hdev)
3052 struct adv_monitor *monitor;
3055 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3056 hci_free_adv_monitor(hdev, monitor);
3058 idr_destroy(&hdev->adv_monitors_idr);
3061 /* Frees the monitor structure and do some bookkeepings.
3062 * This function requires the caller holds hdev->lock.
 *
 * Releases the monitor's pattern list, drops its IDR handle (handle 0
 * means "never allocated") and, when the monitor had reached a
 * registered state, decrements the monitor count and notifies mgmt.
3064 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3066 struct adv_pattern *pattern;
3067 struct adv_pattern *tmp;
3072 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3073 list_del(&pattern->list);
3077 if (monitor->handle)
3078 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3080 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3081 hdev->adv_monitors_cnt--;
3082 mgmt_adv_monitor_removed(hdev, monitor->handle);
/* Completion hook: forward add-monitor status to mgmt. */
3088 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3090 return mgmt_add_adv_patterns_monitor_complete(hdev, status);
/* Completion hook: forward remove-monitor status to mgmt. */
3093 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3095 return mgmt_remove_adv_monitor_complete(hdev, status);
3098 /* Assigns handle to a monitor, and if offloading is supported and power is on,
3099 * also attempts to forward the request to the controller.
3100 * Returns true if request is forwarded (result is pending), false otherwise.
3101 * This function requires the caller holds hdev->lock.
3103 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3106 int min, max, handle;
/* Allocate a handle from the dedicated advertisement-monitor range */
3115 min = HCI_MIN_ADV_MONITOR_HANDLE;
3116 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3117 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3124 monitor->handle = handle;
/* Without power there is no controller to program — handle is kept */
3126 if (!hdev_is_powered(hdev))
3129 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3130 case HCI_ADV_MONITOR_EXT_NONE:
/* No offload support: fall back to host-side background scanning */
3131 hci_update_background_scan(hdev);
3132 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
3133 /* Message was not forwarded to controller - not an error */
3135 case HCI_ADV_MONITOR_EXT_MSFT:
/* MSFT extension available: offload the pattern to the controller */
3136 *err = msft_add_monitor_pattern(hdev, monitor);
3137 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
3145 /* Attempts to tell the controller and free the monitor. If somehow the
3146 * controller doesn't have a corresponding handle, remove anyway.
3147 * Returns true if request is forwarded (result is pending), false otherwise.
3148 * This function requires the caller holds hdev->lock.
3150 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3151 struct adv_monitor *monitor,
3152 u16 handle, int *err)
3156 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3157 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3159 case HCI_ADV_MONITOR_EXT_MSFT:
3160 *err = msft_remove_monitor(hdev, monitor, handle);
3164 /* In case no matching handle registered, just free the monitor */
3165 if (*err == -ENOENT)
/* -ENOENT from the controller is downgraded to a warning: the host
 * still frees its bookkeeping so state cannot leak.
 */
3171 if (*err == -ENOENT)
3172 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3174 hci_free_adv_monitor(hdev, monitor);
3180 /* Returns true if request is forwarded (result is pending), false otherwise.
3181 * This function requires the caller holds hdev->lock.
 *
 * Removes one monitor by handle; if removal completed synchronously and
 * without error, refreshes host-side background scanning.
3183 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3185 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3193 pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3194 if (!*err && !pending)
3195 hci_update_background_scan(hdev);
3197 bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
3198 hdev->name, handle, *err, pending ? "" : "not ");
3203 /* Returns true if request is forwarded (result is pending), false otherwise.
3204 * This function requires the caller holds hdev->lock.
 *
 * Iterates the monitor IDR and removes every monitor, stopping early on
 * the first error or as soon as a removal goes asynchronous (pending).
3206 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3208 struct adv_monitor *monitor;
3209 int idr_next_id = 0;
3210 bool pending = false;
3211 bool update = false;
3215 while (!*err && !pending) {
3216 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
/* handle 0 tells the remover to match/free regardless of handle */
3220 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3222 if (!*err && !pending)
3227 hci_update_background_scan(hdev);
3229 bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
3230 hdev->name, *err, pending ? "" : "not ");
3235 /* This function requires the caller holds hdev->lock */
/* True when at least one advertisement monitor is registered. */
3236 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3238 return !idr_is_empty(&hdev->adv_monitors_idr);
/* Report which monitor-offload mechanism the controller supports:
 * the Microsoft vendor extension when available, otherwise none
 * (host-side filtering only).
 */
3241 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3243 if (msft_monitor_supported(hdev))
3244 return HCI_ADV_MONITOR_EXT_MSFT;
3246 return HCI_ADV_MONITOR_EXT_NONE;
/* Find the entry for @bdaddr/@type in a plain bdaddr list. */
3249 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3250 bdaddr_t *bdaddr, u8 type)
3252 struct bdaddr_list *b;
3254 list_for_each_entry(b, bdaddr_list, list) {
3255 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Same lookup for lists whose entries also carry IRKs. */
3262 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3263 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3266 struct bdaddr_list_with_irk *b;
3268 list_for_each_entry(b, bdaddr_list, list) {
3269 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Same lookup for lists whose entries also carry per-device flags. */
3276 struct bdaddr_list_with_flags *
3277 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3278 bdaddr_t *bdaddr, u8 type)
3280 struct bdaddr_list_with_flags *b;
3282 list_for_each_entry(b, bdaddr_list, list) {
3283 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Unlink and free every entry of a plain bdaddr list. */
3290 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3292 struct bdaddr_list *b, *n;
3294 list_for_each_entry_safe(b, n, bdaddr_list, list) {
/* Add @bdaddr/@type to @list.  BDADDR_ANY is rejected and duplicates
 * are not added twice.
 */
3300 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3302 struct bdaddr_list *entry;
3304 if (!bacmp(bdaddr, BDADDR_ANY))
3307 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3310 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3314 bacpy(&entry->bdaddr, bdaddr);
3315 entry->bdaddr_type = type;
3317 list_add(&entry->list, list);
/* As hci_bdaddr_list_add(), but also stores optional peer/local IRKs
 * (16 bytes each) in the entry.
 */
3322 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3323 u8 type, u8 *peer_irk, u8 *local_irk)
3325 struct bdaddr_list_with_irk *entry;
3327 if (!bacmp(bdaddr, BDADDR_ANY))
/* NOTE(review): duplicate check uses the base hci_bdaddr_list_lookup()
 * on a list of bdaddr_list_with_irk entries — this relies on the
 * address fields lying at a common layout prefix; confirm against the
 * struct definitions.
 */
3330 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3333 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3337 bacpy(&entry->bdaddr, bdaddr);
3338 entry->bdaddr_type = type;
3341 memcpy(entry->peer_irk, peer_irk, 16);
3344 memcpy(entry->local_irk, local_irk, 16);
3346 list_add(&entry->list, list);
/* As hci_bdaddr_list_add(), but records an initial flags value. */
3351 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3354 struct bdaddr_list_with_flags *entry;
3356 if (!bacmp(bdaddr, BDADDR_ANY))
3359 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3362 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3366 bacpy(&entry->bdaddr, bdaddr);
3367 entry->bdaddr_type = type;
3368 entry->current_flags = flags;
3370 list_add(&entry->list, list);
/* Delete @bdaddr/@type from a plain bdaddr list.  Passing BDADDR_ANY
 * clears the entire list instead.
 */
3375 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3377 struct bdaddr_list *entry;
3379 if (!bacmp(bdaddr, BDADDR_ANY)) {
3380 hci_bdaddr_list_clear(list);
3384 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3388 list_del(&entry->list);
/* Same semantics for the IRK-carrying list variant. */
3394 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3397 struct bdaddr_list_with_irk *entry;
3399 if (!bacmp(bdaddr, BDADDR_ANY)) {
3400 hci_bdaddr_list_clear(list);
3404 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3408 list_del(&entry->list);
/* Same semantics for the flags-carrying list variant. */
3414 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3417 struct bdaddr_list_with_flags *entry;
3419 if (!bacmp(bdaddr, BDADDR_ANY)) {
3420 hci_bdaddr_list_clear(list);
3424 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3428 list_del(&entry->list);
3434 /* This function requires the caller holds hdev->lock */
/* Find LE connection parameters for @addr/@addr_type in
 * hdev->le_conn_params.
 */
3435 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3436 bdaddr_t *addr, u8 addr_type)
3438 struct hci_conn_params *params;
3440 list_for_each_entry(params, &hdev->le_conn_params, list) {
3441 if (bacmp(&params->addr, addr) == 0 &&
3442 params->addr_type == addr_type) {
3450 /* This function requires the caller holds hdev->lock */
/* Look up connection parameters on an action list (pend_le_conns /
 * pend_le_reports).  Resolved address types are first normalized back
 * to their unresolved counterparts so RPA-resolved peers still match.
 */
3451 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3452 bdaddr_t *addr, u8 addr_type)
3454 struct hci_conn_params *param;
3456 switch (addr_type) {
3457 case ADDR_LE_DEV_PUBLIC_RESOLVED:
3458 addr_type = ADDR_LE_DEV_PUBLIC;
3460 case ADDR_LE_DEV_RANDOM_RESOLVED:
3461 addr_type = ADDR_LE_DEV_RANDOM;
/* Note: iteration is over the ->action linkage, not ->list */
3465 list_for_each_entry(param, list, action) {
3466 if (bacmp(&param->addr, addr) == 0 &&
3467 param->addr_type == addr_type)
3474 /* This function requires the caller holds hdev->lock */
/* Return existing connection parameters for @addr, or allocate a new
 * entry seeded with the controller's default LE connection parameters
 * and auto-connect disabled.
 */
3475 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3476 bdaddr_t *addr, u8 addr_type)
3478 struct hci_conn_params *params;
3480 params = hci_conn_params_lookup(hdev, addr, addr_type);
3484 params = kzalloc(sizeof(*params), GFP_KERNEL);
3486 bt_dev_err(hdev, "out of memory");
3490 bacpy(&params->addr, addr);
3491 params->addr_type = addr_type;
3493 list_add(&params->list, &hdev->le_conn_params);
3494 INIT_LIST_HEAD(&params->action);
/* Seed with the controller-wide LE defaults */
3496 params->conn_min_interval = hdev->le_conn_min_interval;
3497 params->conn_max_interval = hdev->le_conn_max_interval;
3498 params->conn_latency = hdev->le_conn_latency;
3499 params->supervision_timeout = hdev->le_supv_timeout;
3500 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3502 BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* Release one hci_conn_params entry: drop any connection reference it
 * holds, then unlink it from both the action list and the main list.
 */
3507 static void hci_conn_params_free(struct hci_conn_params *params)
3510 hci_conn_drop(params->conn);
3511 hci_conn_put(params->conn);
3514 list_del(&params->action);
3515 list_del(&params->list);
3519 /* This function requires the caller holds hdev->lock */
/* Delete the parameters for one address and refresh background
 * scanning, since the pending-connection sets may have changed.
 */
3520 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3522 struct hci_conn_params *params;
3524 params = hci_conn_params_lookup(hdev, addr, addr_type);
3528 hci_conn_params_free(params);
3530 hci_update_background_scan(hdev);
3532 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3535 /* This function requires the caller holds hdev->lock */
/* Remove all parameter entries whose auto-connect mode is DISABLED,
 * except those with an explicit connect pending, which are demoted to
 * one-shot (EXPLICIT) instead of being removed.
 */
3536 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3538 struct hci_conn_params *params, *tmp;
3540 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3541 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3544 /* If trying to establish one time connection to disabled
3545 * device, leave the params, but mark them as just once.
3547 if (params->explicit_connect) {
3548 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3552 list_del(&params->list);
3556 BT_DBG("All LE disabled connection parameters were removed");
3559 /* This function requires the caller holds hdev->lock */
/* Free every LE connection-parameter entry unconditionally. */
3560 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3562 struct hci_conn_params *params, *tmp;
3564 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3565 hci_conn_params_free(params);
3567 BT_DBG("All LE connection parameters were removed");
3570 /* Copy the Identity Address of the controller.
3572 * If the controller has a public BD_ADDR, then by default use that one.
3573 * If this is a LE only controller without a public address, default to
3574 * the static random address.
3576 * For debugging purposes it is possible to force controllers with a
3577 * public address to use the static random address instead.
3579 * In case BR/EDR has been disabled on a dual-mode controller and
3580 * userspace has configured a static address, then that address
3581 * becomes the identity address instead of the public BR/EDR address.
3583 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
/* Static random address is chosen when forced, when no public address
 * exists, or when BR/EDR is disabled and a static address is set.
 */
3586 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3587 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3588 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3589 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3590 bacpy(bdaddr, &hdev->static_addr);
3591 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3593 bacpy(bdaddr, &hdev->bdaddr);
3594 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
/* Clear every pending suspend task bit and wake any waiters, so a
 * device being torn down cannot leave a suspend waiter stuck.
 */
3598 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3602 for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3603 clear_bit(i, hdev->suspend_tasks);
3605 wake_up(&hdev->suspend_wait_q);
/* Wait (with timeout) until all suspend task bits have been cleared by
 * the completing work.  On timeout, log which bits were still set and
 * clear them so the state machine is not permanently wedged.
 */
3608 static int hci_suspend_wait_event(struct hci_dev *hdev)
3611 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3612 __SUSPEND_NUM_TASKS)
3615 int ret = wait_event_timeout(hdev->suspend_wait_q,
3616 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3619 bt_dev_err(hdev, "Timed out waiting for suspend events");
3620 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3621 if (test_bit(i, hdev->suspend_tasks))
3622 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3623 clear_bit(i, hdev->suspend_tasks);
/* Work callback: perform the actual suspend-state transition under
 * hdev->lock.
 */
3634 static void hci_prepare_suspend(struct work_struct *work)
3636 struct hci_dev *hdev =
3637 container_of(work, struct hci_dev, suspend_prepare);
3640 hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3641 hci_dev_unlock(hdev);
/* Request a transition to @next: mark the prepare task pending, queue
 * the suspend work, and block until it signals completion.
 */
3644 static int hci_change_suspend_state(struct hci_dev *hdev,
3645 enum suspended_state next)
3647 hdev->suspend_state_next = next;
3648 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3649 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3650 return hci_suspend_wait_event(hdev);
/* Reset the recorded wake reason/address under hdev->lock, ready for
 * the next suspend cycle.
 */
3653 static void hci_clear_wake_reason(struct hci_dev *hdev)
3657 hdev->wake_reason = 0;
3658 bacpy(&hdev->wake_addr, BDADDR_ANY);
3659 hdev->wake_addr_type = 0;
3661 hci_dev_unlock(hdev);
/* PM notifier: drives the controller through the Bluetooth suspend
 * state machine on PM_SUSPEND_PREPARE and back to BT_RUNNING on
 * PM_POST_SUSPEND.  Failures are logged but never veto system suspend.
 */
3664 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3667 struct hci_dev *hdev =
3668 container_of(nb, struct hci_dev, suspend_notifier);
3670 u8 state = BT_RUNNING;
3672 /* If powering down, wait for completion. */
3673 if (mgmt_powering_down(hdev)) {
3674 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3675 ret = hci_suspend_wait_event(hdev);
3680 /* Suspend notifier should only act on events when powered. */
3681 if (!hdev_is_powered(hdev) ||
3682 hci_dev_test_flag(hdev, HCI_UNREGISTER))
3685 if (action == PM_SUSPEND_PREPARE) {
3686 /* Suspend consists of two actions:
3687 * - First, disconnect everything and make the controller not
3688 * connectable (disabling scanning)
3689 * - Second, program event filter/whitelist and enable scan
3691 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3693 state = BT_SUSPEND_DISCONNECT;
3695 /* Only configure whitelist if disconnect succeeded and wake
3696 * isn't being prevented.
3698 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3699 ret = hci_change_suspend_state(hdev,
3700 BT_SUSPEND_CONFIGURE_WAKE);
3702 state = BT_SUSPEND_CONFIGURE_WAKE;
/* Forget the previous wake reason and tell mgmt we are suspending */
3705 hci_clear_wake_reason(hdev);
3706 mgmt_suspending(hdev, state);
3708 } else if (action == PM_POST_SUSPEND) {
3709 ret = hci_change_suspend_state(hdev, BT_RUNNING);
/* Report why/by whom we were woken, as recorded during resume */
3711 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3712 hdev->wake_addr_type);
3716 /* We always allow suspend even if suspend preparation failed and
3717 * attempt to recover in resume.
3720 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3726 /* Alloc HCI device */
/* Allocate and initialize a new hci_dev: default packet types, LE and
 * BR/EDR tunables, all internal lists, work items, queues and wait
 * queues.  The device is not yet registered (see hci_register_dev()).
 * Returns NULL on allocation failure (failure path elided in this
 * excerpt).
 */
3727 struct hci_dev *hci_alloc_dev(void)
3729 struct hci_dev *hdev;
3731 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
/* Baseline BR/EDR capabilities and identity defaults */
3735 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3736 hdev->esco_type = (ESCO_HV1);
3737 hdev->link_mode = (HCI_LM_ACCEPT);
3738 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3739 hdev->io_capability = 0x03; /* No Input No Output */
3740 hdev->manufacturer = 0xffff; /* Default to internal use */
3741 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3742 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3743 hdev->adv_instance_cnt = 0;
3744 hdev->cur_adv_instance = 0x00;
3745 hdev->adv_instance_timeout = 0;
/* Interleaved advertisement-monitor scan durations (ms) */
3747 hdev->advmon_allowlist_duration = 300;
3748 hdev->advmon_no_filter_duration = 500;
3749 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
3751 hdev->sniff_max_interval = 800;
3752 hdev->sniff_min_interval = 80;
/* LE defaults: advertising, scanning and connection parameters are in
 * controller units (0.625 ms slots for intervals) per the HCI spec.
 */
3754 hdev->le_adv_channel_map = 0x07;
3755 hdev->le_adv_min_interval = 0x0800;
3756 hdev->le_adv_max_interval = 0x0800;
3757 hdev->le_scan_interval = 0x0060;
3758 hdev->le_scan_window = 0x0030;
3759 hdev->le_scan_int_suspend = 0x0400;
3760 hdev->le_scan_window_suspend = 0x0012;
3761 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3762 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3763 hdev->le_scan_int_connect = 0x0060;
3764 hdev->le_scan_window_connect = 0x0060;
3765 hdev->le_conn_min_interval = 0x0018;
3766 hdev->le_conn_max_interval = 0x0028;
3767 hdev->le_conn_latency = 0x0000;
3768 hdev->le_supv_timeout = 0x002a;
3769 hdev->le_def_tx_len = 0x001b;
3770 hdev->le_def_tx_time = 0x0148;
3771 hdev->le_max_tx_len = 0x001b;
3772 hdev->le_max_tx_time = 0x0148;
3773 hdev->le_max_rx_len = 0x001b;
3774 hdev->le_max_rx_time = 0x0148;
3775 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3776 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3777 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3778 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3779 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3780 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3781 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3782 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3783 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3785 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3786 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3787 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3788 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3789 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3790 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3792 /* default 1.28 sec page scan */
3793 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3794 hdev->def_page_scan_int = 0x0800;
3795 hdev->def_page_scan_window = 0x0012;
3797 mutex_init(&hdev->lock);
3798 mutex_init(&hdev->req_lock);
/* Initialize every device-internal list head before any user exists */
3800 INIT_LIST_HEAD(&hdev->mgmt_pending);
3801 INIT_LIST_HEAD(&hdev->blacklist);
3802 INIT_LIST_HEAD(&hdev->whitelist);
3803 INIT_LIST_HEAD(&hdev->uuids);
3804 INIT_LIST_HEAD(&hdev->link_keys);
3805 INIT_LIST_HEAD(&hdev->long_term_keys);
3806 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3807 INIT_LIST_HEAD(&hdev->remote_oob_data);
3808 INIT_LIST_HEAD(&hdev->le_white_list);
3809 INIT_LIST_HEAD(&hdev->le_resolv_list);
3810 INIT_LIST_HEAD(&hdev->le_conn_params);
3811 INIT_LIST_HEAD(&hdev->pend_le_conns);
3812 INIT_LIST_HEAD(&hdev->pend_le_reports);
3813 INIT_LIST_HEAD(&hdev->conn_hash.list);
3814 INIT_LIST_HEAD(&hdev->adv_instances);
3815 INIT_LIST_HEAD(&hdev->blocked_keys);
/* Work items driving RX/TX/command processing and power management */
3817 INIT_WORK(&hdev->rx_work, hci_rx_work);
3818 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3819 INIT_WORK(&hdev->tx_work, hci_tx_work);
3820 INIT_WORK(&hdev->power_on, hci_power_on);
3821 INIT_WORK(&hdev->error_reset, hci_error_reset);
3822 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3824 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3826 skb_queue_head_init(&hdev->rx_q);
3827 skb_queue_head_init(&hdev->cmd_q);
3828 skb_queue_head_init(&hdev->raw_q);
3830 init_waitqueue_head(&hdev->req_wait_q);
3831 init_waitqueue_head(&hdev->suspend_wait_q);
3833 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3835 hci_request_setup(hdev);
3837 hci_init_sysfs(hdev);
3838 discovery_init(hdev);
3842 EXPORT_SYMBOL(hci_alloc_dev);
3844 /* Free HCI device */
/* Drop the sysfs device reference; the actual hci_dev memory is freed
 * by the device release callback once the last reference is gone.
 */
3845 void hci_free_dev(struct hci_dev *hdev)
3847 /* will free via device release */
3848 put_device(&hdev->dev);
3850 EXPORT_SYMBOL(hci_free_dev);
3852 /* Register HCI device */
/* Make an allocated hci_dev visible to the stack: allocate an index,
 * create its workqueues, sysfs device, debugfs directory, rfkill
 * switch and PM notifier, add it to hci_dev_list, and finally queue
 * the initial power-on.  Requires open/close/send driver callbacks.
 */
3853 int hci_register_dev(struct hci_dev *hdev)
3857 if (!hdev->open || !hdev->close || !hdev->send)
3860 /* Do not allow HCI_AMP devices to register at index 0,
3861 * so the index can be used as the AMP controller ID.
3863 switch (hdev->dev_type) {
3865 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3868 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3877 sprintf(hdev->name, "hci%d", id);
3880 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Ordered (single-threaded) workqueues keep HCI processing serialized */
3882 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3883 if (!hdev->workqueue) {
3888 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3890 if (!hdev->req_workqueue) {
3891 destroy_workqueue(hdev->workqueue);
3896 if (!IS_ERR_OR_NULL(bt_debugfs))
3897 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3899 dev_set_name(&hdev->dev, "%s", hdev->name);
3901 error = device_add(&hdev->dev);
3905 hci_leds_init(hdev);
/* rfkill is best-effort: registration failure leaves rfkill NULL */
3907 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3908 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3911 if (rfkill_register(hdev->rfkill) < 0) {
3912 rfkill_destroy(hdev->rfkill);
3913 hdev->rfkill = NULL;
3917 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3918 hci_dev_set_flag(hdev, HCI_RFKILLED);
3920 hci_dev_set_flag(hdev, HCI_SETUP);
3921 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3923 if (hdev->dev_type == HCI_PRIMARY) {
3924 /* Assume BR/EDR support until proven otherwise (such as
3925 * through reading supported features during init.
3927 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3930 write_lock(&hci_dev_list_lock);
3931 list_add(&hdev->list, &hci_dev_list);
3932 write_unlock(&hci_dev_list_lock);
3934 /* Devices that are marked for raw-only usage are unconfigured
3935 * and should not be included in normal operation.
3937 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3938 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3940 hci_sock_dev_event(hdev, HCI_DEV_REG);
/* Quirky controllers may opt out of the system suspend notifier */
3943 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3944 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3945 error = register_pm_notifier(&hdev->suspend_notifier);
3950 queue_work(hdev->req_workqueue, &hdev->power_on);
3952 idr_init(&hdev->adv_monitors_idr);
/* Error unwind: tear down workqueues and release the index */
3957 destroy_workqueue(hdev->workqueue);
3958 destroy_workqueue(hdev->req_workqueue);
3960 ida_simple_remove(&hci_index_ida, hdev->id);
3964 EXPORT_SYMBOL(hci_register_dev);
3966 /* Unregister HCI device */
/* Reverse of hci_register_dev(): mark the device unregistering, take
 * it off the global list, stop pending work and PM notifier, close the
 * device, notify mgmt/sockets, then free sysfs/debugfs/workqueue
 * resources and all per-device state before releasing the index.
 */
3967 void hci_unregister_dev(struct hci_dev *hdev)
3971 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Set early so concurrent paths (e.g. suspend notifier) back off */
3973 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3977 write_lock(&hci_dev_list_lock);
3978 list_del(&hdev->list);
3979 write_unlock(&hci_dev_list_lock);
3981 cancel_work_sync(&hdev->power_on);
3983 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3984 hci_suspend_clear_tasks(hdev);
3985 unregister_pm_notifier(&hdev->suspend_notifier);
3986 cancel_work_sync(&hdev->suspend_prepare);
3989 hci_dev_do_close(hdev);
/* Tell mgmt the index is gone unless the device never finished setup */
3991 if (!test_bit(HCI_INIT, &hdev->flags) &&
3992 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3993 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3995 mgmt_index_removed(hdev);
3996 hci_dev_unlock(hdev);
3999 /* mgmt_index_removed should take care of emptying the
4001 BUG_ON(!list_empty(&hdev->mgmt_pending));
4003 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4006 rfkill_unregister(hdev->rfkill);
4007 rfkill_destroy(hdev->rfkill);
4010 device_del(&hdev->dev);
4012 debugfs_remove_recursive(hdev->debugfs);
4013 kfree_const(hdev->hw_info);
4014 kfree_const(hdev->fw_info);
4016 destroy_workqueue(hdev->workqueue);
4017 destroy_workqueue(hdev->req_workqueue);
/* Drop every remaining piece of per-device state under hdev->lock */
4020 hci_bdaddr_list_clear(&hdev->blacklist);
4021 hci_bdaddr_list_clear(&hdev->whitelist);
4022 hci_uuids_clear(hdev);
4023 hci_link_keys_clear(hdev);
4024 hci_smp_ltks_clear(hdev);
4025 hci_smp_irks_clear(hdev);
4026 hci_remote_oob_data_clear(hdev);
4027 hci_adv_instances_clear(hdev);
4028 hci_adv_monitors_clear(hdev);
4029 hci_bdaddr_list_clear(&hdev->le_white_list);
4030 hci_bdaddr_list_clear(&hdev->le_resolv_list);
4031 hci_conn_params_clear_all(hdev);
4032 hci_discovery_filter_clear(hdev);
4033 hci_blocked_keys_clear(hdev);
4034 hci_dev_unlock(hdev);
4038 ida_simple_remove(&hci_index_ida, id);
4040 EXPORT_SYMBOL(hci_unregister_dev);
4042 /* Suspend HCI device */
/* Notify HCI sockets/monitor that the device is suspending. */
4043 int hci_suspend_dev(struct hci_dev *hdev)
4045 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4048 EXPORT_SYMBOL(hci_suspend_dev);
4050 /* Resume HCI device */
/* Notify HCI sockets/monitor that the device has resumed. */
4051 int hci_resume_dev(struct hci_dev *hdev)
4053 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4056 EXPORT_SYMBOL(hci_resume_dev);
4058 /* Reset HCI device */
/* Simulate a controller hardware-error event: build a synthetic
 * HCI_EV_HARDWARE_ERROR packet and inject it into the RX path, which
 * triggers the stack's normal error-reset handling.
 */
4059 int hci_reset_dev(struct hci_dev *hdev)
4061 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4062 struct sk_buff *skb;
4064 skb = bt_skb_alloc(3, GFP_ATOMIC);
4068 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4069 skb_put_data(skb, hw_err, 3);
4071 /* Send Hardware Error to upper stack */
4072 return hci_recv_frame(hdev, skb);
4074 EXPORT_SYMBOL(hci_reset_dev);
4076 /* Receive frame from HCI drivers */
/* Entry point for packets coming up from transport drivers.  Frames
 * are rejected when the device is neither up nor initializing, or when
 * the packet type is not one of the four valid inbound types; accepted
 * frames are timestamped, queued on rx_q and handed to rx_work.
 */
4077 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4079 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4080 && !test_bit(HCI_INIT, &hdev->flags))) {
4085 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4086 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4087 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4088 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4094 bt_cb(skb)->incoming = 1;
/* Timestamp so monitor/sockets see the arrival time */
4097 __net_timestamp(skb);
4099 skb_queue_tail(&hdev->rx_q, skb);
4100 queue_work(hdev->workqueue, &hdev->rx_work);
4104 EXPORT_SYMBOL(hci_recv_frame);
4106 /* Receive diagnostic message from HCI drivers */
/* Same queueing path as hci_recv_frame(), but the skb is tagged as a
 * diagnostic packet and no UP/type validation is applied.
 */
4107 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4109 /* Mark as diagnostic packet */
4110 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4113 __net_timestamp(skb);
4115 skb_queue_tail(&hdev->rx_q, skb);
4116 queue_work(hdev->workqueue, &hdev->rx_work);
4120 EXPORT_SYMBOL(hci_recv_diag);
/* Record a printf-formatted hardware-info string on the device,
 * replacing any previous value (freed with kfree_const).
 */
4122 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4126 va_start(vargs, fmt);
4127 kfree_const(hdev->hw_info);
4128 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4131 EXPORT_SYMBOL(hci_set_hw_info);
/* Same as hci_set_hw_info(), but for the firmware-info string. */
4133 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4137 va_start(vargs, fmt);
4138 kfree_const(hdev->fw_info);
4139 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4142 EXPORT_SYMBOL(hci_set_fw_info);
4144 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback set (e.g. L2CAP, SCO) on the
 * global hci_cb_list, serialized by hci_cb_list_lock.
 */
4146 int hci_register_cb(struct hci_cb *cb)
4148 BT_DBG("%p name %s", cb, cb->name);
4150 mutex_lock(&hci_cb_list_lock);
4151 list_add_tail(&cb->list, &hci_cb_list);
4152 mutex_unlock(&hci_cb_list_lock);
4156 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered upper-protocol callback set. */
4158 int hci_unregister_cb(struct hci_cb *cb)
4160 BT_DBG("%p name %s", cb, cb->name);
4162 mutex_lock(&hci_cb_list_lock);
4163 list_del(&cb->list);
4164 mutex_unlock(&hci_cb_list_lock);
4168 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outbound frame to the transport driver.  A copy always goes
 * to the monitor socket, and to regular sockets when any listener is
 * promiscuous.  The frame is dropped (elided branch) when the device
 * is not running; driver send errors are logged.
 */
4170 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4174 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4178 __net_timestamp(skb);
4180 /* Send copy to monitor */
4181 hci_send_to_monitor(hdev, skb);
4183 if (atomic_read(&hdev->promisc)) {
4184 /* Send copy to the sockets */
4185 hci_send_to_sock(hdev, skb);
4188 /* Get rid of skb owner, prior to sending to the driver. */
4191 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4196 err = hdev->send(hdev, skb);
4198 bt_dev_err(hdev, "sending frame failed (%d)", err);
4203 /* Send HCI command */
/* Build a command skb for @opcode with @plen bytes of @param, flag it as
 * the start of a stand-alone request, and queue it for cmd_work to send.
 * Allocation failure is reported via bt_dev_err (error return elided in
 * this view).
 */
4204 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4207 struct sk_buff *skb;
4209 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4211 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4213 bt_dev_err(hdev, "no memory for command");
4217 /* Stand-alone HCI commands must be flagged as
4218 * single-command requests.
4220 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4222 skb_queue_tail(&hdev->cmd_q, skb);
4223 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Send a command directly to the driver, bypassing the command queue and
 * without waiting for any completion event. Restricted to vendor-specific
 * commands (OGF 0x3f), since only some vendor commands legitimately
 * produce no response event.
 */
4228 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4231 struct sk_buff *skb;
4233 if (hci_opcode_ogf(opcode) != 0x3f) {
4234 /* A controller receiving a command shall respond with either
4235 * a Command Status Event or a Command Complete Event.
4236 * Therefore, all standard HCI commands must be sent via the
4237 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4238 * Some vendors do not comply with this rule for vendor-specific
4239 * commands and do not return any event. We want to support
4240 * unresponded commands for such cases only.
4242 bt_dev_err(hdev, "unresponded command not supported");
4246 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4248 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
/* Direct transmit: skips cmd_q/cmd_work and flow-control accounting */
4253 hci_send_frame(hdev, skb);
4257 EXPORT_SYMBOL(__hci_cmd_send);
4259 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last command sent
 * (hdev->sent_cmd), but only if its opcode matches @opcode; the NULL
 * returns for no-sent-command / opcode mismatch are elided in this view.
 */
4260 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4262 struct hci_command_hdr *hdr;
4264 if (!hdev->sent_cmd)
4267 hdr = (void *) hdev->sent_cmd->data;
/* Stored header is little-endian; compare against converted opcode */
4269 if (hdr->opcode != cpu_to_le16(opcode))
4272 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4274 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4277 /* Send HCI command and wait for command complete event */
/* Synchronous command helper for external callers: requires the device
 * to be UP, then runs __hci_cmd_sync() under the request-sync lock and
 * returns the resulting event skb (or an ERR_PTR).
 */
4278 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4279 const void *param, u32 timeout)
4281 struct sk_buff *skb;
4283 if (!test_bit(HCI_UP, &hdev->flags))
4284 return ERR_PTR(-ENETDOWN);
4286 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
/* Serialize with other synchronous request users */
4288 hci_req_sync_lock(hdev);
4289 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4290 hci_req_sync_unlock(hdev);
4294 EXPORT_SYMBOL(hci_cmd_sync);
/* Prepend an ACL data header (handle+flags, data length) to @skb.
 * The length value written to hdr->dlen comes from a local "len" whose
 * computation is elided in this view (presumably the pre-push skb->len).
 */
4297 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4299 struct hci_acl_hdr *hdr;
4302 skb_push(skb, HCI_ACL_HDR_SIZE);
4303 skb_reset_transport_header(skb);
4304 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
/* Handle and packet-boundary/broadcast flags share one 16-bit field */
4305 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4306 hdr->dlen = cpu_to_le16(len);
/* Queue an (possibly fragmented) outgoing ACL skb on @queue.
 * The head skb gets an ACL header whose handle depends on hdev->dev_type
 * (connection handle vs channel handle; the case labels are elided in
 * this view). Fragments hang off skb_shinfo(skb)->frag_list; each is
 * typed, given its own ACL header with ACL_START cleared, and the whole
 * chain is appended atomically under the queue lock.
 */
4309 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4310 struct sk_buff *skb, __u16 flags)
4312 struct hci_conn *conn = chan->conn;
4313 struct hci_dev *hdev = conn->hdev;
4314 struct sk_buff *list;
/* Only the linear head counts for the first fragment's length */
4316 skb->len = skb_headlen(skb);
4319 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4321 switch (hdev->dev_type) {
4323 hci_add_acl_hdr(skb, conn->handle, flags);
4326 hci_add_acl_hdr(skb, chan->handle, flags);
4329 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4333 list = skb_shinfo(skb)->frag_list;
4335 /* Non fragmented */
4336 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4338 skb_queue_tail(queue, skb);
4341 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain so each piece is queued individually */
4343 skb_shinfo(skb)->frag_list = NULL;
4345 /* Queue all fragments atomically. We need to use spin_lock_bh
4346 * here because of 6LoWPAN links, as there this function is
4347 * called from softirq and using normal spin lock could cause
4350 spin_lock_bh(&queue->lock);
4352 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag */
4354 flags &= ~ACL_START;
4357 skb = list; list = list->next;
4359 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4360 hci_add_acl_hdr(skb, conn->handle, flags);
4362 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4364 __skb_queue_tail(queue, skb);
4367 spin_unlock_bh(&queue->lock);
/* Public ACL send: queue @skb on the channel's data_q via hci_queue_acl()
 * and kick the TX worker to schedule actual transmission.
 */
4371 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4373 struct hci_dev *hdev = chan->conn->hdev;
4375 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4377 hci_queue_acl(chan, &chan->data_q, skb, flags);
4379 queue_work(hdev->workqueue, &hdev->tx_work);
/* Public SCO send: prepend a SCO header (handle + 8-bit length), tag the
 * skb as SCO data, queue it on the connection's data_q and kick tx_work.
 */
4383 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4385 struct hci_dev *hdev = conn->hdev;
4386 struct hci_sco_hdr hdr;
4388 BT_DBG("%s len %d", hdev->name, skb->len);
4390 hdr.handle = cpu_to_le16(conn->handle);
/* SCO dlen is a single byte; no endian conversion needed */
4391 hdr.dlen = skb->len;
4393 skb_push(skb, HCI_SCO_HDR_SIZE);
4394 skb_reset_transport_header(skb);
4395 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4397 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4399 skb_queue_tail(&conn->data_q, skb);
4400 queue_work(hdev->workqueue, &hdev->tx_work);
4403 /* ---- HCI TX task (outgoing data) ---- */
4405 /* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest
 * in-flight packets (c->sent), so transmission is spread fairly across
 * connections. Walks the connection hash under RCU; the per-type
 * controller credit count is divided into *quote (the quota math itself
 * is elided in this view).
 */
4406 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4409 struct hci_conn_hash *h = &hdev->conn_hash;
4410 struct hci_conn *conn = NULL, *c;
4411 unsigned int num = 0, min = ~0;
4413 /* We don't have to lock device here. Connections are always
4414 * added and removed with TX task disabled. */
4418 list_for_each_entry_rcu(c, &h->list, list) {
4419 if (c->type != type || skb_queue_empty(&c->data_q))
4422 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4427 if (c->sent < min) {
/* Stop early once every connection of this type has been seen */
4432 if (hci_conn_num(hdev, type) == num)
4441 switch (conn->type) {
4443 cnt = hdev->acl_cnt;
4447 cnt = hdev->sco_cnt;
/* LE falls back to ACL credits when the controller has no LE buffers */
4450 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4454 bt_dev_err(hdev, "unknown link type %d", conn->type);
4462 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler: when no controller credits come back in time,
 * disconnect every connection of @type that still has unacked packets,
 * freeing the stuck credits.
 */
4466 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4468 struct hci_conn_hash *h = &hdev->conn_hash;
4471 bt_dev_err(hdev, "link tx timeout");
4475 /* Kill stalled connections */
4476 list_for_each_entry_rcu(c, &h->list, list) {
4477 if (c->type == type && c->sent) {
4478 bt_dev_err(hdev, "killing stalled connection %pMR",
4480 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among all channels on connections of @type
 * with queued data, pick the one whose head skb has the highest priority
 * (cur_prio); ties are broken toward the connection with the fewest
 * in-flight packets. The resulting per-channel quota goes into *quote
 * (the final quota computation is elided in this view).
 */
4487 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4490 struct hci_conn_hash *h = &hdev->conn_hash;
4491 struct hci_chan *chan = NULL;
4492 unsigned int num = 0, min = ~0, cur_prio = 0;
4493 struct hci_conn *conn;
4494 int cnt, q, conn_num = 0;
4496 BT_DBG("%s", hdev->name);
4500 list_for_each_entry_rcu(conn, &h->list, list) {
4501 struct hci_chan *tmp;
4503 if (conn->type != type)
4506 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4511 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4512 struct sk_buff *skb;
4514 if (skb_queue_empty(&tmp->data_q))
/* Priority is taken from the head skb of each channel queue */
4517 skb = skb_peek(&tmp->data_q);
4518 if (skb->priority < cur_prio)
4521 if (skb->priority > cur_prio) {
/* Higher priority found: restart the fairness comparison */
4524 cur_prio = skb->priority;
4529 if (conn->sent < min) {
4535 if (hci_conn_num(hdev, type) == conn_num)
4544 switch (chan->conn->type) {
4546 cnt = hdev->acl_cnt;
4549 cnt = hdev->block_cnt;
4553 cnt = hdev->sco_cnt;
4556 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4560 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4565 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after packets of @type were sent, bump the
 * priority of waiting head skbs (capped at HCI_PRIO_MAX - 1) so
 * lower-priority channels eventually win the scheduler.
 */
4569 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4571 struct hci_conn_hash *h = &hdev->conn_hash;
4572 struct hci_conn *conn;
4575 BT_DBG("%s", hdev->name);
4579 list_for_each_entry_rcu(conn, &h->list, list) {
4580 struct hci_chan *chan;
4582 if (conn->type != type)
4585 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4590 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4591 struct sk_buff *skb;
4598 if (skb_queue_empty(&chan->data_q))
4601 skb = skb_peek(&chan->data_q);
/* Already at the promotion cap: nothing to do for this channel */
4602 if (skb->priority >= HCI_PRIO_MAX - 1)
4605 skb->priority = HCI_PRIO_MAX - 1;
4607 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4611 if (hci_conn_num(hdev, type) == num)
/* Number of controller buffer blocks consumed by one ACL packet
 * (payload only: the ACL header is excluded), rounded up.
 */
4619 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4621 /* Calculate count of blocks used by this packet */
4622 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If the controller has handed back no credits (@cnt == 0) since the
 * last ACL transmit for longer than HCI_ACL_TX_TIMEOUT, trigger the
 * stalled-link cleanup. Skipped for unconfigured controllers.
 */
4625 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4627 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4628 /* ACL tx timeout must be longer than maximum
4629 * link supervision timeout (40.9 seconds) */
4630 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4631 HCI_ACL_TX_TIMEOUT))
4632 hci_link_tx_to(hdev, ACL_LINK);
/* Drain SCO connections: while credits remain, repeatedly pick the
 * least-loaded SCO connection and send up to its quota of packets.
 */
4637 static void hci_sched_sco(struct hci_dev *hdev)
4639 struct hci_conn *conn;
4640 struct sk_buff *skb;
4643 BT_DBG("%s", hdev->name);
4645 if (!hci_conn_num(hdev, SCO_LINK))
4648 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4649 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4650 BT_DBG("skb %p len %d", skb, skb->len);
4651 hci_send_frame(hdev, skb);
/* Guard conn->sent against wrap-around (unacked-packet counter) */
4654 if (conn->sent == ~0)
/* Drain eSCO connections; identical to hci_sched_sco() but for
 * ESCO_LINK (eSCO shares the sco_cnt credit pool).
 */
4660 static void hci_sched_esco(struct hci_dev *hdev)
4662 struct hci_conn *conn;
4663 struct sk_buff *skb;
4666 BT_DBG("%s", hdev->name);
4668 if (!hci_conn_num(hdev, ESCO_LINK))
4671 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4673 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4674 BT_DBG("skb %p len %d", skb, skb->len);
4675 hci_send_frame(hdev, skb);
/* Guard conn->sent against wrap-around (unacked-packet counter) */
4678 if (conn->sent == ~0)
/* Packet-based ACL scheduler: while ACL credits remain, pick the best
 * channel via hci_chan_sent() and send its queued skbs in priority
 * order, stopping a channel's burst when the head priority drops.
 * SCO/eSCO are rescheduled between bursts to keep audio latency low,
 * and priorities are recalculated if anything was sent.
 */
4684 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4686 unsigned int cnt = hdev->acl_cnt;
4687 struct hci_chan *chan;
4688 struct sk_buff *skb;
/* Detect stalled controller before attempting to send */
4691 __check_timeout(hdev, cnt);
4693 while (hdev->acl_cnt &&
4694 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4695 u32 priority = (skb_peek(&chan->data_q))->priority;
4696 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4697 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4698 skb->len, skb->priority);
4700 /* Stop if priority has changed */
4701 if (skb->priority < priority)
/* Peek-then-dequeue: the skb is only removed once we commit to it */
4704 skb = skb_dequeue(&chan->data_q);
4706 hci_conn_enter_active_mode(chan->conn,
4707 bt_cb(skb)->force_active);
4709 hci_send_frame(hdev, skb);
4710 hdev->acl_last_tx = jiffies;
4716 /* Send pending SCO packets right away */
4717 hci_sched_sco(hdev);
4718 hci_sched_esco(hdev);
4722 if (cnt != hdev->acl_cnt)
4723 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (flow control counted in buffer blocks
 * rather than packets). Same priority-burst structure as
 * hci_sched_acl_pkt(), but each skb consumes __get_blocks() credits and
 * is deferred when it needs more blocks than remain. AMP controllers
 * schedule AMP_LINK instead of ACL_LINK (the type assignment is elided
 * in this view).
 */
4726 static void hci_sched_acl_blk(struct hci_dev *hdev)
4728 unsigned int cnt = hdev->block_cnt;
4729 struct hci_chan *chan;
4730 struct sk_buff *skb;
4734 __check_timeout(hdev, cnt);
4736 BT_DBG("%s", hdev->name);
4738 if (hdev->dev_type == HCI_AMP)
4743 while (hdev->block_cnt > 0 &&
4744 (chan = hci_chan_sent(hdev, type, &quote))) {
4745 u32 priority = (skb_peek(&chan->data_q))->priority;
4746 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4749 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4750 skb->len, skb->priority);
4752 /* Stop if priority has changed */
4753 if (skb->priority < priority)
4756 skb = skb_dequeue(&chan->data_q);
4758 blocks = __get_blocks(hdev, skb);
/* Not enough block credits for this packet: stop this burst */
4759 if (blocks > hdev->block_cnt)
4762 hci_conn_enter_active_mode(chan->conn,
4763 bt_cb(skb)->force_active);
4765 hci_send_frame(hdev, skb);
4766 hdev->acl_last_tx = jiffies;
4768 hdev->block_cnt -= blocks;
/* Per-channel and per-connection accounting is in blocks here */
4771 chan->sent += blocks;
4772 chan->conn->sent += blocks;
4776 if (cnt != hdev->block_cnt)
4777 hci_prio_recalculate(hdev, type);
/* ACL scheduling dispatcher: bail out when there are no links of the
 * kind this controller carries (ACL on primary, AMP on AMP), then run
 * the packet-based or block-based scheduler per the controller's
 * flow-control mode.
 */
4780 static void hci_sched_acl(struct hci_dev *hdev)
4782 BT_DBG("%s", hdev->name);
4784 /* No ACL link over BR/EDR controller */
4785 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4788 /* No AMP link over AMP controller */
4789 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4792 switch (hdev->flow_ctl_mode) {
4793 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4794 hci_sched_acl_pkt(hdev);
4797 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4798 hci_sched_acl_blk(hdev);
/* LE data scheduler: same priority-burst structure as the ACL
 * schedulers. Credits come from the LE pool when the controller has
 * dedicated LE buffers (le_pkts), otherwise from the shared ACL pool —
 * which is why the remaining count may be written back to acl_cnt at
 * the end (the le_pkts branch around that store is elided in this view).
 */
4803 static void hci_sched_le(struct hci_dev *hdev)
4805 struct hci_chan *chan;
4806 struct sk_buff *skb;
4807 int quote, cnt, tmp;
4809 BT_DBG("%s", hdev->name);
4811 if (!hci_conn_num(hdev, LE_LINK))
4814 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4816 __check_timeout(hdev, cnt);
4819 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4820 u32 priority = (skb_peek(&chan->data_q))->priority;
4821 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4822 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4823 skb->len, skb->priority);
4825 /* Stop if priority has changed */
4826 if (skb->priority < priority)
4829 skb = skb_dequeue(&chan->data_q);
4831 hci_send_frame(hdev, skb);
4832 hdev->le_last_tx = jiffies;
4838 /* Send pending SCO packets right away */
4839 hci_sched_sco(hdev);
4840 hci_sched_esco(hdev);
4847 hdev->acl_cnt = cnt;
4850 hci_prio_recalculate(hdev, LE_LINK);
/* TX worker: run all link-type schedulers (skipped entirely under
 * HCI_USER_CHANNEL, where userspace owns the device), then flush any
 * raw packets queued on raw_q straight to the driver.
 */
4853 static void hci_tx_work(struct work_struct *work)
4855 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4856 struct sk_buff *skb;
4858 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4859 hdev->sco_cnt, hdev->le_cnt);
4861 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4862 /* Schedule queues and send stuff to HCI driver */
4863 hci_sched_sco(hdev);
4864 hci_sched_esco(hdev);
4865 hci_sched_acl(hdev);
4869 /* Send next queued raw (unknown type) packet */
4870 while ((skb = skb_dequeue(&hdev->raw_q)))
4871 hci_send_frame(hdev, skb);
4874 /* ----- HCI RX task (incoming data processing) ----- */
4876 /* ACL data packet */
/* Parse an incoming ACL packet: split the 16-bit handle field into
 * handle and flags, look up the connection (under hci_dev lock), and
 * deliver the payload to L2CAP; packets for unknown handles are logged
 * (and presumably freed — that path is elided in this view).
 */
4877 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4879 struct hci_acl_hdr *hdr = (void *) skb->data;
4880 struct hci_conn *conn;
4881 __u16 handle, flags;
4883 skb_pull(skb, HCI_ACL_HDR_SIZE);
4885 handle = __le16_to_cpu(hdr->handle);
4886 flags = hci_flags(handle);
4887 handle = hci_handle(handle);
4889 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4892 hdev->stat.acl_rx++;
4895 conn = hci_conn_hash_lookup_handle(hdev, handle);
4896 hci_dev_unlock(hdev);
/* Incoming traffic should not leave the ACL link in sniff mode */
4899 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4901 /* Send to upper protocol */
4902 l2cap_recv_acldata(conn, skb, flags);
4905 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4912 /* SCO data packet */
/* Parse an incoming SCO packet: decode handle/flags, look up the
 * connection, stash the packet-status flag bits in the skb control
 * block, and hand the payload to the SCO layer; unknown handles are
 * logged (drop path elided in this view).
 */
4913 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4915 struct hci_sco_hdr *hdr = (void *) skb->data;
4916 struct hci_conn *conn;
4917 __u16 handle, flags;
4919 skb_pull(skb, HCI_SCO_HDR_SIZE);
4921 handle = __le16_to_cpu(hdr->handle);
4922 flags = hci_flags(handle);
4923 handle = hci_handle(handle);
4925 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4928 hdev->stat.sco_rx++;
4931 conn = hci_conn_hash_lookup_handle(hdev, handle);
4932 hci_dev_unlock(hdev);
4935 /* Send to upper protocol */
/* Low two flag bits carry the erroneous-data packet status */
4936 bt_cb(skb)->sco.pkt_status = flags & 0x03;
4937 sco_recv_scodata(conn, skb);
4940 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
/* A request is complete when the next queued command starts a new
 * request (HCI_REQ_START on the head of cmd_q); the empty-queue case is
 * handled by lines elided in this view.
 */
4947 static bool hci_req_is_complete(struct hci_dev *hdev)
4949 struct sk_buff *skb;
4951 skb = skb_peek(&hdev->cmd_q);
4955 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
/* Re-queue a clone of the last sent command at the head of cmd_q.
 * Used when a spontaneous controller reset voids the pending command;
 * HCI_OP_RESET itself is never resent (it is the reset).
 */
4958 static void hci_resend_last(struct hci_dev *hdev)
4960 struct hci_command_hdr *sent;
4961 struct sk_buff *skb;
4964 if (!hdev->sent_cmd)
4967 sent = (void *) hdev->sent_cmd->data;
4968 opcode = __le16_to_cpu(sent->opcode);
4969 if (opcode == HCI_OP_RESET)
4972 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
/* Head of the queue so it goes out before anything queued later */
4976 skb_queue_head(&hdev->cmd_q, skb);
4977 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Match a command-complete/status event against the request machinery.
 * @req_complete / @req_complete_skb: out-parameters filled with the
 * request's completion callback, if this event finishes a request.
 * If the event doesn't match the last sent command, only the CSR
 * spurious-reset workaround applies. If the request failed (@status
 * non-zero) or is complete, the completion callback is extracted from
 * sent_cmd and all remaining commands of that request are purged from
 * cmd_q (stopping at the next HCI_REQ_START boundary).
 */
4980 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4981 hci_req_complete_t *req_complete,
4982 hci_req_complete_skb_t *req_complete_skb)
4984 struct sk_buff *skb;
4985 unsigned long flags;
4987 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4989 /* If the completed command doesn't match the last one that was
4990 * sent we need to do special handling of it.
4992 if (!hci_sent_cmd_data(hdev, opcode)) {
4993 /* Some CSR based controllers generate a spontaneous
4994 * reset complete event during init and any pending
4995 * command will never be completed. In such a case we
4996 * need to resend whatever was the last sent
4999 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5000 hci_resend_last(hdev);
5005 /* If we reach this point this event matches the last command sent */
5006 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5008 /* If the command succeeded and there's still more commands in
5009 * this request the request is not yet complete.
5011 if (!status && !hci_req_is_complete(hdev))
5014 /* If this was the last command in a request the complete
5015 * callback would be found in hdev->sent_cmd instead of the
5016 * command queue (hdev->cmd_q).
5018 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5019 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5023 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5024 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5028 /* Remove all pending commands belonging to this request */
/* irqsave: cmd_q can be touched from interrupt-driven contexts */
5029 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5030 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5031 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
/* Start of the next request: put it back and stop purging */
5032 __skb_queue_head(&hdev->cmd_q, skb);
5036 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5037 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5039 *req_complete = bt_cb(skb)->hci.req_complete;
5042 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
/* RX worker: drain rx_q, mirroring each skb to the monitor (and raw
 * sockets in promiscuous mode), then dispatch by packet type. Frames
 * are dropped for user-channel devices (except during INIT, where the
 * driver setup still needs them), and data packets are dropped while
 * the device is in INIT.
 */
5045 static void hci_rx_work(struct work_struct *work)
5047 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5048 struct sk_buff *skb;
5050 BT_DBG("%s", hdev->name);
5052 while ((skb = skb_dequeue(&hdev->rx_q))) {
5053 /* Send copy to monitor */
5054 hci_send_to_monitor(hdev, skb);
5056 if (atomic_read(&hdev->promisc)) {
5057 /* Send copy to the sockets */
5058 hci_send_to_sock(hdev, skb);
5061 /* If the device has been opened in HCI_USER_CHANNEL,
5062 * the userspace has exclusive access to device.
5063 * When device is HCI_INIT, we still need to process
5064 * the data packets to the driver in order
5065 * to complete its setup().
5067 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5068 !test_bit(HCI_INIT, &hdev->flags)) {
5073 if (test_bit(HCI_INIT, &hdev->flags)) {
5074 /* Don't process data packets in this state. */
5075 switch (hci_skb_pkt_type(skb)) {
5076 case HCI_ACLDATA_PKT:
5077 case HCI_SCODATA_PKT:
5078 case HCI_ISODATA_PKT:
/* Normal dispatch by packet type */
5085 switch (hci_skb_pkt_type(skb)) {
5087 BT_DBG("%s Event packet", hdev->name);
5088 hci_event_packet(hdev, skb);
5091 case HCI_ACLDATA_PKT:
5092 BT_DBG("%s ACL data packet", hdev->name);
5093 hci_acldata_packet(hdev, skb);
5096 case HCI_SCODATA_PKT:
5097 BT_DBG("%s SCO data packet", hdev->name);
5098 hci_scodata_packet(hdev, skb);
5108 static void hci_cmd_work(struct work_struct *work)
5110 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5111 struct sk_buff *skb;
5113 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5114 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5116 /* Send queued commands */
5117 if (atomic_read(&hdev->cmd_cnt)) {
5118 skb = skb_dequeue(&hdev->cmd_q);
5122 kfree_skb(hdev->sent_cmd);
5124 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5125 if (hdev->sent_cmd) {
5126 if (hci_req_status_pend(hdev))
5127 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5128 atomic_dec(&hdev->cmd_cnt);
5129 hci_send_frame(hdev, skb);
5130 if (test_bit(HCI_RESET, &hdev->flags))
5131 cancel_delayed_work(&hdev->cmd_timer);
5133 schedule_delayed_work(&hdev->cmd_timer,
5136 skb_queue_head(&hdev->cmd_q, skb);
5137 queue_work(hdev->workqueue, &hdev->cmd_work);