2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
45 static void hci_rx_work(struct work_struct *work);
46 static void hci_cmd_work(struct work_struct *work);
47 static void hci_tx_work(struct work_struct *work);
50 LIST_HEAD(hci_dev_list);
51 DEFINE_RWLOCK(hci_dev_list_lock);
53 /* HCI callback list */
54 LIST_HEAD(hci_cb_list);
55 DEFINE_MUTEX(hci_cb_list_lock);
57 /* HCI ID Numbering */
58 static DEFINE_IDA(hci_index_ida);
60 /* ---- HCI debugfs entries ---- */
62 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
65 struct hci_dev *hdev = file->private_data;
68 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
74 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
75 size_t count, loff_t *ppos)
77 struct hci_dev *hdev = file->private_data;
80 size_t buf_size = min(count, (sizeof(buf)-1));
83 if (!test_bit(HCI_UP, &hdev->flags))
86 if (copy_from_user(buf, user_buf, buf_size))
90 if (strtobool(buf, &enable))
93 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
96 hci_req_sync_lock(hdev);
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
103 hci_req_sync_unlock(hdev);
110 hci_dev_change_flag(hdev, HCI_DUT_MODE);
115 static const struct file_operations dut_mode_fops = {
117 .read = dut_mode_read,
118 .write = dut_mode_write,
119 .llseek = default_llseek,
122 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
125 struct hci_dev *hdev = file->private_data;
128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
134 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135 size_t count, loff_t *ppos)
137 struct hci_dev *hdev = file->private_data;
139 size_t buf_size = min(count, (sizeof(buf)-1));
143 if (copy_from_user(buf, user_buf, buf_size))
146 buf[buf_size] = '\0';
147 if (strtobool(buf, &enable))
150 /* When the diagnostic flags are not persistent and the transport
151 * is not active, then there is no need for the vendor callback.
153 * Instead just store the desired value. If needed the setting
154 * will be programmed when the controller gets powered on.
156 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
157 !test_bit(HCI_RUNNING, &hdev->flags))
160 hci_req_sync_lock(hdev);
161 err = hdev->set_diag(hdev, enable);
162 hci_req_sync_unlock(hdev);
169 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
171 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
176 static const struct file_operations vendor_diag_fops = {
178 .read = vendor_diag_read,
179 .write = vendor_diag_write,
180 .llseek = default_llseek,
183 static void hci_debugfs_create_basic(struct hci_dev *hdev)
185 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
189 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
193 static int hci_reset_req(struct hci_request *req, unsigned long opt)
195 BT_DBG("%s %ld", req->hdev->name, opt);
198 set_bit(HCI_RESET, &req->hdev->flags);
199 hci_req_add(req, HCI_OP_RESET, 0, NULL);
203 static void bredr_init(struct hci_request *req)
205 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
207 /* Read Local Supported Features */
208 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
210 /* Read Local Version */
211 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
213 /* Read BD Address */
214 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
217 static void amp_init1(struct hci_request *req)
219 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
221 /* Read Local Version */
222 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
224 /* Read Local Supported Commands */
225 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
227 /* Read Local AMP Info */
228 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
230 /* Read Data Blk size */
231 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
233 /* Read Flow Control Mode */
234 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
236 /* Read Location Data */
237 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
240 static int amp_init2(struct hci_request *req)
242 /* Read Local Supported Features. Not all AMP controllers
243 * support this so it's placed conditionally in the second
246 if (req->hdev->commands[14] & 0x20)
247 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
252 static int hci_init1_req(struct hci_request *req, unsigned long opt)
254 struct hci_dev *hdev = req->hdev;
256 BT_DBG("%s %ld", hdev->name, opt);
259 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
260 hci_reset_req(req, 0);
262 switch (hdev->dev_type) {
270 BT_ERR("Unknown device type %d", hdev->dev_type);
277 static void bredr_setup(struct hci_request *req)
282 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
283 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
285 /* Read Class of Device */
286 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
288 /* Read Local Name */
289 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
291 /* Read Voice Setting */
292 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
294 /* Read Number of Supported IAC */
295 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
297 /* Read Current IAC LAP */
298 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
300 /* Clear Event Filters */
301 flt_type = HCI_FLT_CLEAR_ALL;
302 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
304 /* Connection accept timeout ~20 secs */
305 param = cpu_to_le16(0x7d00);
306 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
309 static void le_setup(struct hci_request *req)
311 struct hci_dev *hdev = req->hdev;
313 /* Read LE Buffer Size */
314 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
316 /* Read LE Local Supported Features */
317 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
319 /* Read LE Supported States */
320 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
322 /* LE-only controllers have LE implicitly enabled */
323 if (!lmp_bredr_capable(hdev))
324 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
/* Build and queue the HCI Set Event Mask command, enabling only the events
 * the controller can actually generate based on its LMP features and
 * supported-commands bitmask.
 *
 * NOTE(review): this listing carries stale embedded line numbers and has had
 * blank/brace lines dropped by extraction; code text below is untouched.
 * The branch structure (e.g. the else-arm for LE-only defaults) appears
 * truncated — verify against the full file before editing.
 */
327 static void hci_setup_event_mask(struct hci_request *req)
329 struct hci_dev *hdev = req->hdev;
331 /* The second byte is 0xff instead of 0x9f (two reserved bits
332 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
335 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
337 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
338 * any event mask for pre 1.2 devices.
340 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
343 if (lmp_bredr_capable(hdev)) {
344 events[4] |= 0x01; /* Flow Specification Complete */
/* Presumably the following lines are the else-arm for LE-only devices:
 * the mask is rebuilt from zero with only mandatory events — TODO confirm.
 */
346 /* Use a different default for LE-only devices */
347 memset(events, 0, sizeof(events));
348 events[1] |= 0x20; /* Command Complete */
349 events[1] |= 0x40; /* Command Status */
350 events[1] |= 0x80; /* Hardware Error */
352 /* If the controller supports the Disconnect command, enable
353 * the corresponding event. In addition enable packet flow
354 * control related events.
356 if (hdev->commands[0] & 0x20) {
357 events[0] |= 0x10; /* Disconnection Complete */
358 events[2] |= 0x04; /* Number of Completed Packets */
359 events[3] |= 0x02; /* Data Buffer Overflow */
362 /* If the controller supports the Read Remote Version
363 * Information command, enable the corresponding event.
365 if (hdev->commands[2] & 0x80)
366 events[1] |= 0x08; /* Read Remote Version Information
370 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
371 events[0] |= 0x80; /* Encryption Change */
372 events[5] |= 0x80; /* Encryption Key Refresh Complete */
/* From here on, events common to BR/EDR-capable controllers are added
 * per LMP feature bits.
 */
376 if (lmp_inq_rssi_capable(hdev) ||
377 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
378 events[4] |= 0x02; /* Inquiry Result with RSSI */
380 if (lmp_ext_feat_capable(hdev))
381 events[4] |= 0x04; /* Read Remote Extended Features Complete */
383 if (lmp_esco_capable(hdev)) {
384 events[5] |= 0x08; /* Synchronous Connection Complete */
385 events[5] |= 0x10; /* Synchronous Connection Changed */
388 if (lmp_sniffsubr_capable(hdev))
389 events[5] |= 0x20; /* Sniff Subrating */
391 if (lmp_pause_enc_capable(hdev))
392 events[5] |= 0x80; /* Encryption Key Refresh Complete */
394 if (lmp_ext_inq_capable(hdev))
395 events[5] |= 0x40; /* Extended Inquiry Result */
397 if (lmp_no_flush_capable(hdev))
398 events[7] |= 0x01; /* Enhanced Flush Complete */
400 if (lmp_lsto_capable(hdev))
401 events[6] |= 0x80; /* Link Supervision Timeout Changed */
403 if (lmp_ssp_capable(hdev)) {
404 events[6] |= 0x01; /* IO Capability Request */
405 events[6] |= 0x02; /* IO Capability Response */
406 events[6] |= 0x04; /* User Confirmation Request */
407 events[6] |= 0x08; /* User Passkey Request */
408 events[6] |= 0x10; /* Remote OOB Data Request */
409 events[6] |= 0x20; /* Simple Pairing Complete */
410 events[7] |= 0x04; /* User Passkey Notification */
411 events[7] |= 0x08; /* Keypress Notification */
412 events[7] |= 0x10; /* Remote Host Supported
413 * Features Notification
417 if (lmp_le_capable(hdev))
418 events[7] |= 0x20; /* LE Meta-Event */
420 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Stage-2 init request: AMP controllers take the short amp_init2() path;
 * primary controllers run BR/EDR and/or LE setup plus SSP/EIR, inquiry
 * mode and authentication configuration.
 *
 * NOTE(review): this listing carries stale embedded line numbers and has had
 * blank/brace/else lines dropped by extraction; code text below is untouched.
 */
423 static int hci_init2_req(struct hci_request *req, unsigned long opt)
425 struct hci_dev *hdev = req->hdev;
427 if (hdev->dev_type == HCI_AMP)
428 return amp_init2(req);
430 if (lmp_bredr_capable(hdev))
/* NOTE(review): an else-branch appears dropped here — the clear-flag call
 * below presumably runs only for non-BR/EDR controllers; confirm.
 */
433 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
435 if (lmp_le_capable(hdev))
438 /* All Bluetooth 1.2 and later controllers should support the
439 * HCI command for reading the local supported commands.
441 * Unfortunately some controllers indicate Bluetooth 1.2 support,
442 * but do not have support for this command. If that is the case,
443 * the driver can quirk the behavior and skip reading the local
444 * supported commands.
446 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
447 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
448 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
450 if (lmp_ssp_capable(hdev)) {
451 /* When SSP is available, then the host features page
452 * should also be available as well. However some
453 * controllers list the max_page as 0 as long as SSP
454 * has not been enabled. To achieve proper debugging
455 * output, force the minimum max_page to 1 at least.
457 hdev->max_page = 0x01;
459 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
462 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
463 sizeof(mode), &mode);
465 struct hci_cp_write_eir cp;
467 memset(hdev->eir, 0, sizeof(hdev->eir));
468 memset(&cp, 0, sizeof(cp));
470 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
474 if (lmp_inq_rssi_capable(hdev) ||
475 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
478 /* If Extended Inquiry Result events are supported, then
479 * they are clearly preferred over Inquiry Result with RSSI
482 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
484 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
487 if (lmp_inq_tx_pwr_capable(hdev))
488 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
490 if (lmp_ext_feat_capable(hdev)) {
491 struct hci_cp_read_local_ext_features cp;
494 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
498 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
500 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
507 static void hci_setup_link_policy(struct hci_request *req)
509 struct hci_dev *hdev = req->hdev;
510 struct hci_cp_write_def_link_policy cp;
513 if (lmp_rswitch_capable(hdev))
514 link_policy |= HCI_LP_RSWITCH;
515 if (lmp_hold_capable(hdev))
516 link_policy |= HCI_LP_HOLD;
517 if (lmp_sniff_capable(hdev))
518 link_policy |= HCI_LP_SNIFF;
519 if (lmp_park_capable(hdev))
520 link_policy |= HCI_LP_PARK;
522 cp.policy = cpu_to_le16(link_policy);
523 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
526 static void hci_set_le_support(struct hci_request *req)
528 struct hci_dev *hdev = req->hdev;
529 struct hci_cp_write_le_host_supported cp;
531 /* LE-only devices do not support explicit enablement */
532 if (!lmp_bredr_capable(hdev))
535 memset(&cp, 0, sizeof(cp));
537 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
542 if (cp.le != lmp_host_le_capable(hdev))
543 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
547 static void hci_set_event_mask_page_2(struct hci_request *req)
549 struct hci_dev *hdev = req->hdev;
550 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
552 /* If Connectionless Slave Broadcast master role is supported
553 * enable all necessary events for it.
555 if (lmp_csb_master_capable(hdev)) {
556 events[1] |= 0x40; /* Triggered Clock Capture */
557 events[1] |= 0x80; /* Synchronization Train Complete */
558 events[2] |= 0x10; /* Slave Page Response Timeout */
559 events[2] |= 0x20; /* CSB Channel Map Change */
562 /* If Connectionless Slave Broadcast slave role is supported
563 * enable all necessary events for it.
565 if (lmp_csb_slave_capable(hdev)) {
566 events[2] |= 0x01; /* Synchronization Train Received */
567 events[2] |= 0x02; /* CSB Receive */
568 events[2] |= 0x04; /* CSB Timeout */
569 events[2] |= 0x08; /* Truncated Page Complete */
572 /* Enable Authenticated Payload Timeout Expired event if supported */
573 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
576 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage-3 init request: event mask, stored link keys, link policy, page
 * scan parameters, and — on LE-capable controllers — the LE event mask
 * and LE parameter reads gated on the supported-commands bitmask.
 *
 * NOTE(review): this listing carries stale embedded line numbers and has had
 * blank/brace lines (and some declarations such as the LE "events" array)
 * dropped by extraction; code text below is untouched.
 */
579 static int hci_init3_req(struct hci_request *req, unsigned long opt)
581 struct hci_dev *hdev = req->hdev;
584 hci_setup_event_mask(req);
586 if (hdev->commands[6] & 0x20 &&
587 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
588 struct hci_cp_read_stored_link_key cp;
590 bacpy(&cp.bdaddr, BDADDR_ANY);
592 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
595 if (hdev->commands[5] & 0x10)
596 hci_setup_link_policy(req);
598 if (hdev->commands[8] & 0x01)
599 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
601 /* Some older Broadcom based Bluetooth 1.2 controllers do not
602 * support the Read Page Scan Type command. Check support for
603 * this command in the bit mask of supported commands.
605 if (hdev->commands[13] & 0x01)
606 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
/* The LE branch below builds an LE event mask (array declaration dropped
 * from this listing) and queues LE parameter reads.
 */
608 if (lmp_le_capable(hdev)) {
611 memset(events, 0, sizeof(events));
613 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
614 events[0] |= 0x10; /* LE Long Term Key Request */
616 /* If controller supports the Connection Parameters Request
617 * Link Layer Procedure, enable the corresponding event.
619 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
620 events[0] |= 0x20; /* LE Remote Connection
624 /* If the controller supports the Data Length Extension
625 * feature, enable the corresponding event.
627 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
628 events[0] |= 0x40; /* LE Data Length Change */
630 /* If the controller supports Extended Scanner Filter
631 * Policies, enable the correspondig event.
633 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
634 events[1] |= 0x04; /* LE Direct Advertising
638 /* If the controller supports the LE Set Scan Enable command,
639 * enable the corresponding advertising report event.
641 if (hdev->commands[26] & 0x08)
642 events[0] |= 0x02; /* LE Advertising Report */
644 /* If the controller supports the LE Create Connection
645 * command, enable the corresponding event.
647 if (hdev->commands[26] & 0x10)
648 events[0] |= 0x01; /* LE Connection Complete */
650 /* If the controller supports the LE Connection Update
651 * command, enable the corresponding event.
653 if (hdev->commands[27] & 0x04)
654 events[0] |= 0x04; /* LE Connection Update
658 /* If the controller supports the LE Read Remote Used Features
659 * command, enable the corresponding event.
661 if (hdev->commands[27] & 0x20)
662 events[0] |= 0x08; /* LE Read Remote Used
666 /* If the controller supports the LE Read Local P-256
667 * Public Key command, enable the corresponding event.
669 if (hdev->commands[34] & 0x02)
670 events[0] |= 0x80; /* LE Read Local P-256
671 * Public Key Complete
674 /* If the controller supports the LE Generate DHKey
675 * command, enable the corresponding event.
677 if (hdev->commands[34] & 0x04)
678 events[1] |= 0x01; /* LE Generate DHKey Complete */
680 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
683 if (hdev->commands[25] & 0x40) {
684 /* Read LE Advertising Channel TX Power */
685 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
688 if (hdev->commands[26] & 0x40) {
689 /* Read LE White List Size */
690 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
694 if (hdev->commands[26] & 0x80) {
695 /* Clear LE White List */
696 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
699 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
700 /* Read LE Maximum Data Length */
701 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
703 /* Read LE Suggested Default Data Length */
704 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
707 hci_set_le_support(req);
710 /* Read features beyond page 1 if available */
711 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
712 struct hci_cp_read_local_ext_features cp;
715 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage-4 init request: delete stored link keys, second event mask page,
 * codec list, MWS transport config, sync train parameters, Secure
 * Connections support and LE data length defaults — each gated on the
 * controller's supported-commands bits or feature flags.
 *
 * NOTE(review): this listing carries stale embedded line numbers and has had
 * blank/brace lines dropped by extraction; code text below is untouched.
 */
722 static int hci_init4_req(struct hci_request *req, unsigned long opt)
724 struct hci_dev *hdev = req->hdev;
726 /* Some Broadcom based Bluetooth controllers do not support the
727 * Delete Stored Link Key command. They are clearly indicating its
728 * absence in the bit mask of supported commands.
730 * Check the supported commands and only if the the command is marked
731 * as supported send it. If not supported assume that the controller
732 * does not have actual support for stored link keys which makes this
733 * command redundant anyway.
735 * Some controllers indicate that they support handling deleting
736 * stored link keys, but they don't. The quirk lets a driver
737 * just disable this command.
739 if (hdev->commands[6] & 0x80 &&
740 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
741 struct hci_cp_delete_stored_link_key cp;
743 bacpy(&cp.bdaddr, BDADDR_ANY);
744 cp.delete_all = 0x01;
745 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
749 /* Set event mask page 2 if the HCI command for it is supported */
750 if (hdev->commands[22] & 0x04)
751 hci_set_event_mask_page_2(req);
753 /* Read local codec list if the HCI command is supported */
754 if (hdev->commands[29] & 0x20)
755 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
757 /* Get MWS transport configuration if the HCI command is supported */
758 if (hdev->commands[30] & 0x08)
759 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
761 /* Check for Synchronization Train support */
762 if (lmp_sync_train_capable(hdev))
763 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
765 /* Enable Secure Connections if supported and configured */
766 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
767 bredr_sc_enabled(hdev)) {
770 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
771 sizeof(support), &support);
774 /* Set Suggested Default Data Length to maximum if supported */
775 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
776 struct hci_cp_le_write_def_data_len cp;
778 cp.tx_len = hdev->le_max_tx_len;
779 cp.tx_time = hdev->le_max_tx_time;
780 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
/* Run the staged controller initialization (init1..init4 synchronous
 * requests) and create debugfs entries during setup/config phases.
 *
 * NOTE(review): this listing carries stale embedded line numbers and has had
 * blank lines and the error-check lines after each __hci_req_sync() call
 * dropped by extraction; code text below is untouched. Presumably each err
 * is checked and returned on failure — confirm against the full file.
 */
786 static int __hci_init(struct hci_dev *hdev)
790 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
794 if (hci_dev_test_flag(hdev, HCI_SETUP))
795 hci_debugfs_create_basic(hdev);
797 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
801 /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
802 * BR/EDR/LE type controllers. AMP controllers only need the
803 * first two stages of init.
805 if (hdev->dev_type != HCI_PRIMARY)
808 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
812 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
816 /* This function is only called when the controller is actually in
817 * configured state. When the controller is marked as unconfigured,
818 * this initialization procedure is not run.
820 * It means that it is possible that a controller runs through its
821 * setup phase and then discovers missing settings. If that is the
822 * case, then this function will not be called. It then will only
823 * be called during the config phase.
825 * So only when in setup phase or config phase, create the debugfs
826 * entries and register the SMP channels.
828 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
829 !hci_dev_test_flag(hdev, HCI_CONFIG))
832 hci_debugfs_create_common(hdev);
834 if (lmp_bredr_capable(hdev))
835 hci_debugfs_create_bredr(hdev);
837 if (lmp_le_capable(hdev))
838 hci_debugfs_create_le(hdev);
843 static int hci_init0_req(struct hci_request *req, unsigned long opt)
845 struct hci_dev *hdev = req->hdev;
847 BT_DBG("%s %ld", hdev->name, opt);
850 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
851 hci_reset_req(req, 0);
853 /* Read Local Version */
854 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
856 /* Read BD Address */
857 if (hdev->set_bdaddr)
858 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
863 static int __hci_unconf_init(struct hci_dev *hdev)
867 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
870 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
874 if (hci_dev_test_flag(hdev, HCI_SETUP))
875 hci_debugfs_create_basic(hdev);
880 static int hci_scan_req(struct hci_request *req, unsigned long opt)
884 BT_DBG("%s %x", req->hdev->name, scan);
886 /* Inquiry and Page scans */
887 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
891 static int hci_auth_req(struct hci_request *req, unsigned long opt)
895 BT_DBG("%s %x", req->hdev->name, auth);
898 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
902 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
906 BT_DBG("%s %x", req->hdev->name, encrypt);
909 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
913 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
915 __le16 policy = cpu_to_le16(opt);
917 BT_DBG("%s %x", req->hdev->name, policy);
919 /* Default link policy */
920 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
924 /* Get HCI device by index.
925 * Device is held on return. */
926 struct hci_dev *hci_dev_get(int index)
928 struct hci_dev *hdev = NULL, *d;
935 read_lock(&hci_dev_list_lock);
936 list_for_each_entry(d, &hci_dev_list, list) {
937 if (d->id == index) {
938 hdev = hci_dev_hold(d);
942 read_unlock(&hci_dev_list_lock);
946 /* ---- Inquiry support ---- */
948 bool hci_discovery_active(struct hci_dev *hdev)
950 struct discovery_state *discov = &hdev->discovery;
952 switch (discov->state) {
953 case DISCOVERY_FINDING:
954 case DISCOVERY_RESOLVING:
962 void hci_discovery_set_state(struct hci_dev *hdev, int state)
964 int old_state = hdev->discovery.state;
966 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
968 if (old_state == state)
971 hdev->discovery.state = state;
974 case DISCOVERY_STOPPED:
975 hci_update_background_scan(hdev);
977 if (old_state != DISCOVERY_STARTING)
978 mgmt_discovering(hdev, 0);
980 case DISCOVERY_STARTING:
982 case DISCOVERY_FINDING:
983 mgmt_discovering(hdev, 1);
985 case DISCOVERY_RESOLVING:
987 case DISCOVERY_STOPPING:
992 void hci_inquiry_cache_flush(struct hci_dev *hdev)
994 struct discovery_state *cache = &hdev->discovery;
995 struct inquiry_entry *p, *n;
997 list_for_each_entry_safe(p, n, &cache->all, all) {
1002 INIT_LIST_HEAD(&cache->unknown);
1003 INIT_LIST_HEAD(&cache->resolve);
1006 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1009 struct discovery_state *cache = &hdev->discovery;
1010 struct inquiry_entry *e;
1012 BT_DBG("cache %p, %pMR", cache, bdaddr);
1014 list_for_each_entry(e, &cache->all, all) {
1015 if (!bacmp(&e->data.bdaddr, bdaddr))
1022 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1025 struct discovery_state *cache = &hdev->discovery;
1026 struct inquiry_entry *e;
1028 BT_DBG("cache %p, %pMR", cache, bdaddr);
1030 list_for_each_entry(e, &cache->unknown, list) {
1031 if (!bacmp(&e->data.bdaddr, bdaddr))
1038 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1042 struct discovery_state *cache = &hdev->discovery;
1043 struct inquiry_entry *e;
1045 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1047 list_for_each_entry(e, &cache->resolve, list) {
1048 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1050 if (!bacmp(&e->data.bdaddr, bdaddr))
1057 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1058 struct inquiry_entry *ie)
1060 struct discovery_state *cache = &hdev->discovery;
1061 struct list_head *pos = &cache->resolve;
1062 struct inquiry_entry *p;
1064 list_del(&ie->list);
1066 list_for_each_entry(p, &cache->resolve, list) {
1067 if (p->name_state != NAME_PENDING &&
1068 abs(p->data.rssi) >= abs(ie->data.rssi))
1073 list_add(&ie->list, pos);
/* Insert or update an inquiry cache entry for a discovered device and
 * compute the MGMT "device found" flags (legacy pairing / confirm name).
 *
 * NOTE(review): this listing carries stale embedded line numbers and has had
 * blank/brace lines dropped by extraction (e.g. the allocation-failure check
 * after kzalloc and the name_known branches are truncated); code text below
 * is untouched — verify against the full file before editing.
 */
1076 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1079 struct discovery_state *cache = &hdev->discovery;
1080 struct inquiry_entry *ie;
1083 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* A fresh inquiry result invalidates any cached remote OOB data */
1085 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1087 if (!data->ssp_mode)
1088 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1090 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1092 if (!ie->data.ssp_mode)
1093 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* A changed RSSI re-sorts the pending name-resolution queue */
1095 if (ie->name_state == NAME_NEEDED &&
1096 data->rssi != ie->data.rssi) {
1097 ie->data.rssi = data->rssi;
1098 hci_inquiry_cache_update_resolve(hdev, ie);
1104 /* Entry not in the cache. Add new one. */
1105 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1107 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1111 list_add(&ie->all, &cache->all);
1114 ie->name_state = NAME_KNOWN;
1116 ie->name_state = NAME_NOT_KNOWN;
1117 list_add(&ie->list, &cache->unknown);
1121 if (name_known && ie->name_state != NAME_KNOWN &&
1122 ie->name_state != NAME_PENDING) {
1123 ie->name_state = NAME_KNOWN;
1124 list_del(&ie->list);
1127 memcpy(&ie->data, data, sizeof(*data));
1128 ie->timestamp = jiffies;
1129 cache->timestamp = jiffies;
1131 if (ie->name_state == NAME_NOT_KNOWN)
1132 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1138 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1140 struct discovery_state *cache = &hdev->discovery;
1141 struct inquiry_info *info = (struct inquiry_info *) buf;
1142 struct inquiry_entry *e;
1145 list_for_each_entry(e, &cache->all, all) {
1146 struct inquiry_data *data = &e->data;
1151 bacpy(&info->bdaddr, &data->bdaddr);
1152 info->pscan_rep_mode = data->pscan_rep_mode;
1153 info->pscan_period_mode = data->pscan_period_mode;
1154 info->pscan_mode = data->pscan_mode;
1155 memcpy(info->dev_class, data->dev_class, 3);
1156 info->clock_offset = data->clock_offset;
1162 BT_DBG("cache %p, copied %d", cache, copied);
1166 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1168 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1169 struct hci_dev *hdev = req->hdev;
1170 struct hci_cp_inquiry cp;
1172 BT_DBG("%s", hdev->name);
1174 if (test_bit(HCI_INQUIRY, &hdev->flags))
1178 memcpy(&cp.lap, &ir->lap, 3);
1179 cp.length = ir->length;
1180 cp.num_rsp = ir->num_rsp;
1181 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* ioctl handler (HCIINQUIRY): run an inquiry on the requested device,
 * optionally flushing the cache first, then copy the results back to
 * userspace as struct inquiry_info records.
 *
 * NOTE(review): this listing carries stale embedded line numbers and has had
 * blank/brace lines dropped by extraction — the error-return statements
 * after each precondition check, the hci_dev_lock() calls paired with the
 * visible unlocks, and the cleanup/put path are truncated; code text below
 * is untouched. Verify against the full file before editing.
 */
1186 int hci_inquiry(void __user *arg)
1188 __u8 __user *ptr = arg;
1189 struct hci_inquiry_req ir;
1190 struct hci_dev *hdev;
1191 int err = 0, do_inquiry = 0, max_rsp;
1195 if (copy_from_user(&ir, ptr, sizeof(ir)))
1198 hdev = hci_dev_get(ir.dev_id);
/* Precondition checks: user channel, unconfigured, AMP-only and
 * BR/EDR-disabled devices cannot run a BR/EDR inquiry.
 */
1202 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1207 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1212 if (hdev->dev_type != HCI_PRIMARY) {
1217 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1223 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1224 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1225 hci_inquiry_cache_flush(hdev);
1228 hci_dev_unlock(hdev);
/* ir.length is in 1.28s units per the HCI spec; ~2000ms per unit here */
1230 timeo = ir.length * msecs_to_jiffies(2000);
1233 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1238 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1239 * cleared). If it is interrupted by a signal, return -EINTR.
1241 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1242 TASK_INTERRUPTIBLE))
1246 /* for unlimited number of responses we will use buffer with
1249 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1251 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1252 * copy it to the user space.
1254 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1261 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1262 hci_dev_unlock(hdev);
1264 BT_DBG("num_rsp %d", ir.num_rsp);
1266 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1268 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Power up and initialize an HCI controller.
 *
 * Serialized via hci_req_sync_lock().  Validates rfkill state and the
 * device address, calls the driver open() callback, optionally runs the
 * vendor setup()/__hci_unconf_init()/__hci_init() sequence, and on
 * success marks the device HCI_UP and notifies listeners.  On failure
 * it unwinds: flushes work items, purges queues and closes the driver.
 *
 * NOTE(review): this extract has interior lines elided (gaps in the
 * embedded original numbering), so error labels, returns and closing
 * braces are not visible here — do not edit logic from this view.
 */
1281 static int hci_dev_do_open(struct hci_dev *hdev)
1285 BT_DBG("%s %p", hdev->name, hdev);
1287 hci_req_sync_lock(hdev);
1289 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1294 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1295 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1296 /* Check for rfkill but allow the HCI setup stage to
1297 * proceed (which in itself doesn't cause any RF activity).
1299 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1304 /* Check for valid public address or a configured static
1305 * random address, but let the HCI setup proceed to
1306 * be able to determine if there is a public address
1309 * In case of user channel usage, it is not important
1310 * if a public address or static random address is
1313 * This check is only valid for BR/EDR controllers
1314 * since AMP controllers do not have an address.
1316 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1317 hdev->dev_type == HCI_PRIMARY &&
1318 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1319 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1320 ret = -EADDRNOTAVAIL;
1325 if (test_bit(HCI_UP, &hdev->flags)) {
1330 if (hdev->open(hdev)) {
1335 set_bit(HCI_RUNNING, &hdev->flags);
1336 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1338 atomic_set(&hdev->cmd_cnt, 1);
1339 set_bit(HCI_INIT, &hdev->flags);
1341 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1342 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1345 ret = hdev->setup(hdev);
1347 /* The transport driver can set these quirks before
1348 * creating the HCI device or in its setup callback.
1350 * In case any of them is set, the controller has to
1351 * start up as unconfigured.
1353 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1354 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1355 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1357 /* For an unconfigured controller it is required to
1358 * read at least the version information provided by
1359 * the Read Local Version Information command.
1361 * If the set_bdaddr driver callback is provided, then
1362 * also the original Bluetooth public device address
1363 * will be read using the Read BD Address command.
1365 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1366 ret = __hci_unconf_init(hdev);
1369 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1370 /* If public address change is configured, ensure that
1371 * the address gets programmed. If the driver does not
1372 * support changing the public address, fail the power
1375 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1377 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1379 ret = -EADDRNOTAVAIL;
1383 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1384 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1385 ret = __hci_init(hdev);
1386 if (!ret && hdev->post_init)
1387 ret = hdev->post_init(hdev);
1391 /* If the HCI Reset command is clearing all diagnostic settings,
1392 * then they need to be reprogrammed after the init procedure
1395 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1396 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1397 ret = hdev->set_diag(hdev, true);
1399 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark the device up and notify mgmt/socket listeners */
1403 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1404 set_bit(HCI_UP, &hdev->flags);
1405 hci_sock_dev_event(hdev, HCI_DEV_UP);
1406 hci_leds_update_powered(hdev, true);
1407 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1408 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1409 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1410 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1411 hci_dev_test_flag(hdev, HCI_MGMT) &&
1412 hdev->dev_type == HCI_PRIMARY) {
1413 ret = __hci_req_hci_power_on(hdev);
1414 mgmt_power_on(hdev, ret);
1417 /* Init failed, cleanup */
1418 flush_work(&hdev->tx_work);
1419 flush_work(&hdev->cmd_work);
1420 flush_work(&hdev->rx_work);
1422 skb_queue_purge(&hdev->cmd_q);
1423 skb_queue_purge(&hdev->rx_q);
/* Drop the last sent command so no stale skb outlives the close */
1428 if (hdev->sent_cmd) {
1429 kfree_skb(hdev->sent_cmd);
1430 hdev->sent_cmd = NULL;
1433 clear_bit(HCI_RUNNING, &hdev->flags);
1434 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
/* Keep only the HCI_RAW bit across the failed open */
1437 hdev->flags &= BIT(HCI_RAW);
1441 hci_req_sync_unlock(hdev);
1445 /* ---- HCI ioctl helpers ---- */
/* ioctl-level open: look up the device by index, enforce the
 * unconfigured/user-channel policy, flush pending power work and the
 * request workqueue, then delegate to hci_dev_do_open().
 */
1447 int hci_dev_open(__u16 dev)
1449 struct hci_dev *hdev;
1452 hdev = hci_dev_get(dev);
1456 /* Devices that are marked as unconfigured can only be powered
1457 * up as user channel. Trying to bring them up as normal devices
1458 * will result into a failure. Only user channel operation is
1461 * When this function is called for a user channel, the flag
1462 * HCI_USER_CHANNEL will be set first before attempting to
1465 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1466 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1471 /* We need to ensure that no other power on/off work is pending
1472 * before proceeding to call hci_dev_do_open. This is
1473 * particularly important if the setup procedure has not yet
1476 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1477 cancel_delayed_work(&hdev->power_off);
1479 /* After this call it is guaranteed that the setup procedure
1480 * has finished. This means that error conditions like RFKILL
1481 * or no valid public or static random address apply.
1483 flush_workqueue(hdev->req_workqueue);
1485 /* For controllers not using the management interface and that
1486 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1487 * so that pairing works for them. Once the management interface
1488 * is in use this bit will be cleared again and userspace has
1489 * to explicitly enable it.
1491 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1492 !hci_dev_test_flag(hdev, HCI_MGMT))
1493 hci_dev_set_flag(hdev, HCI_BONDABLE);
1495 err = hci_dev_do_open(hdev);
1502 /* This function requires the caller holds hdev->lock */
/* Drop references held by pending LE connection parameters and remove
 * each entry from its action list (list_del_init keeps the node valid).
 */
1503 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1505 struct hci_conn_params *p;
1507 list_for_each_entry(p, &hdev->le_conn_params, list) {
1509 hci_conn_drop(p->conn);
1510 hci_conn_put(p->conn);
1513 list_del_init(&p->action);
1516 BT_DBG("All LE pending actions cleared");
/* Power down an HCI controller: run the vendor shutdown hook if
 * applicable, cancel and flush outstanding work, flush caches and
 * connections, optionally issue HCI Reset (HCI_QUIRK_RESET_ON_CLOSE),
 * purge all queues and reset the software state of the device.
 *
 * NOTE(review): interior lines are elided in this extract (gaps in the
 * embedded numbering) — returns and some braces are not visible.
 */
1519 int hci_dev_do_close(struct hci_dev *hdev)
1523 BT_DBG("%s %p", hdev->name, hdev);
1525 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1526 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1527 test_bit(HCI_UP, &hdev->flags)) {
1528 /* Execute vendor specific shutdown routine */
1530 hdev->shutdown(hdev);
1533 cancel_delayed_work(&hdev->power_off);
1535 hci_request_cancel_all(hdev);
1536 hci_req_sync_lock(hdev);
1538 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1539 cancel_delayed_work_sync(&hdev->cmd_timer);
1540 hci_req_sync_unlock(hdev);
1544 hci_leds_update_powered(hdev, false);
1546 /* Flush RX and TX works */
1547 flush_work(&hdev->tx_work);
1548 flush_work(&hdev->rx_work);
1550 if (hdev->discov_timeout > 0) {
1551 hdev->discov_timeout = 0;
1552 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1553 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1556 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1557 cancel_delayed_work(&hdev->service_cache);
1559 if (hci_dev_test_flag(hdev, HCI_MGMT))
1560 cancel_delayed_work_sync(&hdev->rpa_expired);
1562 /* Avoid potential lockdep warnings from the *_flush() calls by
1563 * ensuring the workqueue is empty up front.
1565 drain_workqueue(hdev->workqueue);
1569 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1571 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1573 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1574 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1575 hci_dev_test_flag(hdev, HCI_MGMT))
1576 __mgmt_power_off(hdev);
1578 hci_inquiry_cache_flush(hdev);
1579 hci_pend_le_actions_clear(hdev);
1580 hci_conn_hash_flush(hdev);
1581 hci_dev_unlock(hdev);
1583 smp_unregister(hdev);
1585 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
/* Reset the controller (unless auto-off or unconfigured) before the
 * final queue purge so the hardware state matches the software state.
 */
1591 skb_queue_purge(&hdev->cmd_q);
1592 atomic_set(&hdev->cmd_cnt, 1);
1593 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1594 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1595 set_bit(HCI_INIT, &hdev->flags);
1596 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1597 clear_bit(HCI_INIT, &hdev->flags);
1600 /* flush cmd work */
1601 flush_work(&hdev->cmd_work);
1604 skb_queue_purge(&hdev->rx_q);
1605 skb_queue_purge(&hdev->cmd_q);
1606 skb_queue_purge(&hdev->raw_q);
1608 /* Drop last sent command */
1609 if (hdev->sent_cmd) {
1610 cancel_delayed_work_sync(&hdev->cmd_timer);
1611 kfree_skb(hdev->sent_cmd);
1612 hdev->sent_cmd = NULL;
1615 clear_bit(HCI_RUNNING, &hdev->flags);
1616 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1618 /* After this point our queues are empty
1619 * and no tasks are scheduled. */
1623 hdev->flags &= BIT(HCI_RAW);
1624 hci_dev_clear_volatile_flags(hdev);
1626 /* Controller radio is available but is currently powered down */
1627 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1629 memset(hdev->eir, 0, sizeof(hdev->eir));
1630 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1631 bacpy(&hdev->random_addr, BDADDR_ANY);
1633 hci_req_sync_unlock(hdev);
/* ioctl-level close: reject user-channel devices, cancel a pending
 * auto-power-off and delegate to hci_dev_do_close().
 */
1639 int hci_dev_close(__u16 dev)
1641 struct hci_dev *hdev;
1644 hdev = hci_dev_get(dev);
1648 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1653 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1654 cancel_delayed_work(&hdev->power_off);
1656 err = hci_dev_do_close(hdev);
/* Reset a running controller under the request sync lock: drop queued
 * traffic, flush the inquiry cache and connection hash, reset the
 * flow-control counters and issue a synchronous HCI Reset.
 */
1663 static int hci_dev_do_reset(struct hci_dev *hdev)
1667 BT_DBG("%s %p", hdev->name, hdev);
1669 hci_req_sync_lock(hdev);
1672 skb_queue_purge(&hdev->rx_q);
1673 skb_queue_purge(&hdev->cmd_q);
1675 /* Avoid potential lockdep warnings from the *_flush() calls by
1676 * ensuring the workqueue is empty up front.
1678 drain_workqueue(hdev->workqueue);
1681 hci_inquiry_cache_flush(hdev);
1682 hci_conn_hash_flush(hdev);
1683 hci_dev_unlock(hdev);
/* Reset ACL/SCO/LE buffer accounting so flow control starts fresh */
1688 atomic_set(&hdev->cmd_cnt, 1);
1689 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1691 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1693 hci_req_sync_unlock(hdev);
/* ioctl-level reset: only valid for devices that are up, not claimed
 * by a user channel and already configured.
 */
1697 int hci_dev_reset(__u16 dev)
1699 struct hci_dev *hdev;
1702 hdev = hci_dev_get(dev);
1706 if (!test_bit(HCI_UP, &hdev->flags)) {
1711 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1716 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1721 err = hci_dev_do_reset(hdev);
/* ioctl helper: zero the per-device byte/error statistics, subject to
 * the same user-channel/unconfigured restrictions as reset.
 */
1728 int hci_dev_reset_stat(__u16 dev)
1730 struct hci_dev *hdev;
1733 hdev = hci_dev_get(dev);
1737 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1742 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1747 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Mirror a legacy HCISETSCAN change into the mgmt-visible
 * CONNECTABLE/DISCOVERABLE flags and, if anything changed while mgmt
 * is active, refresh advertising data and emit New Settings.
 */
1754 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1756 bool conn_changed, discov_changed;
1758 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1760 if ((scan & SCAN_PAGE))
1761 conn_changed = !hci_dev_test_and_set_flag(hdev,
1764 conn_changed = hci_dev_test_and_clear_flag(hdev,
1767 if ((scan & SCAN_INQUIRY)) {
1768 discov_changed = !hci_dev_test_and_set_flag(hdev,
1771 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1772 discov_changed = hci_dev_test_and_clear_flag(hdev,
1776 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1779 if (conn_changed || discov_changed) {
1780 /* In case this was disabled through mgmt */
1781 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1783 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1784 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1786 mgmt_new_settings(hdev);
/* Legacy HCISET* ioctl dispatcher: copies a hci_dev_req from
 * userspace, validates device state (no user channel, configured,
 * BR/EDR-capable primary controller) and applies the requested
 * setting, either via a synchronous HCI request or by writing the
 * hdev field directly.
 *
 * NOTE(review): the switch statement and most case labels are elided
 * in this extract; only fragments of the case bodies are visible.
 */
1790 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1792 struct hci_dev *hdev;
1793 struct hci_dev_req dr;
1796 if (copy_from_user(&dr, arg, sizeof(dr)))
1799 hdev = hci_dev_get(dr.dev_id);
1803 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1808 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1813 if (hdev->dev_type != HCI_PRIMARY) {
1818 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1825 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1826 HCI_INIT_TIMEOUT, NULL);
1830 if (!lmp_encrypt_capable(hdev)) {
1835 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1836 /* Auth must be enabled first */
1837 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1838 HCI_INIT_TIMEOUT, NULL);
1843 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1844 HCI_INIT_TIMEOUT, NULL);
1848 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1849 HCI_INIT_TIMEOUT, NULL);
1851 /* Ensure that the connectable and discoverable states
1852 * get correctly modified as this was a non-mgmt change.
1855 hci_update_scan_state(hdev, dr.dev_opt);
1859 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1860 HCI_INIT_TIMEOUT, NULL);
1863 case HCISETLINKMODE:
1864 hdev->link_mode = ((__u16) dr.dev_opt) &
1865 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1869 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high half-word and packet count low */
1873 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1874 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1878 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1879 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: copy the list of registered device ids and
 * their flags to userspace.  The caller supplies the maximum entry
 * count in the first __u16 of the buffer.
 */
1892 int hci_get_dev_list(void __user *arg)
1894 struct hci_dev *hdev;
1895 struct hci_dev_list_req *dl;
1896 struct hci_dev_req *dr;
1897 int n = 0, size, err;
1900 if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the request so the kzalloc below stays bounded (2 pages) */
1903 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1906 size = sizeof(*dl) + dev_num * sizeof(*dr);
1908 dl = kzalloc(size, GFP_KERNEL);
1914 read_lock(&hci_dev_list_lock);
1915 list_for_each_entry(hdev, &hci_dev_list, list) {
1916 unsigned long flags = hdev->flags;
1918 /* When the auto-off is configured it means the transport
1919 * is running, but in that case still indicate that the
1920 * device is actually down.
1922 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1923 flags &= ~BIT(HCI_UP);
1925 (dr + n)->dev_id = hdev->id;
1926 (dr + n)->dev_opt = flags;
1931 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found */
1934 size = sizeof(*dl) + n * sizeof(*dr);
1936 err = copy_to_user(arg, dl, size);
1939 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a hci_dev_info snapshot (name, address,
 * type, flags, MTUs, stats, features) for one device and copy it back
 * to userspace.
 */
1942 int hci_get_dev_info(void __user *arg)
1944 struct hci_dev *hdev;
1945 struct hci_dev_info di;
1946 unsigned long flags;
1949 if (copy_from_user(&di, arg, sizeof(di)))
1952 hdev = hci_dev_get(di.dev_id);
1956 /* When the auto-off is configured it means the transport
1957 * is running, but in that case still indicate that the
1958 * device is actually down.
1960 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1961 flags = hdev->flags & ~BIT(HCI_UP);
1963 flags = hdev->flags;
1965 strcpy(di.name, hdev->name);
1966 di.bdaddr = hdev->bdaddr;
1967 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1969 di.pkt_type = hdev->pkt_type;
/* LE-only controllers report their LE buffer settings in the ACL
 * fields of the legacy structure.
 */
1970 if (lmp_bredr_capable(hdev)) {
1971 di.acl_mtu = hdev->acl_mtu;
1972 di.acl_pkts = hdev->acl_pkts;
1973 di.sco_mtu = hdev->sco_mtu;
1974 di.sco_pkts = hdev->sco_pkts;
1976 di.acl_mtu = hdev->le_mtu;
1977 di.acl_pkts = hdev->le_pkts;
1981 di.link_policy = hdev->link_policy;
1982 di.link_mode = hdev->link_mode;
1984 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1985 memcpy(&di.features, &hdev->features, sizeof(di.features));
1987 if (copy_to_user(arg, &di, sizeof(di)))
1995 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: track the RFKILLED flag and close the device when
 * it is blocked outside of the setup/config phase.  User-channel
 * devices are left alone.
 */
1997 static int hci_rfkill_set_block(void *data, bool blocked)
1999 struct hci_dev *hdev = data;
2001 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2003 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2007 hci_dev_set_flag(hdev, HCI_RFKILLED);
2008 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2009 !hci_dev_test_flag(hdev, HCI_CONFIG))
2010 hci_dev_do_close(hdev);
2012 hci_dev_clear_flag(hdev, HCI_RFKILLED);
/* rfkill operations for HCI controllers; only block/unblock is used */
2018 static const struct rfkill_ops hci_rfkill_ops = {
2019 .set_block = hci_rfkill_set_block,
/* Deferred power-on work item: open the device, re-check conditions
 * that were deliberately ignored during setup (rfkill, unconfigured,
 * missing address), schedule auto-off if requested, and emit the
 * appropriate mgmt Index Added event when leaving SETUP or CONFIG.
 */
2022 static void hci_power_on(struct work_struct *work)
2024 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2027 BT_DBG("%s", hdev->name);
2029 if (test_bit(HCI_UP, &hdev->flags) &&
2030 hci_dev_test_flag(hdev, HCI_MGMT) &&
2031 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2032 cancel_delayed_work(&hdev->power_off);
2033 hci_req_sync_lock(hdev);
2034 err = __hci_req_hci_power_on(hdev);
2035 hci_req_sync_unlock(hdev);
2036 mgmt_power_on(hdev, err);
2040 err = hci_dev_do_open(hdev);
2043 mgmt_set_powered_failed(hdev, err);
2044 hci_dev_unlock(hdev);
2048 /* During the HCI setup phase, a few error conditions are
2049 * ignored and they need to be checked now. If they are still
2050 * valid, it is important to turn the device back off.
2052 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2053 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2054 (hdev->dev_type == HCI_PRIMARY &&
2055 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2056 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2057 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2058 hci_dev_do_close(hdev);
2059 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2060 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2061 HCI_AUTO_OFF_TIMEOUT);
2064 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2065 /* For unconfigured devices, set the HCI_RAW flag
2066 * so that userspace can easily identify them.
2068 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2069 set_bit(HCI_RAW, &hdev->flags);
2071 /* For fully configured devices, this will send
2072 * the Index Added event. For unconfigured devices,
2073 * it will send Unconfigured Index Added event.
2075 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2076 * and no event will be sent.
2078 mgmt_index_added(hdev);
2079 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2080 /* When the controller is now configured, then it
2081 * is important to clear the HCI_RAW flag.
2083 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2084 clear_bit(HCI_RAW, &hdev->flags);
2086 /* Powering on the controller with HCI_CONFIG set only
2087 * happens with the transition from unconfigured to
2088 * configured. This will send the Index Added event.
2090 mgmt_index_added(hdev);
/* Deferred power-off work item: simply closes the device */
2094 static void hci_power_off(struct work_struct *work)
2096 struct hci_dev *hdev = container_of(work, struct hci_dev,
2099 BT_DBG("%s", hdev->name);
2101 hci_dev_do_close(hdev);
/* Hardware-error recovery work item: report the error (via the
 * driver's hw_error hook when provided), then bounce the device by
 * closing and reopening it.
 */
2104 static void hci_error_reset(struct work_struct *work)
2106 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2108 BT_DBG("%s", hdev->name);
2111 hdev->hw_error(hdev, hdev->hw_error_code);
2113 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2114 hdev->hw_error_code);
2116 if (hci_dev_do_close(hdev))
2119 hci_dev_do_open(hdev);
/* Remove and free every registered service UUID on this device */
2122 void hci_uuids_clear(struct hci_dev *hdev)
2124 struct bt_uuid *uuid, *tmp;
2126 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2127 list_del(&uuid->list);
/* Remove every stored BR/EDR link key; freed via RCU so concurrent
 * lockless readers remain safe.
 */
2132 void hci_link_keys_clear(struct hci_dev *hdev)
2134 struct link_key *key;
2136 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2137 list_del_rcu(&key->list);
2138 kfree_rcu(key, rcu);
/* Remove every stored LE long term key (RCU-safe removal) */
2142 void hci_smp_ltks_clear(struct hci_dev *hdev)
2146 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2147 list_del_rcu(&k->list);
/* Remove every stored identity resolving key (RCU-safe removal) */
2152 void hci_smp_irks_clear(struct hci_dev *hdev)
2156 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2157 list_del_rcu(&k->list);
/* Look up the stored BR/EDR link key for the given peer address;
 * returns the entry on match (lockless RCU list walk).
 */
2162 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2167 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2168 if (bacmp(bdaddr, &k->bdaddr) == 0) {
/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on its type, the previous key type and the
 * authentication requirements negotiated on the connection.
 */
2178 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2179 u8 key_type, u8 old_key_type)
2182 if (key_type < 0x03)
2185 /* Debug keys are insecure so don't store them persistently */
2186 if (key_type == HCI_LK_DEBUG_COMBINATION)
2189 /* Changed combination key and there's no previous one */
2190 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2193 /* Security mode 3 case */
2197 /* BR/EDR key derived using SC from an LE link */
2198 if (conn->type == LE_LINK)
2201 /* Neither local nor remote side had no-bonding as requirement */
2202 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2205 /* Local side had dedicated bonding as requirement */
2206 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2209 /* Remote side had dedicated bonding as requirement */
2210 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2213 /* If none of the above criteria match, then don't store the key
/* Map an SMP LTK type to the HCI connection role it was created for */
2218 static u8 ltk_role(u8 type)
2220 if (type == SMP_LTK)
2221 return HCI_ROLE_MASTER;
2223 return HCI_ROLE_SLAVE;
/* Look up the LE long term key for a peer; Secure Connections keys
 * match either role, legacy keys must match the requested role.
 */
2226 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2227 u8 addr_type, u8 role)
2232 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2233 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2236 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
/* Resolve a resolvable private address to a stored IRK: first try the
 * cached RPA, then run the resolution math for each key and cache the
 * RPA on a hit.
 */
2246 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2248 struct smp_irk *irk;
2251 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2252 if (!bacmp(&irk->rpa, rpa)) {
2258 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2259 if (smp_irk_matches(hdev, irk->val, rpa)) {
2260 bacpy(&irk->rpa, rpa);
/* Look up a stored IRK by identity address and address type.  Random
 * addresses must be static (top two bits 11) to qualify as identity
 * addresses.
 */
2270 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2273 struct smp_irk *irk;
2275 /* Identity Address must be public or static random */
2276 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2280 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2281 if (addr_type == irk->addr_type &&
2282 bacmp(bdaddr, &irk->bdaddr) == 0) {
/* Store (or update) a BR/EDR link key for a peer.  Reuses an existing
 * entry when present, works around buggy controllers that report a
 * changed-combination key with no previous key, and reports via
 * *persistent whether the key should be kept across reboots.
 */
2292 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2293 bdaddr_t *bdaddr, u8 *val, u8 type,
2294 u8 pin_len, bool *persistent)
2296 struct link_key *key, *old_key;
2299 old_key = hci_find_link_key(hdev, bdaddr);
2301 old_key_type = old_key->type;
2304 old_key_type = conn ? conn->key_type : 0xff;
2305 key = kzalloc(sizeof(*key), GFP_KERNEL);
2308 list_add_rcu(&key->list, &hdev->link_keys);
2311 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2313 /* Some buggy controller combinations generate a changed
2314 * combination key for legacy pairing even when there's no
2316 if (type == HCI_LK_CHANGED_COMBINATION &&
2317 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2318 type = HCI_LK_COMBINATION;
2320 conn->key_type = type;
2323 bacpy(&key->bdaddr, bdaddr);
2324 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2325 key->pin_len = pin_len;
2327 if (type == HCI_LK_CHANGED_COMBINATION)
2328 key->type = old_key_type;
2333 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an LE long term key for a peer, keyed by address,
 * address type and the role derived from the LTK type.
 */
2339 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2340 u8 addr_type, u8 type, u8 authenticated,
2341 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2343 struct smp_ltk *key, *old_key;
2344 u8 role = ltk_role(type);
2346 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2350 key = kzalloc(sizeof(*key), GFP_KERNEL);
2353 list_add_rcu(&key->list, &hdev->long_term_keys);
2356 bacpy(&key->bdaddr, bdaddr);
2357 key->bdaddr_type = addr_type;
2358 memcpy(key->val, tk, sizeof(key->val));
2359 key->authenticated = authenticated;
2362 key->enc_size = enc_size;
/* Store (or update) an identity resolving key, together with the
 * peer's current resolvable private address.
 */
2368 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2369 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2371 struct smp_irk *irk;
2373 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2375 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2379 bacpy(&irk->bdaddr, bdaddr);
2380 irk->addr_type = addr_type;
2382 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2385 memcpy(irk->val, val, 16);
2386 bacpy(&irk->rpa, rpa);
/* Delete the stored BR/EDR link key for a peer (RCU-deferred free) */
2391 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2393 struct link_key *key;
2395 key = hci_find_link_key(hdev, bdaddr);
2399 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2401 list_del_rcu(&key->list);
2402 kfree_rcu(key, rcu);
/* Delete all LE long term keys matching the given address and address
 * type; returns -ENOENT when nothing was removed.
 */
2407 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2412 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2413 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2416 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2418 list_del_rcu(&k->list);
2423 return removed ? 0 : -ENOENT;
/* Delete the identity resolving key(s) matching the given identity
 * address and address type.
 */
2426 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2430 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2431 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2434 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2436 list_del_rcu(&k->list);
/* Report whether we hold pairing material for the given address:
 * a link key for BR/EDR, or an LTK for LE (after mapping the address
 * through any matching IRK to its identity address).
 */
2441 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2444 struct smp_irk *irk;
2447 if (type == BDADDR_BREDR) {
2448 if (hci_find_link_key(hdev, bdaddr))
2453 /* Convert to HCI addr type which struct smp_ltk uses */
2454 if (type == BDADDR_LE_PUBLIC)
2455 addr_type = ADDR_LE_DEV_PUBLIC;
2457 addr_type = ADDR_LE_DEV_RANDOM;
2459 irk = hci_get_irk(hdev, bdaddr, addr_type);
2461 bdaddr = &irk->bdaddr;
2462 addr_type = irk->addr_type;
2466 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2467 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2477 /* HCI command timer function */
/* Fired when a sent HCI command gets no completion in time: log the
 * stuck opcode (when known) and restart command processing by
 * resetting the command credit and kicking the cmd work.
 */
2478 static void hci_cmd_timeout(struct work_struct *work)
2480 struct hci_dev *hdev = container_of(work, struct hci_dev,
2483 if (hdev->sent_cmd) {
2484 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2485 u16 opcode = __le16_to_cpu(sent->opcode);
2487 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2489 BT_ERR("%s command tx timeout", hdev->name);
2492 atomic_set(&hdev->cmd_cnt, 1);
2493 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data by address and address type */
2496 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2497 bdaddr_t *bdaddr, u8 bdaddr_type)
2499 struct oob_data *data;
2501 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2502 if (bacmp(bdaddr, &data->bdaddr) != 0)
2504 if (data->bdaddr_type != bdaddr_type)
/* Delete stored remote OOB data for one address/type pair */
2512 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2515 struct oob_data *data;
2517 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2521 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2523 list_del(&data->list);
/* Remove and free all stored remote OOB pairing data */
2529 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2531 struct oob_data *data, *n;
2533 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2534 list_del(&data->list);
/* Store (or update) remote OOB pairing data.  The P-192 and P-256
 * hash/randomizer pairs are each optional; data->present encodes which
 * are valid (0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both).
 */
2539 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2540 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2541 u8 *hash256, u8 *rand256)
2543 struct oob_data *data;
2545 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2547 data = kmalloc(sizeof(*data), GFP_KERNEL);
2551 bacpy(&data->bdaddr, bdaddr);
2552 data->bdaddr_type = bdaddr_type;
2553 list_add(&data->list, &hdev->remote_oob_data);
2556 if (hash192 && rand192) {
2557 memcpy(data->hash192, hash192, sizeof(data->hash192));
2558 memcpy(data->rand192, rand192, sizeof(data->rand192));
2559 if (hash256 && rand256)
2560 data->present = 0x03;
2562 memset(data->hash192, 0, sizeof(data->hash192));
2563 memset(data->rand192, 0, sizeof(data->rand192));
2564 if (hash256 && rand256)
2565 data->present = 0x02;
2567 data->present = 0x00;
2570 if (hash256 && rand256) {
2571 memcpy(data->hash256, hash256, sizeof(data->hash256));
2572 memcpy(data->rand256, rand256, sizeof(data->rand256));
2574 memset(data->hash256, 0, sizeof(data->hash256));
2575 memset(data->rand256, 0, sizeof(data->rand256));
2576 if (hash192 && rand192)
2577 data->present = 0x01;
2580 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2585 /* This function requires the caller holds hdev->lock */
/* Look up an advertising instance by its instance number */
2586 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2588 struct adv_info *adv_instance;
2590 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2591 if (adv_instance->instance == instance)
2592 return adv_instance;
2598 /* This function requires the caller holds hdev->lock */
/* Return the advertising instance after the given one, wrapping from
 * the last entry back to the first (round-robin rotation).
 */
2599 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2601 struct adv_info *cur_instance;
2603 cur_instance = hci_find_adv_instance(hdev, instance);
2607 if (cur_instance == list_last_entry(&hdev->adv_instances,
2608 struct adv_info, list))
2609 return list_first_entry(&hdev->adv_instances,
2610 struct adv_info, list)
2612 return list_next_entry(cur_instance, list);
2615 /* This function requires the caller holds hdev->lock */
/* Remove one advertising instance; if it is the current one, cancel
 * its expiry timer and reset the current-instance marker.
 */
2616 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2618 struct adv_info *adv_instance;
2620 adv_instance = hci_find_adv_instance(hdev, instance);
/* NOTE(review): "%dMR" looks like a stray fragment of the %pMR address
 * format; instance is an integer, so plain "%d" was likely intended.
 */
2624 BT_DBG("%s removing %dMR", hdev->name, instance);
2626 if (hdev->cur_adv_instance == instance) {
2627 if (hdev->adv_instance_timeout) {
2628 cancel_delayed_work(&hdev->adv_instance_expire);
2629 hdev->adv_instance_timeout = 0;
2631 hdev->cur_adv_instance = 0x00;
2634 list_del(&adv_instance->list);
2635 kfree(adv_instance);
2637 hdev->adv_instance_cnt--;
2642 /* This function requires the caller holds hdev->lock */
/* Remove and free all advertising instances and cancel any pending
 * instance-expiry work.
 */
2643 void hci_adv_instances_clear(struct hci_dev *hdev)
2645 struct adv_info *adv_instance, *n;
2647 if (hdev->adv_instance_timeout) {
2648 cancel_delayed_work(&hdev->adv_instance_expire);
2649 hdev->adv_instance_timeout = 0;
2652 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2653 list_del(&adv_instance->list);
2654 kfree(adv_instance);
2657 hdev->adv_instance_cnt = 0;
2658 hdev->cur_adv_instance = 0x00;
2661 /* This function requires the caller holds hdev->lock */
/* Add a new advertising instance or update an existing one.  Instance
 * numbers are 1..HCI_MAX_ADV_INSTANCES; adv/scan-response payloads are
 * copied in and a zero duration falls back to the default.
 */
2662 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2663 u16 adv_data_len, u8 *adv_data,
2664 u16 scan_rsp_len, u8 *scan_rsp_data,
2665 u16 timeout, u16 duration)
2667 struct adv_info *adv_instance;
2669 adv_instance = hci_find_adv_instance(hdev, instance);
2671 memset(adv_instance->adv_data, 0,
2672 sizeof(adv_instance->adv_data));
2673 memset(adv_instance->scan_rsp_data, 0,
2674 sizeof(adv_instance->scan_rsp_data));
2676 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2677 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2680 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2684 adv_instance->pending = true;
2685 adv_instance->instance = instance;
2686 list_add(&adv_instance->list, &hdev->adv_instances);
2687 hdev->adv_instance_cnt++;
2690 adv_instance->flags = flags;
2691 adv_instance->adv_data_len = adv_data_len;
2692 adv_instance->scan_rsp_len = scan_rsp_len;
2695 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2698 memcpy(adv_instance->scan_rsp_data,
2699 scan_rsp_data, scan_rsp_len);
2701 adv_instance->timeout = timeout;
2702 adv_instance->remaining_time = timeout;
2705 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2707 adv_instance->duration = duration;
/* NOTE(review): "%dMR" looks like a stray fragment of the %pMR address
 * format; instance is an integer, so plain "%d" was likely intended.
 */
2709 BT_DBG("%s for %dMR", hdev->name, instance);
/* Find an entry matching both address and address type in a generic
 * bdaddr list (white/black lists etc.).
 */
2714 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2715 bdaddr_t *bdaddr, u8 type)
2717 struct bdaddr_list *b;
2719 list_for_each_entry(b, bdaddr_list, list) {
2720 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Remove and free every entry of a generic bdaddr list */
2727 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2729 struct bdaddr_list *b, *n;
2731 list_for_each_entry_safe(b, n, bdaddr_list, list) {
/* Add an address/type pair to a bdaddr list.  BDADDR_ANY is rejected
 * and duplicates are not added twice.
 */
2737 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2739 struct bdaddr_list *entry;
2741 if (!bacmp(bdaddr, BDADDR_ANY))
2744 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2747 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2751 bacpy(&entry->bdaddr, bdaddr);
2752 entry->bdaddr_type = type;
2754 list_add(&entry->list, list);
/* Remove one address/type pair from a bdaddr list; passing BDADDR_ANY
 * clears the whole list instead.
 */
2759 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2761 struct bdaddr_list *entry;
2763 if (!bacmp(bdaddr, BDADDR_ANY)) {
2764 hci_bdaddr_list_clear(list);
2768 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2772 list_del(&entry->list);
2778 /* This function requires the caller holds hdev->lock */
/* Find the stored LE connection parameters for an address/type pair */
2779 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2780 bdaddr_t *addr, u8 addr_type)
2782 struct hci_conn_params *params;
2784 list_for_each_entry(params, &hdev->le_conn_params, list) {
2785 if (bacmp(&params->addr, addr) == 0 &&
2786 params->addr_type == addr_type) {
2794 /* This function requires the caller holds hdev->lock */
/* Find connection parameters on a pending-action list (walks the
 * 'action' linkage rather than the main params list).
 */
2795 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2796 bdaddr_t *addr, u8 addr_type)
2798 struct hci_conn_params *param;
2800 list_for_each_entry(param, list, action) {
2801 if (bacmp(&param->addr, addr) == 0 &&
2802 param->addr_type == addr_type)
2809 /* This function requires the caller holds hdev->lock */
/* Get-or-create LE connection parameters for an address/type pair.
 * New entries are initialized from the controller-wide defaults with
 * auto-connect disabled.
 */
2810 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2811 bdaddr_t *addr, u8 addr_type)
2813 struct hci_conn_params *params;
2815 params = hci_conn_params_lookup(hdev, addr, addr_type);
2819 params = kzalloc(sizeof(*params), GFP_KERNEL);
2821 BT_ERR("Out of memory");
2825 bacpy(&params->addr, addr);
2826 params->addr_type = addr_type;
2828 list_add(&params->list, &hdev->le_conn_params);
2829 INIT_LIST_HEAD(&params->action);
2831 params->conn_min_interval = hdev->le_conn_min_interval;
2832 params->conn_max_interval = hdev->le_conn_max_interval;
2833 params->conn_latency = hdev->le_conn_latency;
2834 params->supervision_timeout = hdev->le_supv_timeout;
2835 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2837 BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* Release one hci_conn_params entry: drop any held connection
 * references and unlink it from both the action and params lists.
 */
2842 static void hci_conn_params_free(struct hci_conn_params *params)
2845 hci_conn_drop(params->conn);
2846 hci_conn_put(params->conn);
2849 list_del(&params->action);
2850 list_del(&params->list);
2854 /* This function requires the caller holds hdev->lock */
/* Delete stored LE connection parameters for an address/type pair and
 * refresh background scanning to match.
 */
2855 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2857 struct hci_conn_params *params;
2859 params = hci_conn_params_lookup(hdev, addr, addr_type);
2863 hci_conn_params_free(params);
2865 hci_update_background_scan(hdev);
2867 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2870 /* This function requires the caller holds hdev->lock */
/* Drop all connection-parameter entries whose auto-connect policy is
 * disabled, except those with a pending explicit connect, which are
 * downgraded to one-shot (AUTO_CONN_EXPLICIT).
 */
2871 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2873 struct hci_conn_params *params, *tmp;
2875 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2876 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2879 /* If trying to establish one time connection to disabled
2880 * device, leave the params, but mark them as just once.
2882 if (params->explicit_connect) {
2883 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2887 list_del(&params->list);
2891 BT_DBG("All LE disabled connection parameters were removed");
2894 /* This function requires the caller holds hdev->lock */
2895 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2897 struct hci_conn_params *params, *tmp;
2899 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2900 hci_conn_params_free(params);
2902 BT_DBG("All LE connection parameters were removed");
2905 /* Copy the Identity Address of the controller.
2907 * If the controller has a public BD_ADDR, then by default use that one.
2908 * If this is a LE only controller without a public address, default to
2909 * the static random address.
2911 * For debugging purposes it is possible to force controllers with a
2912 * public address to use the static random address instead.
2914 * In case BR/EDR has been disabled on a dual-mode controller and
2915 * userspace has configured a static address, then that address
2916 * becomes the identity address instead of the public BR/EDR address.
2918 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2921 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2922 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2923 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2924 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2925 bacpy(bdaddr, &hdev->static_addr);
2926 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2928 bacpy(bdaddr, &hdev->bdaddr);
2929 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2933 /* Alloc HCI device */
2934 struct hci_dev *hci_alloc_dev(void)
2936 struct hci_dev *hdev;
2938 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2942 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2943 hdev->esco_type = (ESCO_HV1);
2944 hdev->link_mode = (HCI_LM_ACCEPT);
2945 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2946 hdev->io_capability = 0x03; /* No Input No Output */
2947 hdev->manufacturer = 0xffff; /* Default to internal use */
2948 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2949 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2950 hdev->adv_instance_cnt = 0;
2951 hdev->cur_adv_instance = 0x00;
2952 hdev->adv_instance_timeout = 0;
2954 hdev->sniff_max_interval = 800;
2955 hdev->sniff_min_interval = 80;
2957 hdev->le_adv_channel_map = 0x07;
2958 hdev->le_adv_min_interval = 0x0800;
2959 hdev->le_adv_max_interval = 0x0800;
2960 hdev->le_scan_interval = 0x0060;
2961 hdev->le_scan_window = 0x0030;
2962 hdev->le_conn_min_interval = 0x0018;
2963 hdev->le_conn_max_interval = 0x0028;
2964 hdev->le_conn_latency = 0x0000;
2965 hdev->le_supv_timeout = 0x002a;
2966 hdev->le_def_tx_len = 0x001b;
2967 hdev->le_def_tx_time = 0x0148;
2968 hdev->le_max_tx_len = 0x001b;
2969 hdev->le_max_tx_time = 0x0148;
2970 hdev->le_max_rx_len = 0x001b;
2971 hdev->le_max_rx_time = 0x0148;
2973 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2974 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2975 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2976 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2978 mutex_init(&hdev->lock);
2979 mutex_init(&hdev->req_lock);
2981 INIT_LIST_HEAD(&hdev->mgmt_pending);
2982 INIT_LIST_HEAD(&hdev->blacklist);
2983 INIT_LIST_HEAD(&hdev->whitelist);
2984 INIT_LIST_HEAD(&hdev->uuids);
2985 INIT_LIST_HEAD(&hdev->link_keys);
2986 INIT_LIST_HEAD(&hdev->long_term_keys);
2987 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2988 INIT_LIST_HEAD(&hdev->remote_oob_data);
2989 INIT_LIST_HEAD(&hdev->le_white_list);
2990 INIT_LIST_HEAD(&hdev->le_conn_params);
2991 INIT_LIST_HEAD(&hdev->pend_le_conns);
2992 INIT_LIST_HEAD(&hdev->pend_le_reports);
2993 INIT_LIST_HEAD(&hdev->conn_hash.list);
2994 INIT_LIST_HEAD(&hdev->adv_instances);
2996 INIT_WORK(&hdev->rx_work, hci_rx_work);
2997 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2998 INIT_WORK(&hdev->tx_work, hci_tx_work);
2999 INIT_WORK(&hdev->power_on, hci_power_on);
3000 INIT_WORK(&hdev->error_reset, hci_error_reset);
3002 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3004 skb_queue_head_init(&hdev->rx_q);
3005 skb_queue_head_init(&hdev->cmd_q);
3006 skb_queue_head_init(&hdev->raw_q);
3008 init_waitqueue_head(&hdev->req_wait_q);
3010 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3012 hci_request_setup(hdev);
3014 hci_init_sysfs(hdev);
3015 discovery_init(hdev);
3019 EXPORT_SYMBOL(hci_alloc_dev);
3021 /* Free HCI device */
3022 void hci_free_dev(struct hci_dev *hdev)
3024 /* will free via device release */
3025 put_device(&hdev->dev);
3027 EXPORT_SYMBOL(hci_free_dev);
3029 /* Register HCI device */
3030 int hci_register_dev(struct hci_dev *hdev)
3034 if (!hdev->open || !hdev->close || !hdev->send)
3037 /* Do not allow HCI_AMP devices to register at index 0,
3038 * so the index can be used as the AMP controller ID.
3040 switch (hdev->dev_type) {
3042 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3045 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3054 sprintf(hdev->name, "hci%d", id);
3057 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3059 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3060 WQ_MEM_RECLAIM, 1, hdev->name);
3061 if (!hdev->workqueue) {
3066 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3067 WQ_MEM_RECLAIM, 1, hdev->name);
3068 if (!hdev->req_workqueue) {
3069 destroy_workqueue(hdev->workqueue);
3074 if (!IS_ERR_OR_NULL(bt_debugfs))
3075 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3077 dev_set_name(&hdev->dev, "%s", hdev->name);
3079 error = device_add(&hdev->dev);
3083 hci_leds_init(hdev);
3085 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3086 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3089 if (rfkill_register(hdev->rfkill) < 0) {
3090 rfkill_destroy(hdev->rfkill);
3091 hdev->rfkill = NULL;
3095 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3096 hci_dev_set_flag(hdev, HCI_RFKILLED);
3098 hci_dev_set_flag(hdev, HCI_SETUP);
3099 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3101 if (hdev->dev_type == HCI_PRIMARY) {
3102 /* Assume BR/EDR support until proven otherwise (such as
3103 * through reading supported features during init.
3105 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3108 write_lock(&hci_dev_list_lock);
3109 list_add(&hdev->list, &hci_dev_list);
3110 write_unlock(&hci_dev_list_lock);
3112 /* Devices that are marked for raw-only usage are unconfigured
3113 * and should not be included in normal operation.
3115 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3116 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3118 hci_sock_dev_event(hdev, HCI_DEV_REG);
3121 queue_work(hdev->req_workqueue, &hdev->power_on);
3126 destroy_workqueue(hdev->workqueue);
3127 destroy_workqueue(hdev->req_workqueue);
3129 ida_simple_remove(&hci_index_ida, hdev->id);
3133 EXPORT_SYMBOL(hci_register_dev);
3135 /* Unregister HCI device */
3136 void hci_unregister_dev(struct hci_dev *hdev)
3140 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3142 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3146 write_lock(&hci_dev_list_lock);
3147 list_del(&hdev->list);
3148 write_unlock(&hci_dev_list_lock);
3150 cancel_work_sync(&hdev->power_on);
3152 hci_dev_do_close(hdev);
3154 if (!test_bit(HCI_INIT, &hdev->flags) &&
3155 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3156 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3158 mgmt_index_removed(hdev);
3159 hci_dev_unlock(hdev);
3162 /* mgmt_index_removed should take care of emptying the
3164 BUG_ON(!list_empty(&hdev->mgmt_pending));
3166 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3169 rfkill_unregister(hdev->rfkill);
3170 rfkill_destroy(hdev->rfkill);
3173 device_del(&hdev->dev);
3175 debugfs_remove_recursive(hdev->debugfs);
3176 kfree_const(hdev->hw_info);
3177 kfree_const(hdev->fw_info);
3179 destroy_workqueue(hdev->workqueue);
3180 destroy_workqueue(hdev->req_workqueue);
3183 hci_bdaddr_list_clear(&hdev->blacklist);
3184 hci_bdaddr_list_clear(&hdev->whitelist);
3185 hci_uuids_clear(hdev);
3186 hci_link_keys_clear(hdev);
3187 hci_smp_ltks_clear(hdev);
3188 hci_smp_irks_clear(hdev);
3189 hci_remote_oob_data_clear(hdev);
3190 hci_adv_instances_clear(hdev);
3191 hci_bdaddr_list_clear(&hdev->le_white_list);
3192 hci_conn_params_clear_all(hdev);
3193 hci_discovery_filter_clear(hdev);
3194 hci_dev_unlock(hdev);
3198 ida_simple_remove(&hci_index_ida, id);
3200 EXPORT_SYMBOL(hci_unregister_dev);
3202 /* Suspend HCI device */
3203 int hci_suspend_dev(struct hci_dev *hdev)
3205 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3208 EXPORT_SYMBOL(hci_suspend_dev);
3210 /* Resume HCI device */
3211 int hci_resume_dev(struct hci_dev *hdev)
3213 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3216 EXPORT_SYMBOL(hci_resume_dev);
3218 /* Reset HCI device */
3219 int hci_reset_dev(struct hci_dev *hdev)
3221 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3222 struct sk_buff *skb;
3224 skb = bt_skb_alloc(3, GFP_ATOMIC);
3228 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3229 memcpy(skb_put(skb, 3), hw_err, 3);
3231 /* Send Hardware Error to upper stack */
3232 return hci_recv_frame(hdev, skb);
3234 EXPORT_SYMBOL(hci_reset_dev);
3236 /* Receive frame from HCI drivers */
3237 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3239 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3240 && !test_bit(HCI_INIT, &hdev->flags))) {
3245 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3246 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3247 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3253 bt_cb(skb)->incoming = 1;
3256 __net_timestamp(skb);
3258 skb_queue_tail(&hdev->rx_q, skb);
3259 queue_work(hdev->workqueue, &hdev->rx_work);
3263 EXPORT_SYMBOL(hci_recv_frame);
3265 /* Receive diagnostic message from HCI drivers */
3266 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3268 /* Mark as diagnostic packet */
3269 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3272 __net_timestamp(skb);
3274 skb_queue_tail(&hdev->rx_q, skb);
3275 queue_work(hdev->workqueue, &hdev->rx_work);
3279 EXPORT_SYMBOL(hci_recv_diag);
3281 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3285 va_start(vargs, fmt);
3286 kfree_const(hdev->hw_info);
3287 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3290 EXPORT_SYMBOL(hci_set_hw_info);
3292 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3296 va_start(vargs, fmt);
3297 kfree_const(hdev->fw_info);
3298 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3301 EXPORT_SYMBOL(hci_set_fw_info);
3303 /* ---- Interface to upper protocols ---- */
3305 int hci_register_cb(struct hci_cb *cb)
3307 BT_DBG("%p name %s", cb, cb->name);
3309 mutex_lock(&hci_cb_list_lock);
3310 list_add_tail(&cb->list, &hci_cb_list);
3311 mutex_unlock(&hci_cb_list_lock);
3315 EXPORT_SYMBOL(hci_register_cb);
3317 int hci_unregister_cb(struct hci_cb *cb)
3319 BT_DBG("%p name %s", cb, cb->name);
3321 mutex_lock(&hci_cb_list_lock);
3322 list_del(&cb->list);
3323 mutex_unlock(&hci_cb_list_lock);
3327 EXPORT_SYMBOL(hci_unregister_cb);
3329 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3333 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3337 __net_timestamp(skb);
3339 /* Send copy to monitor */
3340 hci_send_to_monitor(hdev, skb);
3342 if (atomic_read(&hdev->promisc)) {
3343 /* Send copy to the sockets */
3344 hci_send_to_sock(hdev, skb);
3347 /* Get rid of skb owner, prior to sending to the driver. */
3350 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3355 err = hdev->send(hdev, skb);
3357 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3362 /* Send HCI command */
3363 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3366 struct sk_buff *skb;
3368 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3370 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3372 BT_ERR("%s no memory for command", hdev->name);
3376 /* Stand-alone HCI commands must be flagged as
3377 * single-command requests.
3379 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3381 skb_queue_tail(&hdev->cmd_q, skb);
3382 queue_work(hdev->workqueue, &hdev->cmd_work);
3387 /* Get data from the previously sent command */
3388 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3390 struct hci_command_hdr *hdr;
3392 if (!hdev->sent_cmd)
3395 hdr = (void *) hdev->sent_cmd->data;
3397 if (hdr->opcode != cpu_to_le16(opcode))
3400 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3402 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3405 /* Send HCI command and wait for command commplete event */
3406 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3407 const void *param, u32 timeout)
3409 struct sk_buff *skb;
3411 if (!test_bit(HCI_UP, &hdev->flags))
3412 return ERR_PTR(-ENETDOWN);
3414 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3416 hci_req_sync_lock(hdev);
3417 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3418 hci_req_sync_unlock(hdev);
3422 EXPORT_SYMBOL(hci_cmd_sync);
3425 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3427 struct hci_acl_hdr *hdr;
3430 skb_push(skb, HCI_ACL_HDR_SIZE);
3431 skb_reset_transport_header(skb);
3432 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3433 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3434 hdr->dlen = cpu_to_le16(len);
3437 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3438 struct sk_buff *skb, __u16 flags)
3440 struct hci_conn *conn = chan->conn;
3441 struct hci_dev *hdev = conn->hdev;
3442 struct sk_buff *list;
3444 skb->len = skb_headlen(skb);
3447 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3449 switch (hdev->dev_type) {
3451 hci_add_acl_hdr(skb, conn->handle, flags);
3454 hci_add_acl_hdr(skb, chan->handle, flags);
3457 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3461 list = skb_shinfo(skb)->frag_list;
3463 /* Non fragmented */
3464 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3466 skb_queue_tail(queue, skb);
3469 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3471 skb_shinfo(skb)->frag_list = NULL;
3473 /* Queue all fragments atomically. We need to use spin_lock_bh
3474 * here because of 6LoWPAN links, as there this function is
3475 * called from softirq and using normal spin lock could cause
3478 spin_lock_bh(&queue->lock);
3480 __skb_queue_tail(queue, skb);
3482 flags &= ~ACL_START;
3485 skb = list; list = list->next;
3487 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3488 hci_add_acl_hdr(skb, conn->handle, flags);
3490 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3492 __skb_queue_tail(queue, skb);
3495 spin_unlock_bh(&queue->lock);
3499 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3501 struct hci_dev *hdev = chan->conn->hdev;
3503 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3505 hci_queue_acl(chan, &chan->data_q, skb, flags);
3507 queue_work(hdev->workqueue, &hdev->tx_work);
3511 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3513 struct hci_dev *hdev = conn->hdev;
3514 struct hci_sco_hdr hdr;
3516 BT_DBG("%s len %d", hdev->name, skb->len);
3518 hdr.handle = cpu_to_le16(conn->handle);
3519 hdr.dlen = skb->len;
3521 skb_push(skb, HCI_SCO_HDR_SIZE);
3522 skb_reset_transport_header(skb);
3523 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3525 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3527 skb_queue_tail(&conn->data_q, skb);
3528 queue_work(hdev->workqueue, &hdev->tx_work);
3531 /* ---- HCI TX task (outgoing data) ---- */
3533 /* HCI Connection scheduler */
3534 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3537 struct hci_conn_hash *h = &hdev->conn_hash;
3538 struct hci_conn *conn = NULL, *c;
3539 unsigned int num = 0, min = ~0;
3541 /* We don't have to lock device here. Connections are always
3542 * added and removed with TX task disabled. */
3546 list_for_each_entry_rcu(c, &h->list, list) {
3547 if (c->type != type || skb_queue_empty(&c->data_q))
3550 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3555 if (c->sent < min) {
3560 if (hci_conn_num(hdev, type) == num)
3569 switch (conn->type) {
3571 cnt = hdev->acl_cnt;
3575 cnt = hdev->sco_cnt;
3578 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3582 BT_ERR("Unknown link type");
3590 BT_DBG("conn %p quote %d", conn, *quote);
3594 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3596 struct hci_conn_hash *h = &hdev->conn_hash;
3599 BT_ERR("%s link tx timeout", hdev->name);
3603 /* Kill stalled connections */
3604 list_for_each_entry_rcu(c, &h->list, list) {
3605 if (c->type == type && c->sent) {
3606 BT_ERR("%s killing stalled connection %pMR",
3607 hdev->name, &c->dst);
3608 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3615 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3618 struct hci_conn_hash *h = &hdev->conn_hash;
3619 struct hci_chan *chan = NULL;
3620 unsigned int num = 0, min = ~0, cur_prio = 0;
3621 struct hci_conn *conn;
3622 int cnt, q, conn_num = 0;
3624 BT_DBG("%s", hdev->name);
3628 list_for_each_entry_rcu(conn, &h->list, list) {
3629 struct hci_chan *tmp;
3631 if (conn->type != type)
3634 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3639 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3640 struct sk_buff *skb;
3642 if (skb_queue_empty(&tmp->data_q))
3645 skb = skb_peek(&tmp->data_q);
3646 if (skb->priority < cur_prio)
3649 if (skb->priority > cur_prio) {
3652 cur_prio = skb->priority;
3657 if (conn->sent < min) {
3663 if (hci_conn_num(hdev, type) == conn_num)
3672 switch (chan->conn->type) {
3674 cnt = hdev->acl_cnt;
3677 cnt = hdev->block_cnt;
3681 cnt = hdev->sco_cnt;
3684 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3688 BT_ERR("Unknown link type");
3693 BT_DBG("chan %p quote %d", chan, *quote);
3697 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3699 struct hci_conn_hash *h = &hdev->conn_hash;
3700 struct hci_conn *conn;
3703 BT_DBG("%s", hdev->name);
3707 list_for_each_entry_rcu(conn, &h->list, list) {
3708 struct hci_chan *chan;
3710 if (conn->type != type)
3713 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3718 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3719 struct sk_buff *skb;
3726 if (skb_queue_empty(&chan->data_q))
3729 skb = skb_peek(&chan->data_q);
3730 if (skb->priority >= HCI_PRIO_MAX - 1)
3733 skb->priority = HCI_PRIO_MAX - 1;
3735 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3739 if (hci_conn_num(hdev, type) == num)
3747 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3749 /* Calculate count of blocks used by this packet */
3750 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3753 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3755 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3756 /* ACL tx timeout must be longer than maximum
3757 * link supervision timeout (40.9 seconds) */
3758 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3759 HCI_ACL_TX_TIMEOUT))
3760 hci_link_tx_to(hdev, ACL_LINK);
3764 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3766 unsigned int cnt = hdev->acl_cnt;
3767 struct hci_chan *chan;
3768 struct sk_buff *skb;
3771 __check_timeout(hdev, cnt);
3773 while (hdev->acl_cnt &&
3774 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3775 u32 priority = (skb_peek(&chan->data_q))->priority;
3776 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3777 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3778 skb->len, skb->priority);
3780 /* Stop if priority has changed */
3781 if (skb->priority < priority)
3784 skb = skb_dequeue(&chan->data_q);
3786 hci_conn_enter_active_mode(chan->conn,
3787 bt_cb(skb)->force_active);
3789 hci_send_frame(hdev, skb);
3790 hdev->acl_last_tx = jiffies;
3798 if (cnt != hdev->acl_cnt)
3799 hci_prio_recalculate(hdev, ACL_LINK);
3802 static void hci_sched_acl_blk(struct hci_dev *hdev)
3804 unsigned int cnt = hdev->block_cnt;
3805 struct hci_chan *chan;
3806 struct sk_buff *skb;
3810 __check_timeout(hdev, cnt);
3812 BT_DBG("%s", hdev->name);
3814 if (hdev->dev_type == HCI_AMP)
3819 while (hdev->block_cnt > 0 &&
3820 (chan = hci_chan_sent(hdev, type, "e))) {
3821 u32 priority = (skb_peek(&chan->data_q))->priority;
3822 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3825 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3826 skb->len, skb->priority);
3828 /* Stop if priority has changed */
3829 if (skb->priority < priority)
3832 skb = skb_dequeue(&chan->data_q);
3834 blocks = __get_blocks(hdev, skb);
3835 if (blocks > hdev->block_cnt)
3838 hci_conn_enter_active_mode(chan->conn,
3839 bt_cb(skb)->force_active);
3841 hci_send_frame(hdev, skb);
3842 hdev->acl_last_tx = jiffies;
3844 hdev->block_cnt -= blocks;
3847 chan->sent += blocks;
3848 chan->conn->sent += blocks;
3852 if (cnt != hdev->block_cnt)
3853 hci_prio_recalculate(hdev, type);
3856 static void hci_sched_acl(struct hci_dev *hdev)
3858 BT_DBG("%s", hdev->name);
3860 /* No ACL link over BR/EDR controller */
3861 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3864 /* No AMP link over AMP controller */
3865 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3868 switch (hdev->flow_ctl_mode) {
3869 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3870 hci_sched_acl_pkt(hdev);
3873 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3874 hci_sched_acl_blk(hdev);
3880 static void hci_sched_sco(struct hci_dev *hdev)
3882 struct hci_conn *conn;
3883 struct sk_buff *skb;
3886 BT_DBG("%s", hdev->name);
3888 if (!hci_conn_num(hdev, SCO_LINK))
3891 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3892 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3893 BT_DBG("skb %p len %d", skb, skb->len);
3894 hci_send_frame(hdev, skb);
3897 if (conn->sent == ~0)
3903 static void hci_sched_esco(struct hci_dev *hdev)
3905 struct hci_conn *conn;
3906 struct sk_buff *skb;
3909 BT_DBG("%s", hdev->name);
3911 if (!hci_conn_num(hdev, ESCO_LINK))
3914 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3916 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3917 BT_DBG("skb %p len %d", skb, skb->len);
3918 hci_send_frame(hdev, skb);
3921 if (conn->sent == ~0)
3927 static void hci_sched_le(struct hci_dev *hdev)
3929 struct hci_chan *chan;
3930 struct sk_buff *skb;
3931 int quote, cnt, tmp;
3933 BT_DBG("%s", hdev->name);
3935 if (!hci_conn_num(hdev, LE_LINK))
3938 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3939 /* LE tx timeout must be longer than maximum
3940 * link supervision timeout (40.9 seconds) */
3941 if (!hdev->le_cnt && hdev->le_pkts &&
3942 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3943 hci_link_tx_to(hdev, LE_LINK);
3946 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3948 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3949 u32 priority = (skb_peek(&chan->data_q))->priority;
3950 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3951 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3952 skb->len, skb->priority);
3954 /* Stop if priority has changed */
3955 if (skb->priority < priority)
3958 skb = skb_dequeue(&chan->data_q);
3960 hci_send_frame(hdev, skb);
3961 hdev->le_last_tx = jiffies;
3972 hdev->acl_cnt = cnt;
3975 hci_prio_recalculate(hdev, LE_LINK);
3978 static void hci_tx_work(struct work_struct *work)
3980 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3981 struct sk_buff *skb;
3983 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3984 hdev->sco_cnt, hdev->le_cnt);
3986 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3987 /* Schedule queues and send stuff to HCI driver */
3988 hci_sched_acl(hdev);
3989 hci_sched_sco(hdev);
3990 hci_sched_esco(hdev);
3994 /* Send next queued raw (unknown type) packet */
3995 while ((skb = skb_dequeue(&hdev->raw_q)))
3996 hci_send_frame(hdev, skb);
3999 /* ----- HCI RX task (incoming data processing) ----- */
4001 /* ACL data packet */
4002 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4004 struct hci_acl_hdr *hdr = (void *) skb->data;
4005 struct hci_conn *conn;
4006 __u16 handle, flags;
4008 skb_pull(skb, HCI_ACL_HDR_SIZE);
4010 handle = __le16_to_cpu(hdr->handle);
4011 flags = hci_flags(handle);
4012 handle = hci_handle(handle);
4014 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4017 hdev->stat.acl_rx++;
4020 conn = hci_conn_hash_lookup_handle(hdev, handle);
4021 hci_dev_unlock(hdev);
4024 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4026 /* Send to upper protocol */
4027 l2cap_recv_acldata(conn, skb, flags);
4030 BT_ERR("%s ACL packet for unknown connection handle %d",
4031 hdev->name, handle);
4037 /* SCO data packet */
4038 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4040 struct hci_sco_hdr *hdr = (void *) skb->data;
4041 struct hci_conn *conn;
4044 skb_pull(skb, HCI_SCO_HDR_SIZE);
4046 handle = __le16_to_cpu(hdr->handle);
4048 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4050 hdev->stat.sco_rx++;
4053 conn = hci_conn_hash_lookup_handle(hdev, handle);
4054 hci_dev_unlock(hdev);
4057 /* Send to upper protocol */
4058 sco_recv_scodata(conn, skb);
4061 BT_ERR("%s SCO packet for unknown connection handle %d",
4062 hdev->name, handle);
4068 static bool hci_req_is_complete(struct hci_dev *hdev)
4070 struct sk_buff *skb;
4072 skb = skb_peek(&hdev->cmd_q);
4076 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4079 static void hci_resend_last(struct hci_dev *hdev)
4081 struct hci_command_hdr *sent;
4082 struct sk_buff *skb;
4085 if (!hdev->sent_cmd)
4088 sent = (void *) hdev->sent_cmd->data;
4089 opcode = __le16_to_cpu(sent->opcode);
4090 if (opcode == HCI_OP_RESET)
4093 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4097 skb_queue_head(&hdev->cmd_q, skb);
4098 queue_work(hdev->workqueue, &hdev->cmd_work);
4101 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4102 hci_req_complete_t *req_complete,
4103 hci_req_complete_skb_t *req_complete_skb)
4105 struct sk_buff *skb;
4106 unsigned long flags;
4108 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4110 /* If the completed command doesn't match the last one that was
4111 * sent we need to do special handling of it.
4113 if (!hci_sent_cmd_data(hdev, opcode)) {
4114 /* Some CSR based controllers generate a spontaneous
4115 * reset complete event during init and any pending
4116 * command will never be completed. In such a case we
4117 * need to resend whatever was the last sent
4120 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4121 hci_resend_last(hdev);
4126 /* If the command succeeded and there's still more commands in
4127 * this request the request is not yet complete.
4129 if (!status && !hci_req_is_complete(hdev))
4132 /* If this was the last command in a request the complete
4133 * callback would be found in hdev->sent_cmd instead of the
4134 * command queue (hdev->cmd_q).
4136 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4137 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4141 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4142 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4146 /* Remove all pending commands belonging to this request */
4147 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4148 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4149 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4150 __skb_queue_head(&hdev->cmd_q, skb);
4154 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4155 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4157 *req_complete = bt_cb(skb)->hci.req_complete;
4160 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4163 static void hci_rx_work(struct work_struct *work)
4165 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4166 struct sk_buff *skb;
4168 BT_DBG("%s", hdev->name);
4170 while ((skb = skb_dequeue(&hdev->rx_q))) {
4171 /* Send copy to monitor */
4172 hci_send_to_monitor(hdev, skb);
4174 if (atomic_read(&hdev->promisc)) {
4175 /* Send copy to the sockets */
4176 hci_send_to_sock(hdev, skb);
4179 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4184 if (test_bit(HCI_INIT, &hdev->flags)) {
4185 /* Don't process data packets in this states. */
4186 switch (hci_skb_pkt_type(skb)) {
4187 case HCI_ACLDATA_PKT:
4188 case HCI_SCODATA_PKT:
4195 switch (hci_skb_pkt_type(skb)) {
4197 BT_DBG("%s Event packet", hdev->name);
4198 hci_event_packet(hdev, skb);
4201 case HCI_ACLDATA_PKT:
4202 BT_DBG("%s ACL data packet", hdev->name);
4203 hci_acldata_packet(hdev, skb);
4206 case HCI_SCODATA_PKT:
4207 BT_DBG("%s SCO data packet", hdev->name);
4208 hci_scodata_packet(hdev, skb);
4218 static void hci_cmd_work(struct work_struct *work)
4220 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4221 struct sk_buff *skb;
4223 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4224 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4226 /* Send queued commands */
4227 if (atomic_read(&hdev->cmd_cnt)) {
4228 skb = skb_dequeue(&hdev->cmd_q);
4232 kfree_skb(hdev->sent_cmd);
4234 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4235 if (hdev->sent_cmd) {
4236 atomic_dec(&hdev->cmd_cnt);
4237 hci_send_frame(hdev, skb);
4238 if (test_bit(HCI_RESET, &hdev->flags))
4239 cancel_delayed_work(&hdev->cmd_timer);
4241 schedule_delayed_work(&hdev->cmd_timer,
4244 skb_queue_head(&hdev->cmd_q, skb);
4245 queue_work(hdev->workqueue, &hdev->cmd_work);