2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
45 static void hci_rx_work(struct work_struct *work);
46 static void hci_cmd_work(struct work_struct *work);
47 static void hci_tx_work(struct work_struct *work);
50 LIST_HEAD(hci_dev_list);
51 DEFINE_RWLOCK(hci_dev_list_lock);
53 /* HCI callback list */
54 LIST_HEAD(hci_cb_list);
55 DEFINE_MUTEX(hci_cb_list_lock);
57 /* HCI ID Numbering */
58 static DEFINE_IDA(hci_index_ida);
60 /* ---- HCI debugfs entries ---- */
/* Read handler for the "dut_mode" debugfs entry.  Reports 'Y' or 'N'
 * depending on whether the HCI_DUT_MODE flag is set on the device.
 * NOTE(review): the local buffer declaration and terminator setup are
 * not visible in this extract -- confirm against the full source.
 */
62 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
65 struct hci_dev *hdev = file->private_data;
68 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* Write handler for the "dut_mode" debugfs entry.  Parses a boolean
 * from userspace; when the device is up and the requested state
 * differs from the current HCI_DUT_MODE flag, it synchronously sends
 * either HCI Enable Device Under Test Mode or HCI Reset under the
 * request sync lock and then toggles the flag.
 */
74 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
75 size_t count, loff_t *ppos)
77 struct hci_dev *hdev = file->private_data;
/* DUT mode can only be toggled while the controller is powered up */
82 if (!test_bit(HCI_UP, &hdev->flags))
85 err = kstrtobool_from_user(user_buf, count, &enable);
/* Nothing to do if the requested state is already in effect */
89 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
92 hci_req_sync_lock(hdev);
94 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
97 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
99 hci_req_sync_unlock(hdev);
106 hci_dev_change_flag(hdev, HCI_DUT_MODE);
/* debugfs file operations backing the "dut_mode" entry */
111 static const struct file_operations dut_mode_fops = {
113 .read = dut_mode_read,
114 .write = dut_mode_write,
115 .llseek = default_llseek,
/* Read handler for the "vendor_diag" debugfs entry.  Reports 'Y' or
 * 'N' depending on whether vendor diagnostics (HCI_VENDOR_DIAG) are
 * currently enabled.
 */
118 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
119 size_t count, loff_t *ppos)
121 struct hci_dev *hdev = file->private_data;
124 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
127 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* Write handler for the "vendor_diag" debugfs entry.  Parses a
 * boolean and either defers the change (non-persistent diag quirk
 * while the transport is down or in user-channel mode) or calls the
 * driver's set_diag() callback under the request sync lock, then
 * updates the HCI_VENDOR_DIAG flag to match.
 */
130 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
131 size_t count, loff_t *ppos)
133 struct hci_dev *hdev = file->private_data;
137 err = kstrtobool_from_user(user_buf, count, &enable);
141 /* When the diagnostic flags are not persistent and the transport
142 * is not active or in user channel operation, then there is no need
143 * for the vendor callback. Instead just store the desired value and
144 * the setting will be programmed when the controller gets powered on.
146 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
147 (!test_bit(HCI_RUNNING, &hdev->flags) ||
148 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
151 hci_req_sync_lock(hdev);
152 err = hdev->set_diag(hdev, enable);
153 hci_req_sync_unlock(hdev);
160 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
162 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
/* debugfs file operations backing the "vendor_diag" entry */
167 static const struct file_operations vendor_diag_fops = {
169 .read = vendor_diag_read,
170 .write = vendor_diag_write,
171 .llseek = default_llseek,
/* Create the basic per-controller debugfs entries ("dut_mode" and
 * "vendor_diag") under hdev->debugfs.  The fops arguments are not
 * visible in this extract but the entries are paired with
 * dut_mode_fops/vendor_diag_fops above -- TODO confirm.
 */
174 static void hci_debugfs_create_basic(struct hci_dev *hdev)
176 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
180 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
/* Queue an HCI Reset command.  Sets the HCI_RESET flag so that event
 * handling knows a reset is in flight.
 */
184 static int hci_reset_req(struct hci_request *req, unsigned long opt)
186 BT_DBG("%s %ld", req->hdev->name, opt);
189 set_bit(HCI_RESET, &req->hdev->flags);
190 hci_req_add(req, HCI_OP_RESET, 0, NULL);
/* Stage-1 init for BR/EDR (primary) controllers: select packet-based
 * flow control and queue the mandatory identity reads.
 */
194 static void bredr_init(struct hci_request *req)
196 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
198 /* Read Local Supported Features */
199 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
201 /* Read Local Version */
202 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
204 /* Read BD Address */
205 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Stage-1 init for AMP controllers: select block-based flow control
 * and queue the AMP-specific information reads.
 */
208 static void amp_init1(struct hci_request *req)
210 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
212 /* Read Local Version */
213 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215 /* Read Local Supported Commands */
216 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
218 /* Read Local AMP Info */
219 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
221 /* Read Data Blk size */
222 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
224 /* Read Flow Control Mode */
225 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
227 /* Read Location Data */
228 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
/* Stage-2 init for AMP controllers.  Only issues Read Local Supported
 * Features when the supported-commands bitmask (octet 14, bit 5)
 * indicates the command exists.
 */
231 static int amp_init2(struct hci_request *req)
233 /* Read Local Supported Features. Not all AMP controllers
234 * support this so it's placed conditionally in the second
237 if (req->hdev->commands[14] & 0x20)
238 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
/* First controller init stage: reset (unless the driver quirks it
 * away because it resets on close anyway) and dispatch to the
 * per-device-type init routine.  The switch cases are elided in this
 * extract; presumably HCI_PRIMARY -> bredr_init() and
 * HCI_AMP -> amp_init1() -- TODO confirm against the full source.
 */
243 static int hci_init1_req(struct hci_request *req, unsigned long opt)
245 struct hci_dev *hdev = req->hdev;
247 BT_DBG("%s %ld", hdev->name, opt);
250 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
251 hci_reset_req(req, 0);
253 switch (hdev->dev_type) {
261 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
268 static void bredr_setup(struct hci_request *req)
273 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
274 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
276 /* Read Class of Device */
277 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
279 /* Read Local Name */
280 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
282 /* Read Voice Setting */
283 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
285 /* Read Number of Supported IAC */
286 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
288 /* Read Current IAC LAP */
289 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
291 /* Clear Event Filters */
292 flt_type = HCI_FLT_CLEAR_ALL;
293 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
295 /* Connection accept timeout ~20 secs */
296 param = cpu_to_le16(0x7d00);
297 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
/* Queue LE-specific setup commands and, for single-mode LE
 * controllers (no BR/EDR support), mark LE as implicitly enabled.
 */
300 static void le_setup(struct hci_request *req)
302 struct hci_dev *hdev = req->hdev;
304 /* Read LE Buffer Size */
305 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
307 /* Read LE Local Supported Features */
308 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
310 /* Read LE Supported States */
311 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
313 /* LE-only controllers have LE implicitly enabled */
314 if (!lmp_bredr_capable(hdev))
315 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
/* Build and queue the HCI Set Event Mask command (page 1).  Starts
 * from a permissive BR/EDR default (or a minimal Command
 * Complete/Status/Hardware Error set for LE-only devices) and then
 * enables individual events based on the supported-commands bitmask
 * and LMP feature bits, so the controller only reports events the
 * host stack can actually receive.
 */
318 static void hci_setup_event_mask(struct hci_request *req)
320 struct hci_dev *hdev = req->hdev;
322 /* The second byte is 0xff instead of 0x9f (two reserved bits
323 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
326 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
328 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
329 * any event mask for pre 1.2 devices.
331 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
334 if (lmp_bredr_capable(hdev)) {
335 events[4] |= 0x01; /* Flow Specification Complete */
337 /* Use a different default for LE-only devices */
338 memset(events, 0, sizeof(events));
339 events[1] |= 0x20; /* Command Complete */
340 events[1] |= 0x40; /* Command Status */
341 events[1] |= 0x80; /* Hardware Error */
343 /* If the controller supports the Disconnect command, enable
344 * the corresponding event. In addition enable packet flow
345 * control related events.
347 if (hdev->commands[0] & 0x20) {
348 events[0] |= 0x10; /* Disconnection Complete */
349 events[2] |= 0x04; /* Number of Completed Packets */
350 events[3] |= 0x02; /* Data Buffer Overflow */
353 /* If the controller supports the Read Remote Version
354 * Information command, enable the corresponding event.
356 if (hdev->commands[2] & 0x80)
357 events[1] |= 0x08; /* Read Remote Version Information
361 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
362 events[0] |= 0x80; /* Encryption Change */
363 events[5] |= 0x80; /* Encryption Key Refresh Complete */
/* The remaining feature checks apply to BR/EDR capable devices */
367 if (lmp_inq_rssi_capable(hdev) ||
368 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
369 events[4] |= 0x02; /* Inquiry Result with RSSI */
371 if (lmp_ext_feat_capable(hdev))
372 events[4] |= 0x04; /* Read Remote Extended Features Complete */
374 if (lmp_esco_capable(hdev)) {
375 events[5] |= 0x08; /* Synchronous Connection Complete */
376 events[5] |= 0x10; /* Synchronous Connection Changed */
379 if (lmp_sniffsubr_capable(hdev))
380 events[5] |= 0x20; /* Sniff Subrating */
382 if (lmp_pause_enc_capable(hdev))
383 events[5] |= 0x80; /* Encryption Key Refresh Complete */
385 if (lmp_ext_inq_capable(hdev))
386 events[5] |= 0x40; /* Extended Inquiry Result */
388 if (lmp_no_flush_capable(hdev))
389 events[7] |= 0x01; /* Enhanced Flush Complete */
391 if (lmp_lsto_capable(hdev))
392 events[6] |= 0x80; /* Link Supervision Timeout Changed */
394 if (lmp_ssp_capable(hdev)) {
395 events[6] |= 0x01; /* IO Capability Request */
396 events[6] |= 0x02; /* IO Capability Response */
397 events[6] |= 0x04; /* User Confirmation Request */
398 events[6] |= 0x08; /* User Passkey Request */
399 events[6] |= 0x10; /* Remote OOB Data Request */
400 events[6] |= 0x20; /* Simple Pairing Complete */
401 events[7] |= 0x04; /* User Passkey Notification */
402 events[7] |= 0x08; /* Keypress Notification */
403 events[7] |= 0x10; /* Remote Host Supported
404 * Features Notification
408 if (lmp_le_capable(hdev))
409 events[7] |= 0x20; /* LE Meta-Event */
411 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Second controller init stage.  AMP controllers divert to
 * amp_init2().  For primary controllers: set the event mask /
 * BREDR_ENABLED flag according to BR/EDR capability, run LE setup if
 * LE capable, read local supported commands (unless quirked), then
 * program SSP mode or EIR, inquiry mode, inquiry TX power, extended
 * features page 1, and authentication enable as supported.
 */
414 static int hci_init2_req(struct hci_request *req, unsigned long opt)
416 struct hci_dev *hdev = req->hdev;
418 if (hdev->dev_type == HCI_AMP)
419 return amp_init2(req);
/* bredr_setup()/le_setup() call sites are elided in this extract */
421 if (lmp_bredr_capable(hdev))
424 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
426 if (lmp_le_capable(hdev))
429 /* All Bluetooth 1.2 and later controllers should support the
430 * HCI command for reading the local supported commands.
432 * Unfortunately some controllers indicate Bluetooth 1.2 support,
433 * but do not have support for this command. If that is the case,
434 * the driver can quirk the behavior and skip reading the local
435 * supported commands.
437 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
438 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
439 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
441 if (lmp_ssp_capable(hdev)) {
442 /* When SSP is available, then the host features page
443 * should also be available as well. However some
444 * controllers list the max_page as 0 as long as SSP
445 * has not been enabled. To achieve proper debugging
446 * output, force the minimum max_page to 1 at least.
448 hdev->max_page = 0x01;
450 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
453 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
454 sizeof(mode), &mode);
456 struct hci_cp_write_eir cp;
/* SSP disabled: wipe any stale EIR data from the controller */
458 memset(hdev->eir, 0, sizeof(hdev->eir));
459 memset(&cp, 0, sizeof(cp));
461 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
465 if (lmp_inq_rssi_capable(hdev) ||
466 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
469 /* If Extended Inquiry Result events are supported, then
470 * they are clearly preferred over Inquiry Result with RSSI
473 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
475 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
478 if (lmp_inq_tx_pwr_capable(hdev))
479 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
481 if (lmp_ext_feat_capable(hdev)) {
482 struct hci_cp_read_local_ext_features cp;
485 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
489 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
491 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link policy from the controller's LMP
 * capabilities (role switch, hold, sniff, park) and queue the Write
 * Default Link Policy Settings command.
 */
498 static void hci_setup_link_policy(struct hci_request *req)
500 struct hci_dev *hdev = req->hdev;
501 struct hci_cp_write_def_link_policy cp;
504 if (lmp_rswitch_capable(hdev))
505 link_policy |= HCI_LP_RSWITCH;
506 if (lmp_hold_capable(hdev))
507 link_policy |= HCI_LP_HOLD;
508 if (lmp_sniff_capable(hdev))
509 link_policy |= HCI_LP_SNIFF;
510 if (lmp_park_capable(hdev))
511 link_policy |= HCI_LP_PARK;
513 cp.policy = cpu_to_le16(link_policy);
514 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported on dual-mode controllers when the
 * host's HCI_LE_ENABLED flag disagrees with the controller's current
 * host-LE capability.  LE-only devices are skipped since LE cannot be
 * explicitly enabled there.
 */
517 static void hci_set_le_support(struct hci_request *req)
519 struct hci_dev *hdev = req->hdev;
520 struct hci_cp_write_le_host_supported cp;
522 /* LE-only devices do not support explicit enablement */
523 if (!lmp_bredr_capable(hdev))
526 memset(&cp, 0, sizeof(cp));
528 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
/* Only send the command if it would actually change the setting */
533 if (cp.le != lmp_host_le_capable(hdev))
534 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and conditionally queue Set Event Mask Page 2.  Events are
 * enabled for Connectionless Slave Broadcast master/slave roles and
 * Authenticated Payload Timeout; the command is only sent when the
 * mask actually changed from the all-zero default, since some
 * Broadcom controllers advertise but do not support it.
 */
538 static void hci_set_event_mask_page_2(struct hci_request *req)
540 struct hci_dev *hdev = req->hdev;
541 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
542 bool changed = false;
544 /* If Connectionless Slave Broadcast master role is supported
545 * enable all necessary events for it.
547 if (lmp_csb_master_capable(hdev)) {
548 events[1] |= 0x40; /* Triggered Clock Capture */
549 events[1] |= 0x80; /* Synchronization Train Complete */
550 events[2] |= 0x10; /* Slave Page Response Timeout */
551 events[2] |= 0x20; /* CSB Channel Map Change */
555 /* If Connectionless Slave Broadcast slave role is supported
556 * enable all necessary events for it.
558 if (lmp_csb_slave_capable(hdev)) {
559 events[2] |= 0x01; /* Synchronization Train Received */
560 events[2] |= 0x02; /* CSB Receive */
561 events[2] |= 0x04; /* CSB Timeout */
562 events[2] |= 0x08; /* Truncated Page Complete */
566 /* Enable Authenticated Payload Timeout Expired event if supported */
567 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
572 /* Some Broadcom based controllers indicate support for Set Event
573 * Mask Page 2 command, but then actually do not support it. Since
574 * the default value is all bits set to zero, the command is only
575 * required if the event mask has to be changed. In case no change
576 * to the event mask is needed, skip this command.
579 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
580 sizeof(events), events);
/* Third controller init stage.  Sets up the page-1 event mask, reads
 * stored link keys / link policy / page scan settings when supported,
 * then for LE-capable controllers builds the LE event mask from
 * feature and command bits, reads LE controller parameters (adv TX
 * power, white list, data length), enables host LE support, and
 * finally reads extended feature pages beyond page 1.
 */
583 static int hci_init3_req(struct hci_request *req, unsigned long opt)
585 struct hci_dev *hdev = req->hdev;
588 hci_setup_event_mask(req);
590 if (hdev->commands[6] & 0x20 &&
591 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
592 struct hci_cp_read_stored_link_key cp;
594 bacpy(&cp.bdaddr, BDADDR_ANY);
596 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
599 if (hdev->commands[5] & 0x10)
600 hci_setup_link_policy(req);
602 if (hdev->commands[8] & 0x01)
603 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
605 /* Some older Broadcom based Bluetooth 1.2 controllers do not
606 * support the Read Page Scan Type command. Check support for
607 * this command in the bit mask of supported commands.
609 if (hdev->commands[13] & 0x01)
610 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
612 if (lmp_le_capable(hdev)) {
/* Build the LE event mask one capability at a time */
615 memset(events, 0, sizeof(events));
617 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
618 events[0] |= 0x10; /* LE Long Term Key Request */
620 /* If controller supports the Connection Parameters Request
621 * Link Layer Procedure, enable the corresponding event.
623 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
624 events[0] |= 0x20; /* LE Remote Connection
628 /* If the controller supports the Data Length Extension
629 * feature, enable the corresponding event.
631 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
632 events[0] |= 0x40; /* LE Data Length Change */
634 /* If the controller supports Extended Scanner Filter
635 * Policies, enable the correspondig event.
637 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
638 events[1] |= 0x04; /* LE Direct Advertising
642 /* If the controller supports Channel Selection Algorithm #2
643 * feature, enable the corresponding event.
645 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
646 events[2] |= 0x08; /* LE Channel Selection
650 /* If the controller supports the LE Set Scan Enable command,
651 * enable the corresponding advertising report event.
653 if (hdev->commands[26] & 0x08)
654 events[0] |= 0x02; /* LE Advertising Report */
656 /* If the controller supports the LE Create Connection
657 * command, enable the corresponding event.
659 if (hdev->commands[26] & 0x10)
660 events[0] |= 0x01; /* LE Connection Complete */
662 /* If the controller supports the LE Connection Update
663 * command, enable the corresponding event.
665 if (hdev->commands[27] & 0x04)
666 events[0] |= 0x04; /* LE Connection Update
670 /* If the controller supports the LE Read Remote Used Features
671 * command, enable the corresponding event.
673 if (hdev->commands[27] & 0x20)
674 events[0] |= 0x08; /* LE Read Remote Used
678 /* If the controller supports the LE Read Local P-256
679 * Public Key command, enable the corresponding event.
681 if (hdev->commands[34] & 0x02)
682 events[0] |= 0x80; /* LE Read Local P-256
683 * Public Key Complete
686 /* If the controller supports the LE Generate DHKey
687 * command, enable the corresponding event.
689 if (hdev->commands[34] & 0x04)
690 events[1] |= 0x01; /* LE Generate DHKey Complete */
692 /* If the controller supports the LE Set Default PHY or
693 * LE Set PHY commands, enable the corresponding event.
695 if (hdev->commands[35] & (0x20 | 0x40))
696 events[1] |= 0x08; /* LE PHY Update Complete */
698 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
701 if (hdev->commands[25] & 0x40) {
702 /* Read LE Advertising Channel TX Power */
703 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
706 if (hdev->commands[26] & 0x40) {
707 /* Read LE White List Size */
708 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
712 if (hdev->commands[26] & 0x80) {
713 /* Clear LE White List */
714 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
717 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
718 /* Read LE Maximum Data Length */
719 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
721 /* Read LE Suggested Default Data Length */
722 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
725 hci_set_le_support(req);
728 /* Read features beyond page 1 if available */
729 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
730 struct hci_cp_read_local_ext_features cp;
733 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Fourth controller init stage.  Deletes stored link keys (when
 * supported and not quirked), sets event mask page 2, reads the codec
 * list and MWS transport configuration, reads synchronization train
 * parameters, enables Secure Connections, writes the suggested
 * default LE data length, and sets the default PHY preferences.
 */
740 static int hci_init4_req(struct hci_request *req, unsigned long opt)
742 struct hci_dev *hdev = req->hdev;
744 /* Some Broadcom based Bluetooth controllers do not support the
745 * Delete Stored Link Key command. They are clearly indicating its
746 * absence in the bit mask of supported commands.
748 * Check the supported commands and only if the the command is marked
749 * as supported send it. If not supported assume that the controller
750 * does not have actual support for stored link keys which makes this
751 * command redundant anyway.
753 * Some controllers indicate that they support handling deleting
754 * stored link keys, but they don't. The quirk lets a driver
755 * just disable this command.
757 if (hdev->commands[6] & 0x80 &&
758 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
759 struct hci_cp_delete_stored_link_key cp;
761 bacpy(&cp.bdaddr, BDADDR_ANY);
762 cp.delete_all = 0x01;
763 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
767 /* Set event mask page 2 if the HCI command for it is supported */
768 if (hdev->commands[22] & 0x04)
769 hci_set_event_mask_page_2(req);
771 /* Read local codec list if the HCI command is supported */
772 if (hdev->commands[29] & 0x20)
773 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
775 /* Get MWS transport configuration if the HCI command is supported */
776 if (hdev->commands[30] & 0x08)
777 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
779 /* Check for Synchronization Train support */
780 if (lmp_sync_train_capable(hdev))
781 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
783 /* Enable Secure Connections if supported and configured */
784 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
785 bredr_sc_enabled(hdev)) {
788 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
789 sizeof(support), &support);
792 /* Set Suggested Default Data Length to maximum if supported */
793 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
794 struct hci_cp_le_write_def_data_len cp;
796 cp.tx_len = hdev->le_max_tx_len;
797 cp.tx_time = hdev->le_max_tx_time;
798 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
801 /* Set Default PHY parameters if command is supported */
802 if (hdev->commands[35] & 0x20) {
803 struct hci_cp_le_set_default_phy cp;
805 /* No transmitter PHY or receiver PHY preferences */
810 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
/* Run the full synchronous controller initialization: stages 1 and 2
 * for all controllers, stages 3 and 4 only for primary (non-AMP)
 * controllers, creating debugfs entries in the setup/config phases as
 * documented below.  Returns 0 on success or the failing stage's
 * error code.
 */
816 static int __hci_init(struct hci_dev *hdev)
820 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
824 if (hci_dev_test_flag(hdev, HCI_SETUP))
825 hci_debugfs_create_basic(hdev);
827 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
831 /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
832 * BR/EDR/LE type controllers. AMP controllers only need the
833 * first two stages of init.
835 if (hdev->dev_type != HCI_PRIMARY)
838 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
842 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
846 /* This function is only called when the controller is actually in
847 * configured state. When the controller is marked as unconfigured,
848 * this initialization procedure is not run.
850 * It means that it is possible that a controller runs through its
851 * setup phase and then discovers missing settings. If that is the
852 * case, then this function will not be called. It then will only
853 * be called during the config phase.
855 * So only when in setup phase or config phase, create the debugfs
856 * entries and register the SMP channels.
858 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
859 !hci_dev_test_flag(hdev, HCI_CONFIG))
862 hci_debugfs_create_common(hdev);
864 if (lmp_bredr_capable(hdev))
865 hci_debugfs_create_bredr(hdev);
867 if (lmp_le_capable(hdev))
868 hci_debugfs_create_le(hdev);
/* Minimal init for unconfigured controllers: optional reset, read
 * local version, and read the BD address only when the driver can
 * change it via set_bdaddr.
 */
873 static int hci_init0_req(struct hci_request *req, unsigned long opt)
875 struct hci_dev *hdev = req->hdev;
877 BT_DBG("%s %ld", hdev->name, opt);
880 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
881 hci_reset_req(req, 0);
883 /* Read Local Version */
884 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
886 /* Read BD Address */
887 if (hdev->set_bdaddr)
888 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Synchronous init path for unconfigured controllers.  Raw devices
 * are skipped entirely; otherwise run hci_init0_req and create the
 * basic debugfs entries during the setup phase.
 */
893 static int __hci_unconf_init(struct hci_dev *hdev)
897 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
900 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
904 if (hci_dev_test_flag(hdev, HCI_SETUP))
905 hci_debugfs_create_basic(hdev);
/* Request callback: program inquiry/page scan enable from opt */
910 static int hci_scan_req(struct hci_request *req, unsigned long opt)
914 BT_DBG("%s %x", req->hdev->name, scan)
916 /* Inquiry and Page scans */
917 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request callback: set authentication enable from opt */
921 static int hci_auth_req(struct hci_request *req, unsigned long opt)
925 BT_DBG("%s %x", req->hdev->name, auth);
928 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request callback: set encryption mode from opt */
932 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
936 BT_DBG("%s %x", req->hdev->name, encrypt);
939 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request callback: write the default link policy taken from opt,
 * converted to little endian for the wire format.
 */
943 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
945 __le16 policy = cpu_to_le16(opt);
947 BT_DBG("%s %x", req->hdev->name, policy);
949 /* Default link policy */
950 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
954 /* Get HCI device by index.
955 * Device is held on return. */
/* Walks hci_dev_list under the read lock; the caller must drop the
 * returned reference with hci_dev_put().  Returns NULL when no device
 * with the given index exists.
 */
956 struct hci_dev *hci_dev_get(int index)
958 struct hci_dev *hdev = NULL, *d;
965 read_lock(&hci_dev_list_lock);
966 list_for_each_entry(d, &hci_dev_list, list) {
967 if (d->id == index) {
968 hdev = hci_dev_hold(d);
972 read_unlock(&hci_dev_list_lock);
976 /* ---- Inquiry support ---- */
/* Return true while a discovery procedure is in progress, i.e. the
 * discovery state machine is in FINDING or RESOLVING.
 */
978 bool hci_discovery_active(struct hci_dev *hdev)
980 struct discovery_state *discov = &hdev->discovery;
982 switch (discov->state) {
983 case DISCOVERY_FINDING:
984 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" events: stopped -> notify 0 (unless we never left
 * STARTING) and restart background scanning; finding -> notify 1.
 * No-op when the state does not change.
 */
992 void hci_discovery_set_state(struct hci_dev *hdev, int state)
994 int old_state = hdev->discovery.state;
996 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
998 if (old_state == state)
1001 hdev->discovery.state = state;
1004 case DISCOVERY_STOPPED:
1005 hci_update_background_scan(hdev);
1007 if (old_state != DISCOVERY_STARTING)
1008 mgmt_discovering(hdev, 0);
1010 case DISCOVERY_STARTING:
1012 case DISCOVERY_FINDING:
1013 mgmt_discovering(hdev, 1);
1015 case DISCOVERY_RESOLVING:
1017 case DISCOVERY_STOPPING:
/* Drop every entry from the inquiry cache and reinitialize the
 * unknown/resolve sub-lists.  The per-entry free is elided in this
 * extract -- presumably each entry is unlinked and kfree()d inside
 * the loop; TODO confirm against the full source.
 */
1022 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1024 struct discovery_state *cache = &hdev->discovery;
1025 struct inquiry_entry *p, *n;
1027 list_for_each_entry_safe(p, n, &cache->all, all) {
1032 INIT_LIST_HEAD(&cache->unknown);
1033 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry cache entry by Bluetooth address in the "all" list;
 * returns the entry or falls through (NULL) when not found.
 */
1036 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1039 struct discovery_state *cache = &hdev->discovery;
1040 struct inquiry_entry *e;
1042 BT_DBG("cache %p, %pMR", cache, bdaddr);
1044 list_for_each_entry(e, &cache->all, all) {
1045 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Like hci_inquiry_cache_lookup() but searches only entries whose
 * remote name is still unknown (the "unknown" sub-list).
 */
1052 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1055 struct discovery_state *cache = &hdev->discovery;
1056 struct inquiry_entry *e;
1058 BT_DBG("cache %p, %pMR", cache, bdaddr);
1060 list_for_each_entry(e, &cache->unknown, list) {
1061 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Search the name-resolve sub-list.  Passing BDADDR_ANY matches the
 * first entry in the given name_state; otherwise the entry with the
 * exact address is returned.
 */
1068 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1072 struct discovery_state *cache = &hdev->discovery;
1073 struct inquiry_entry *e;
1075 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1077 list_for_each_entry(e, &cache->resolve, list) {
1078 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1080 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert an entry into the resolve list so it stays ordered by
 * RSSI magnitude: the entry is unlinked, then placed before the first
 * non-pending entry with an equal-or-stronger |RSSI|.  The pos update
 * inside the loop is elided in this extract -- TODO confirm.
 */
1087 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1088 struct inquiry_entry *ie)
1090 struct discovery_state *cache = &hdev->discovery;
1091 struct list_head *pos = &cache->resolve;
1092 struct inquiry_entry *p;
1094 list_del(&ie->list);
1096 list_for_each_entry(p, &cache->resolve, list) {
1097 if (p->name_state != NAME_PENDING &&
1098 abs(p->data.rssi) >= abs(ie->data.rssi))
1103 list_add(&ie->list, pos);
/* Insert or refresh an inquiry-cache entry for a discovered device.
 * Existing entries get their RSSI refreshed (re-sorting the resolve
 * list when a name lookup is pending); new entries are allocated and
 * placed on the all/unknown lists according to whether the remote
 * name is known.  Returns mgmt device-found flags (legacy pairing /
 * confirm-name) derived from the entry state.
 */
1106 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1109 struct discovery_state *cache = &hdev->discovery;
1110 struct inquiry_entry *ie;
1113 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* Any remembered OOB data is stale once the device is re-discovered */
1115 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1117 if (!data->ssp_mode)
1118 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1120 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1122 if (!ie->data.ssp_mode)
1123 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1125 if (ie->name_state == NAME_NEEDED &&
1126 data->rssi != ie->data.rssi) {
1127 ie->data.rssi = data->rssi;
1128 hci_inquiry_cache_update_resolve(hdev, ie);
1134 /* Entry not in the cache. Add new one. */
1135 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
/* Allocation failure: ask userspace to confirm the name instead */
1137 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1141 list_add(&ie->all, &cache->all);
1144 ie->name_state = NAME_KNOWN;
1146 ie->name_state = NAME_NOT_KNOWN;
1147 list_add(&ie->list, &cache->unknown);
1151 if (name_known && ie->name_state != NAME_KNOWN &&
1152 ie->name_state != NAME_PENDING) {
1153 ie->name_state = NAME_KNOWN;
1154 list_del(&ie->list);
1157 memcpy(&ie->data, data, sizeof(*data));
1158 ie->timestamp = jiffies;
1159 cache->timestamp = jiffies;
1161 if (ie->name_state == NAME_NOT_KNOWN)
1162 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Serialize up to num inquiry-cache entries into buf as an array of
 * struct inquiry_info, returning the number copied.  The info pointer
 * advance and copied-count bookkeeping are elided in this extract.
 */
1168 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1170 struct discovery_state *cache = &hdev->discovery;
1171 struct inquiry_info *info = (struct inquiry_info *) buf;
1172 struct inquiry_entry *e;
1175 list_for_each_entry(e, &cache->all, all) {
1176 struct inquiry_data *data = &e->data;
1181 bacpy(&info->bdaddr, &data->bdaddr);
1182 info->pscan_rep_mode = data->pscan_rep_mode;
1183 info->pscan_period_mode = data->pscan_period_mode;
1184 info->pscan_mode = data->pscan_mode;
1185 memcpy(info->dev_class, data->dev_class, 3);
1186 info->clock_offset = data->clock_offset;
1192 BT_DBG("cache %p, copied %d", cache, copied);
/* Request callback: start an inquiry using the parameters passed via
 * opt (a struct hci_inquiry_req).  Bails out early when an inquiry is
 * already running on the controller.
 */
1196 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1198 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1199 struct hci_dev *hdev = req->hdev;
1200 struct hci_cp_inquiry cp;
1202 BT_DBG("%s", hdev->name);
1204 if (test_bit(HCI_INQUIRY, &hdev->flags))
1208 memcpy(&cp.lap, &ir->lap, 3);
1209 cp.length = ir->length;
1210 cp.num_rsp = ir->num_rsp;
1211 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* ioctl entry point (HCIINQUIRY): copies a struct hci_inquiry_req
 * from userspace, validates the device (not user-channel, configured,
 * primary, BR/EDR enabled), flushes a stale cache, runs the inquiry
 * synchronously, waits for completion, then dumps the cache back to
 * userspace.  Returns 0 or a negative errno.
 */
1216 int hci_inquiry(void __user *arg)
1218 __u8 __user *ptr = arg;
1219 struct hci_inquiry_req ir;
1220 struct hci_dev *hdev;
1221 int err = 0, do_inquiry = 0, max_rsp;
1225 if (copy_from_user(&ir, ptr, sizeof(ir)))
1228 hdev = hci_dev_get(ir.dev_id);
1232 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1237 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1242 if (hdev->dev_type != HCI_PRIMARY) {
1247 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
/* Flush when the cache is stale, empty, or the caller asked for it */
1253 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1254 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1255 hci_inquiry_cache_flush(hdev);
1258 hci_dev_unlock(hdev);
/* ir.length is in 1.28 s units; 2000 ms approximates that per unit */
1260 timeo = ir.length * msecs_to_jiffies(2000);
1263 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1268 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1269 * cleared). If it is interrupted by a signal, return -EINTR.
1271 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1272 TASK_INTERRUPTIBLE))
1276 /* for unlimited number of responses we will use buffer with
1279 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1281 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1282 * copy it to the user space.
1284 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1291 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1292 hci_dev_unlock(hdev);
1294 BT_DBG("num_rsp %d", ir.num_rsp);
1296 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1298 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Core power-on path: bring @hdev up under the request-sync lock, running
 * the driver's open/setup callbacks and the HCI init sequence, then notify
 * sockets and mgmt. Returns 0 or a negative errno (cleanup paths are only
 * partially visible in this extract).
 */
1311 static int hci_dev_do_open(struct hci_dev *hdev)
1315 BT_DBG("%s %p", hdev->name, hdev);
1317 hci_req_sync_lock(hdev);
/* A device being unregistered must not be powered on. */
1319 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
/* Pre-open sanity checks are skipped while still in SETUP/CONFIG stage. */
1324 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1325 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1326 /* Check for rfkill but allow the HCI setup stage to
1327 * proceed (which in itself doesn't cause any RF activity).
1329 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1334 /* Check for valid public address or a configured static
1335 * random adddress, but let the HCI setup proceed to
1336 * be able to determine if there is a public address
1339 * In case of user channel usage, it is not important
1340 * if a public address or static random address is
1343 * This check is only valid for BR/EDR controllers
1344 * since AMP controllers do not have an address.
1346 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1347 hdev->dev_type == HCI_PRIMARY &&
1348 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1349 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1350 ret = -EADDRNOTAVAIL;
/* Already up: nothing to do (early-exit path not visible here). */
1355 if (test_bit(HCI_UP, &hdev->flags)) {
/* Transport driver open; failure aborts the power-on. */
1360 if (hdev->open(hdev)) {
1365 set_bit(HCI_RUNNING, &hdev->flags);
1366 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
/* Allow exactly one outstanding command while HCI_INIT runs. */
1368 atomic_set(&hdev->cmd_cnt, 1);
1369 set_bit(HCI_INIT, &hdev->flags);
1371 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1372 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1375 ret = hdev->setup(hdev);
1377 /* The transport driver can set these quirks before
1378 * creating the HCI device or in its setup callback.
1380 * In case any of them is set, the controller has to
1381 * start up as unconfigured.
1383 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1384 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1385 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1387 /* For an unconfigured controller it is required to
1388 * read at least the version information provided by
1389 * the Read Local Version Information command.
1391 * If the set_bdaddr driver callback is provided, then
1392 * also the original Bluetooth public device address
1393 * will be read using the Read BD Address command.
1395 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1396 ret = __hci_unconf_init(hdev);
1399 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1400 /* If public address change is configured, ensure that
1401 * the address gets programmed. If the driver does not
1402 * support changing the public address, fail the power
1405 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1407 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1409 ret = -EADDRNOTAVAIL;
/* Normal (non-user-channel, configured) controllers run the full HCI
 * init sequence plus an optional driver post_init hook. */
1413 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1414 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1415 ret = __hci_init(hdev);
1416 if (!ret && hdev->post_init)
1417 ret = hdev->post_init(hdev);
1421 /* If the HCI Reset command is clearing all diagnostic settings,
1422 * then they need to be reprogrammed after the init procedure
1425 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1426 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1427 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1428 ret = hdev->set_diag(hdev, true);
1430 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark the device up and tell sockets/mgmt/LEDs. */
1434 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1435 set_bit(HCI_UP, &hdev->flags);
1436 hci_sock_dev_event(hdev, HCI_DEV_UP);
1437 hci_leds_update_powered(hdev, true);
1438 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1439 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1440 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1441 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1442 hci_dev_test_flag(hdev, HCI_MGMT) &&
1443 hdev->dev_type == HCI_PRIMARY) {
1444 ret = __hci_req_hci_power_on(hdev);
1445 mgmt_power_on(hdev, ret);
1448 /* Init failed, cleanup */
1449 flush_work(&hdev->tx_work);
1450 flush_work(&hdev->cmd_work);
1451 flush_work(&hdev->rx_work);
1453 skb_queue_purge(&hdev->cmd_q);
1454 skb_queue_purge(&hdev->rx_q);
/* Drop the command skb kept for matching completion events. */
1459 if (hdev->sent_cmd) {
1460 kfree_skb(hdev->sent_cmd);
1461 hdev->sent_cmd = NULL;
1464 clear_bit(HCI_RUNNING, &hdev->flags);
1465 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
/* Keep only HCI_RAW across the failed bring-up. */
1468 hdev->flags &= BIT(HCI_RAW);
1472 hci_req_sync_unlock(hdev);
1476 /* ---- HCI ioctl helpers ---- */
/* ioctl(HCIDEVUP) entry point: resolve @dev to an hci_dev, enforce the
 * unconfigured/user-channel policy, quiesce pending power work, then call
 * hci_dev_do_open(). Returns 0 or a negative errno.
 */
1478 int hci_dev_open(__u16 dev)
1480 struct hci_dev *hdev;
1483 hdev = hci_dev_get(dev);
1487 /* Devices that are marked as unconfigured can only be powered
1488 * up as user channel. Trying to bring them up as normal devices
1489 * will result into a failure. Only user channel operation is
1492 * When this function is called for a user channel, the flag
1493 * HCI_USER_CHANNEL will be set first before attempting to
1496 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1497 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1502 /* We need to ensure that no other power on/off work is pending
1503 * before proceeding to call hci_dev_do_open. This is
1504 * particularly important if the setup procedure has not yet
1507 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1508 cancel_delayed_work(&hdev->power_off);
1510 /* After this call it is guaranteed that the setup procedure
1511 * has finished. This means that error conditions like RFKILL
1512 * or no valid public or static random address apply.
1514 flush_workqueue(hdev->req_workqueue);
1516 /* For controllers not using the management interface and that
1517 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1518 * so that pairing works for them. Once the management interface
1519 * is in use this bit will be cleared again and userspace has
1520 * to explicitly enable it.
1522 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1523 !hci_dev_test_flag(hdev, HCI_MGMT))
1524 hci_dev_set_flag(hdev, HCI_BONDABLE);
1526 err = hci_dev_do_open(hdev);
1533 /* This function requires the caller holds hdev->lock */
/* Drop every LE conn_params entry from the pending-action list, releasing
 * any connection reference held by the entry first. */
1534 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1536 struct hci_conn_params *p;
1538 list_for_each_entry(p, &hdev->le_conn_params, list) {
/* Release the connection reference pinned by these params (guarded by a
 * p->conn check not visible in this extract). */
1540 hci_conn_drop(p->conn);
1541 hci_conn_put(p->conn);
/* Detach from the action list but keep the params themselves. */
1544 list_del_init(&p->action);
1547 BT_DBG("All LE pending actions cleared");
/* Core power-off path: optionally run the vendor shutdown hook, tear down
 * timers/work/queues, flush caches and connections, notify mgmt and
 * sockets, and leave the device in a clean powered-down state.
 *
 * NOTE(review): lossy extract - braces and some early-return lines between
 * the visible statements were dropped.
 */
1550 int hci_dev_do_close(struct hci_dev *hdev)
1554 BT_DBG("%s %p", hdev->name, hdev);
/* Vendor shutdown only applies to a normally-registered, up device. */
1556 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1557 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1558 test_bit(HCI_UP, &hdev->flags)) {
1559 /* Execute vendor specific shutdown routine */
1561 hdev->shutdown(hdev);
1564 cancel_delayed_work(&hdev->power_off);
1566 hci_request_cancel_all(hdev);
1567 hci_req_sync_lock(hdev);
/* If the device was already down, just stop the command timer and exit. */
1569 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1570 cancel_delayed_work_sync(&hdev->cmd_timer);
1571 hci_req_sync_unlock(hdev);
1575 hci_leds_update_powered(hdev, false);
1577 /* Flush RX and TX works */
1578 flush_work(&hdev->tx_work);
1579 flush_work(&hdev->rx_work);
/* Going down cancels any active discoverable window. */
1581 if (hdev->discov_timeout > 0) {
1582 hdev->discov_timeout = 0;
1583 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1584 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1587 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1588 cancel_delayed_work(&hdev->service_cache);
1590 if (hci_dev_test_flag(hdev, HCI_MGMT))
1591 cancel_delayed_work_sync(&hdev->rpa_expired);
1593 /* Avoid potential lockdep warnings from the *_flush() calls by
1594 * ensuring the workqueue is empty up front.
1596 drain_workqueue(hdev->workqueue);
1600 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1602 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
/* Tell mgmt about the power-off unless this is the auto-off timer or a
 * user-channel / non-mgmt device. */
1604 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1605 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1606 hci_dev_test_flag(hdev, HCI_MGMT))
1607 __mgmt_power_off(hdev);
1609 hci_inquiry_cache_flush(hdev);
1610 hci_pend_le_actions_clear(hdev);
1611 hci_conn_hash_flush(hdev);
1612 hci_dev_unlock(hdev);
1614 smp_unregister(hdev);
1616 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
/* Reset the command pipeline and, where the quirk demands it, issue a
 * final HCI Reset so the controller is left in a known state. */
1622 skb_queue_purge(&hdev->cmd_q);
1623 atomic_set(&hdev->cmd_cnt, 1);
1624 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1625 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1626 set_bit(HCI_INIT, &hdev->flags);
1627 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1628 clear_bit(HCI_INIT, &hdev->flags);
1631 /* flush cmd  work */
1632 flush_work(&hdev->cmd_work);
1635 skb_queue_purge(&hdev->rx_q);
1636 skb_queue_purge(&hdev->cmd_q);
1637 skb_queue_purge(&hdev->raw_q);
1639 /* Drop last sent command */
1640 if (hdev->sent_cmd) {
1641 cancel_delayed_work_sync(&hdev->cmd_timer);
1642 kfree_skb(hdev->sent_cmd);
1643 hdev->sent_cmd = NULL;
1646 clear_bit(HCI_RUNNING, &hdev->flags);
1647 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1649 /* After this point our queues are empty
1650 * and no tasks are scheduled. */
/* Preserve only HCI_RAW; all volatile flags are cleared below. */
1654 hdev->flags &= BIT(HCI_RAW);
1655 hci_dev_clear_volatile_flags(hdev);
1657 /* Controller radio is available but is currently powered down */
1658 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1660 memset(hdev->eir, 0, sizeof(hdev->eir));
1661 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1662 bacpy(&hdev->random_addr, BDADDR_ANY);
1664 hci_req_sync_unlock(hdev);
/* ioctl(HCIDEVDOWN) entry point: look up the device, reject user-channel
 * devices, cancel pending auto-off work and delegate to hci_dev_do_close().
 */
1670 int hci_dev_close(__u16 dev)
1672 struct hci_dev *hdev;
1675 hdev = hci_dev_get(dev);
1679 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
/* An explicit close supersedes any scheduled auto power-off. */
1684 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1685 cancel_delayed_work(&hdev->power_off);
1687 err = hci_dev_do_close(hdev);
/* Perform a synchronous HCI Reset: drop all queued traffic, flush the
 * inquiry cache and connection hash, zero the flow-control counters and
 * issue the Reset command under the request-sync lock.
 */
1694 static int hci_dev_do_reset(struct hci_dev *hdev)
1698 BT_DBG("%s %p", hdev->name, hdev);
1700 hci_req_sync_lock(hdev);
1703 skb_queue_purge(&hdev->rx_q);
1704 skb_queue_purge(&hdev->cmd_q);
1706 /* Avoid potential lockdep warnings from the *_flush() calls by
1707 * ensuring the workqueue is empty up front.
1709 drain_workqueue(hdev->workqueue);
1712 hci_inquiry_cache_flush(hdev);
1713 hci_conn_hash_flush(hdev);
1714 hci_dev_unlock(hdev);
/* Reset the command window and per-link-type packet budgets. */
1719 atomic_set(&hdev->cmd_cnt, 1);
1720 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1722 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1724 hci_req_sync_unlock(hdev);
/* ioctl(HCIDEVRESET) entry point: validate that the device is up and not
 * user-channel or unconfigured, then run hci_dev_do_reset().
 */
1728 int hci_dev_reset(__u16 dev)
1730 struct hci_dev *hdev;
1733 hdev = hci_dev_get(dev);
1737 if (!test_bit(HCI_UP, &hdev->flags)) {
1742 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1747 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1752 err = hci_dev_do_reset(hdev);
/* ioctl(HCIDEVRESTAT) entry point: zero the device's traffic statistics
 * unless it is in user-channel or unconfigured state.
 */
1759 int hci_dev_reset_stat(__u16 dev)
1761 struct hci_dev *hdev;
1764 hdev = hci_dev_get(dev);
1768 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1773 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1778 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Mirror a legacy WRITE_SCAN_ENABLE change into the CONNECTABLE and
 * DISCOVERABLE flags and, when mgmt is active, refresh advertising data
 * and emit a New Settings event.
 */
1785 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1787 bool conn_changed, discov_changed;
1789 BT_DBG("%s scan 0x%02x", hdev->name, scan)
1791 if ((scan & SCAN_PAGE))
1792 conn_changed = !hci_dev_test_and_set_flag(hdev,
1795 conn_changed = hci_dev_test_and_clear_flag(hdev,
1798 if ((scan & SCAN_INQUIRY)) {
1799 discov_changed = !hci_dev_test_and_set_flag(hdev,
/* Dropping inquiry scan also clears limited discoverable mode. */
1802 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1803 discov_changed = hci_dev_test_and_clear_flag(hdev,
/* Nothing further to sync without the management interface. */
1807 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1810 if (conn_changed || discov_changed) {
1811 /* In case this was disabled through mgmt */
1812 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1814 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1815 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1817 mgmt_new_settings(hdev);
/* Dispatcher for the legacy HCISET* ioctls: copies a hci_dev_req from
 * userspace and applies the requested setting, either via a synchronous
 * HCI request or by writing hdev fields directly.
 *
 * NOTE(review): the switch/case labels for most branches were dropped by
 * the extraction; only HCISETLINKMODE's label is visible.
 */
1821 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1823 struct hci_dev *hdev;
1824 struct hci_dev_req dr;
1827 if (copy_from_user(&dr, arg, sizeof(dr)))
1830 hdev = hci_dev_get(dr.dev_id);
1834 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1839 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1844 if (hdev->dev_type != HCI_PRIMARY) {
1849 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
/* HCISETAUTH (presumably): toggle authentication via Write Auth Enable. */
1856 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1857 HCI_INIT_TIMEOUT, NULL);
/* Encryption requires LMP support and authentication enabled first. */
1861 if (!lmp_encrypt_capable(hdev)) {
1866 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1867 /* Auth must be enabled first */
1868 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1869 HCI_INIT_TIMEOUT, NULL);
1874 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1875 HCI_INIT_TIMEOUT, NULL);
/* Scan-enable change; afterwards sync the mgmt-visible flags. */
1879 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1880 HCI_INIT_TIMEOUT, NULL);
1882 /* Ensure that the connectable and discoverable states
1883 * get correctly modified as this was a non-mgmt change.
1886 hci_update_scan_state(hdev, dr.dev_opt);
1890 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1891 HCI_INIT_TIMEOUT, NULL);
1894 case HCISETLINKMODE:
1895 hdev->link_mode = ((__u16) dr.dev_opt) &
1896 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1900 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high 16 bits and packet count in the low. */
1904 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1905 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1909 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1910 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* ioctl(HCIGETDEVLIST): build a list of (dev_id, flags) pairs for every
 * registered controller and copy it back to userspace.
 */
1923 int hci_get_dev_list(void __user *arg)
1925 struct hci_dev *hdev;
1926 struct hci_dev_list_req *dl;
1927 struct hci_dev_req *dr;
1928 int n = 0, size, err;
/* First field of the request is the caller's capacity (dev_num). */
1931 if (get_user(dev_num, (__u16 __user *) arg))
/* Bound the allocation: reject zero or absurdly large counts. */
1934 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1937 size = sizeof(*dl) + dev_num * sizeof(*dr);
1939 dl = kzalloc(size, GFP_KERNEL);
1945 read_lock(&hci_dev_list_lock);
1946 list_for_each_entry(hdev, &hci_dev_list, list) {
1947 unsigned long flags = hdev->flags;
1949 /* When the auto-off is configured it means the transport
1950 * is running, but in that case still indicate that the
1951 * device is actually down.
1953 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1954 flags &= ~BIT(HCI_UP);
1956 (dr + n)->dev_id  = hdev->id;
1957 (dr + n)->dev_opt = flags;
1962 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of entries actually filled in. */
1965 size = sizeof(*dl) + n * sizeof(*dr);
1967 err = copy_to_user(arg, dl, size);
1970 return err ? -EFAULT : 0;
/* ioctl(HCIGETDEVINFO): fill a hci_dev_info snapshot (address, type,
 * flags, MTUs, stats, features) for one controller and copy it to user.
 */
1973 int hci_get_dev_info(void __user *arg)
1975 struct hci_dev *hdev;
1976 struct hci_dev_info di;
1977 unsigned long flags;
1980 if (copy_from_user(&di, arg, sizeof(di)))
1983 hdev = hci_dev_get(di.dev_id);
1987 /* When the auto-off is configured it means the transport
1988 * is running, but in that case still indicate that the
1989 * device is actually down.
1991 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1992 flags = hdev->flags & ~BIT(HCI_UP);
1994 flags = hdev->flags;
1996 strcpy(di.name, hdev->name);
1997 di.bdaddr   = hdev->bdaddr;
/* Low nibble = bus type, next two bits = device type. */
1998 di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2000 di.pkt_type = hdev->pkt_type;
/* LE-only controllers report their LE buffer settings in the ACL slots. */
2001 if (lmp_bredr_capable(hdev)) {
2002 di.acl_mtu  = hdev->acl_mtu;
2003 di.acl_pkts = hdev->acl_pkts;
2004 di.sco_mtu  = hdev->sco_mtu;
2005 di.sco_pkts = hdev->sco_pkts;
2007 di.acl_mtu  = hdev->le_mtu;
2008 di.acl_pkts = hdev->le_pkts;
2012 di.link_policy = hdev->link_policy;
2013 di.link_mode   = hdev->link_mode;
2015 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2016 memcpy(&di.features, &hdev->features, sizeof(di.features));
2018 if (copy_to_user(arg, &di, sizeof(di)))
2026 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: track the RFKILLED flag and power the device down when
 * it gets blocked (unless it is in setup/config or owned by a user
 * channel).
 */
2028 static int hci_rfkill_set_block(void *data, bool blocked)
2030 struct hci_dev *hdev = data;
2032 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
/* User-channel owners manage power themselves; don't interfere. */
2034 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2038 hci_dev_set_flag(hdev, HCI_RFKILLED);
2039 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2040 !hci_dev_test_flag(hdev, HCI_CONFIG))
2041 hci_dev_do_close(hdev);
2043 hci_dev_clear_flag(hdev, HCI_RFKILLED);
/* rfkill operations table: only the block/unblock hook is implemented. */
2049 static const struct rfkill_ops hci_rfkill_ops = {
2050 .set_block = hci_rfkill_set_block,
/* Deferred power-on work item. Brings the device up, re-checks the error
 * conditions that are ignored during setup (rfkill, unconfigured, missing
 * address), arms the auto-off timer when appropriate, and emits the
 * Index Added event once setup/config completes.
 */
2053 static void hci_power_on(struct work_struct *work)
2055 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2058 BT_DBG("%s", hdev->name);
/* Already up with mgmt and a pending auto-off: just rerun the mgmt
 * power-on request instead of a full open. */
2060 if (test_bit(HCI_UP, &hdev->flags) &&
2061 hci_dev_test_flag(hdev, HCI_MGMT) &&
2062 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2063 cancel_delayed_work(&hdev->power_off);
2064 hci_req_sync_lock(hdev);
2065 err = __hci_req_hci_power_on(hdev);
2066 hci_req_sync_unlock(hdev);
2067 mgmt_power_on(hdev, err);
2071 err = hci_dev_do_open(hdev);
2074 mgmt_set_powered_failed(hdev, err);
2075 hci_dev_unlock(hdev);
2079 /* During the HCI setup phase, a few error conditions are
2080 * ignored and they need to be checked now. If they are still
2081 * valid, it is important to turn the device back off.
2083 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2084 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2085 (hdev->dev_type == HCI_PRIMARY &&
2086 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2087 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2088 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2089 hci_dev_do_close(hdev);
2090 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2091 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2092 HCI_AUTO_OFF_TIMEOUT);
2095 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2096 /* For unconfigured devices, set the HCI_RAW flag
2097 * so that userspace can easily identify them.
2099 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2100 set_bit(HCI_RAW, &hdev->flags);
2102 /* For fully configured devices, this will send
2103 * the Index Added event. For unconfigured devices,
2104 * it will send Unconfigued Index Added event.
2106 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2107 * and no event will be send.
2109 mgmt_index_added(hdev);
2110 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2111 /* When the controller is now configured, then it
2112 * is important to clear the HCI_RAW flag.
2114 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2115 clear_bit(HCI_RAW, &hdev->flags);
2117 /* Powering on the controller with HCI_CONFIG set only
2118 * happens with the transition from unconfigured to
2119 * configured. This will send the Index Added event.
2121 mgmt_index_added(hdev);
/* Deferred power-off work item (auto-off timer target): simply closes the
 * device. */
2125 static void hci_power_off(struct work_struct *work)
2127 struct hci_dev *hdev = container_of(work, struct hci_dev,
2130 BT_DBG("%s", hdev->name);
2132 hci_dev_do_close(hdev);
/* Work item run after a controller Hardware Error event: let the driver
 * handle it if it provides hw_error, otherwise log the code, then bounce
 * the device (close + reopen) to recover.
 */
2135 static void hci_error_reset(struct work_struct *work)
2137 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2139 BT_DBG("%s", hdev->name);
2142 hdev->hw_error(hdev, hdev->hw_error_code);
2144 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
/* Only reopen if the close succeeded. */
2146 if (hci_dev_do_close(hdev))
2149 hci_dev_do_open(hdev);
/* Free every registered service UUID on this device. */
2152 void hci_uuids_clear(struct hci_dev *hdev)
2154 struct bt_uuid *uuid, *tmp;
2156 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2157 list_del(&uuid->list);
/* Remove all stored BR/EDR link keys, freeing each via RCU so concurrent
 * readers stay safe. */
2162 void hci_link_keys_clear(struct hci_dev *hdev)
2164 struct link_key *key;
2166 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2167 list_del_rcu(&key->list);
2168 kfree_rcu(key, rcu);
/* Remove all stored SMP Long Term Keys (RCU-safe deletion). */
2172 void hci_smp_ltks_clear(struct hci_dev *hdev)
2176 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2177 list_del_rcu(&k->list);
/* Remove all stored Identity Resolving Keys (RCU-safe deletion). */
2182 void hci_smp_irks_clear(struct hci_dev *hdev)
2186 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2187 list_del_rcu(&k->list);
/* Look up the stored BR/EDR link key for @bdaddr; NULL-return path is not
 * visible in this extract. */
2192 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2197 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2198 if (bacmp(bdaddr, &k->bdaddr) == 0) {
/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and both sides' authentication
 * requirements (bonding intent).
 */
2208 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2209 u8 key_type, u8 old_key_type)
/* Legacy key types (0x00-0x02) are always kept. */
2212 if (key_type < 0x03)
2215 /* Debug keys are insecure so don't store them persistently */
2216 if (key_type == HCI_LK_DEBUG_COMBINATION)
2219 /* Changed combination key and there's no previous one */
2220 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2223 /* Security mode 3 case */
2227 /* BR/EDR key derived using SC from an LE link */
2228 if (conn->type == LE_LINK)
2231 /* Neither local nor remote side had no-bonding as requirement */
2232 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2235 /* Local side had dedicated bonding as requirement */
2236 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2239 /* Remote side had dedicated bonding as requirement */
2240 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2243 /* If none of the above criteria match, then don't store the key
/* Map an SMP LTK type to the HCI role it was distributed for: a plain
 * SMP_LTK belongs to the master role, anything else to the slave role. */
2248 static u8 ltk_role(u8 type)
2250 if (type == SMP_LTK)
2251 return HCI_ROLE_MASTER;
2253 return HCI_ROLE_SLAVE;
/* Find the stored LTK matching @bdaddr/@addr_type that is usable in
 * @role. Secure Connections keys apply to both roles, so they match
 * regardless of the role they were created for. */
2256 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2257 u8 addr_type, u8 role)
2262 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2263 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2266 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
/* Resolve a Resolvable Private Address to its IRK. First try a cheap
 * match on the cached RPA; if that fails, cryptographically test each IRK
 * and cache the RPA on the one that matches. */
2276 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2278 struct smp_irk *irk;
2281 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2282 if (!bacmp(&irk->rpa, rpa)) {
2288 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2289 if (smp_irk_matches(hdev, irk->val, rpa)) {
/* Remember the resolved RPA so the next lookup is a plain compare. */
2290 bacpy(&irk->rpa, rpa);
/* Look up the IRK stored for a given identity address and address type. */
2300 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2303 struct smp_irk *irk;
2305 /* Identity Address must be public or static random */
/* Static random addresses have the two most significant bits set. */
2306 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2310 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2311 if (addr_type == irk->addr_type &&
2312 bacmp(bdaddr, &irk->bdaddr) == 0) {
/* Store (or update) a BR/EDR link key for @bdaddr. Reuses an existing
 * entry when present, otherwise allocates a new one. On return,
 * *persistent (if given) says whether the key should survive reboot.
 */
2322 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2323 bdaddr_t *bdaddr, u8 *val, u8 type,
2324 u8 pin_len, bool *persistent)
2326 struct link_key *key, *old_key;
2329 old_key = hci_find_link_key(hdev, bdaddr);
2331 old_key_type = old_key->type;
/* 0xff means "no previous key" when none was stored and no conn hint. */
2334 old_key_type = conn ? conn->key_type : 0xff;
2335 key = kzalloc(sizeof(*key), GFP_KERNEL);
2338 list_add_rcu(&key->list, &hdev->link_keys);
2341 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2343 /* Some buggy controller combinations generate a changed
2344 * combination key for legacy pairing even when there's no
2346 if (type == HCI_LK_CHANGED_COMBINATION &&
2347 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2348 type = HCI_LK_COMBINATION;
2350 conn->key_type = type;
2353 bacpy(&key->bdaddr, bdaddr);
2354 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2355 key->pin_len = pin_len;
/* A changed-combination key keeps the original stored type. */
2357 if (type == HCI_LK_CHANGED_COMBINATION)
2358 key->type = old_key_type;
2363 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP Long Term Key for @bdaddr/@addr_type. An
 * existing key for the same address and role is reused; otherwise a new
 * entry is allocated and linked in.
 */
2369 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2370 u8 addr_type, u8 type, u8 authenticated,
2371 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2373 struct smp_ltk *key, *old_key;
2374 u8 role = ltk_role(type);
2376 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2380 key = kzalloc(sizeof(*key), GFP_KERNEL);
2383 list_add_rcu(&key->list, &hdev->long_term_keys);
2386 bacpy(&key->bdaddr, bdaddr);
2387 key->bdaddr_type = addr_type;
2388 memcpy(key->val, tk, sizeof(key->val));
2389 key->authenticated = authenticated;
2392 key->enc_size = enc_size;
/* Store (or update) an Identity Resolving Key for @bdaddr/@addr_type,
 * recording the currently-known RPA alongside it.
 */
2398 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2399 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2401 struct smp_irk *irk;
2403 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
/* Not found: allocate and link a fresh entry. */
2405 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2409 bacpy(&irk->bdaddr, bdaddr);
2410 irk->addr_type = addr_type;
2412 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2415 memcpy(irk->val, val, 16);
2416 bacpy(&irk->rpa, rpa);
/* Delete the stored BR/EDR link key for @bdaddr, if any (RCU-safe). */
2421 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2423 struct link_key *key;
2425 key = hci_find_link_key(hdev, bdaddr);
2429 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2431 list_del_rcu(&key->list);
2432 kfree_rcu(key, rcu);
/* Delete every stored LTK matching @bdaddr/@bdaddr_type. Returns 0 if at
 * least one key was removed, -ENOENT otherwise. */
2437 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2442 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2443 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2446 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2448 list_del_rcu(&k->list);
2453 return removed ? 0 : -ENOENT;
/* Delete every stored IRK matching @bdaddr/@addr_type (RCU-safe). */
2456 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2460 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2461 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2464 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2466 list_del_rcu(&k->list);
/* Report whether we hold pairing material for @bdaddr: a link key for
 * BR/EDR addresses, or an LTK for LE addresses (after resolving the
 * address to its identity via a stored IRK, if any).
 */
2471 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2474 struct smp_irk *irk;
2477 if (type == BDADDR_BREDR) {
2478 if (hci_find_link_key(hdev, bdaddr))
2483 /* Convert to HCI addr type which struct smp_ltk uses */
2484 if (type == BDADDR_LE_PUBLIC)
2485 addr_type = ADDR_LE_DEV_PUBLIC;
2487 addr_type = ADDR_LE_DEV_RANDOM;
/* Resolve a private address to the paired identity address first. */
2489 irk = hci_get_irk(hdev, bdaddr, addr_type);
2491 bdaddr = &irk->bdaddr;
2492 addr_type = irk->addr_type;
2496 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2497 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2507 /* HCI command timer function */
/* Fires when the controller fails to answer a command in time: log the
 * stuck opcode (if known) and re-open the command window so the queue can
 * make progress again. */
2508 static void hci_cmd_timeout(struct work_struct *work)
2510 struct hci_dev *hdev = container_of(work, struct hci_dev,
2513 if (hdev->sent_cmd) {
2514 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2515 u16 opcode = __le16_to_cpu(sent->opcode);
2517 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2519 bt_dev_err(hdev, "command tx timeout");
/* Allow one command again and kick the command worker. */
2522 atomic_set(&hdev->cmd_cnt, 1);
2523 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Find the stored remote OOB pairing data for @bdaddr/@bdaddr_type. */
2526 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2527 bdaddr_t *bdaddr, u8 bdaddr_type)
2529 struct oob_data *data;
2531 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2532 if (bacmp(bdaddr, &data->bdaddr) != 0)
2534 if (data->bdaddr_type != bdaddr_type)
/* Delete the stored remote OOB data for @bdaddr, if present. */
2542 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2545 struct oob_data *data;
2547 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2551 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2553 list_del(&data->list);
/* Free all stored remote OOB pairing data entries. */
2559 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2561 struct oob_data *data, *n;
2563 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2564 list_del(&data->list);
/* Store (or update) remote OOB pairing data for @bdaddr. The P-192 and
 * P-256 hash/randomizer pairs are each optional; data->present encodes
 * which are valid: 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both.
 */
2569 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2570 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2571 u8 *hash256, u8 *rand256)
2573 struct oob_data *data;
2575 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
/* No existing entry: allocate and link a new one. */
2577 data = kmalloc(sizeof(*data), GFP_KERNEL);
2581 bacpy(&data->bdaddr, bdaddr);
2582 data->bdaddr_type = bdaddr_type;
2583 list_add(&data->list, &hdev->remote_oob_data);
2586 if (hash192 && rand192) {
2587 memcpy(data->hash192, hash192, sizeof(data->hash192));
2588 memcpy(data->rand192, rand192, sizeof(data->rand192));
2589 if (hash256 && rand256)
2590 data->present = 0x03;
2592 memset(data->hash192, 0, sizeof(data->hash192));
2593 memset(data->rand192, 0, sizeof(data->rand192));
2594 if (hash256 && rand256)
2595 data->present = 0x02;
2597 data->present = 0x00;
2600 if (hash256 && rand256) {
2601 memcpy(data->hash256, hash256, sizeof(data->hash256));
2602 memcpy(data->rand256, rand256, sizeof(data->rand256));
2604 memset(data->hash256, 0, sizeof(data->hash256));
2605 memset(data->rand256, 0, sizeof(data->rand256));
2606 if (hash192 && rand192)
2607 data->present = 0x01;
2610 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2615 /* This function requires the caller holds hdev->lock */
/* Return the advertising instance with the given instance id, or NULL
 * (the NULL return is outside this extract). */
2616 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2618 struct adv_info *adv_instance;
2620 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2621 if (adv_instance->instance == instance)
2622 return adv_instance;
2628 /* This function requires the caller holds hdev->lock */
/* Return the advertising instance following @instance, wrapping from the
 * last list entry back to the first (round-robin rotation). */
2629 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2631 struct adv_info *cur_instance;
2633 cur_instance = hci_find_adv_instance(hdev, instance);
2637 if (cur_instance == list_last_entry(&hdev->adv_instances,
2638 struct adv_info, list))
2639 return list_first_entry(&hdev->adv_instances,
2640 struct adv_info, list);
2642 return list_next_entry(cur_instance, list);
2645 /* This function requires the caller holds hdev->lock */
/* Remove and free one advertising instance; if it was the current one,
 * stop its expiry timer and reset cur_adv_instance. */
2646 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2648 struct adv_info *adv_instance;
2650 adv_instance = hci_find_adv_instance(hdev, instance);
2654 BT_DBG("%s removing %dMR", hdev->name, instance);
2656 if (hdev->cur_adv_instance == instance) {
2657 if (hdev->adv_instance_timeout) {
2658 cancel_delayed_work(&hdev->adv_instance_expire);
2659 hdev->adv_instance_timeout = 0;
2661 hdev->cur_adv_instance = 0x00;
2664 list_del(&adv_instance->list);
2665 kfree(adv_instance);
2667 hdev->adv_instance_cnt--;
2672 /* This function requires the caller holds hdev->lock */
/* Remove and free every advertising instance and cancel any pending
 * instance-expiry timer. */
2673 void hci_adv_instances_clear(struct hci_dev *hdev)
2675 struct adv_info *adv_instance, *n;
2677 if (hdev->adv_instance_timeout) {
2678 cancel_delayed_work(&hdev->adv_instance_expire);
2679 hdev->adv_instance_timeout = 0;
2682 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2683 list_del(&adv_instance->list);
2684 kfree(adv_instance);
2687 hdev->adv_instance_cnt = 0;
2688 hdev->cur_adv_instance = 0x00;
2691 /* This function requires the caller holds hdev->lock */
/* Add a new advertising instance or update an existing one with fresh
 * flags, adv/scan-response payloads, timeout and duration. Instance ids
 * must be 1..HCI_MAX_ADV_INSTANCES.
 */
2692 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2693 u16 adv_data_len, u8 *adv_data,
2694 u16 scan_rsp_len, u8 *scan_rsp_data,
2695 u16 timeout, u16 duration)
2697 struct adv_info *adv_instance;
2699 adv_instance = hci_find_adv_instance(hdev, instance);
/* Updating in place: wipe the old payloads before copying new ones. */
2701 memset(adv_instance->adv_data, 0,
2702 sizeof(adv_instance->adv_data));
2703 memset(adv_instance->scan_rsp_data, 0,
2704 sizeof(adv_instance->scan_rsp_data));
/* New instance: enforce the count limit and the valid id range. */
2706 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2707 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2710 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2714 adv_instance->pending = true;
2715 adv_instance->instance = instance;
2716 list_add(&adv_instance->list, &hdev->adv_instances);
2717 hdev->adv_instance_cnt++;
2720 adv_instance->flags = flags;
2721 adv_instance->adv_data_len = adv_data_len;
2722 adv_instance->scan_rsp_len = scan_rsp_len;
2725 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2728 memcpy(adv_instance->scan_rsp_data,
2729 scan_rsp_data, scan_rsp_len);
2731 adv_instance->timeout = timeout;
2732 adv_instance->remaining_time = timeout;
/* Zero duration falls back to the stack default. */
2735 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2737 adv_instance->duration = duration;
2739 BT_DBG("%s for %dMR", hdev->name, instance);
/* Find the entry matching @bdaddr and @type in a generic bdaddr list
 * (white/black lists etc.). */
2744 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2745 bdaddr_t *bdaddr, u8 type)
2747 struct bdaddr_list *b;
2749 list_for_each_entry(b, bdaddr_list, list) {
2750 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Remove (and presumably free - the free line is outside this extract)
 * every entry in a bdaddr list. */
2757 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2759 struct bdaddr_list *b, *n;
2761 list_for_each_entry_safe(b, n, bdaddr_list, list) {
/* Add @bdaddr/@type to a bdaddr list. Rejects BDADDR_ANY and duplicate
 * entries (error returns are outside this extract). */
2767 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2769 struct bdaddr_list *entry;
2771 if (!bacmp(bdaddr, BDADDR_ANY))
2774 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2777 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2781 bacpy(&entry->bdaddr, bdaddr);
2782 entry->bdaddr_type = type;
2784 list_add(&entry->list, list);
/* Remove @bdaddr/@type from a bdaddr list; BDADDR_ANY clears the whole
 * list instead. */
2789 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2791 struct bdaddr_list *entry;
2793 if (!bacmp(bdaddr, BDADDR_ANY)) {
2794 hci_bdaddr_list_clear(list);
2798 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2802 list_del(&entry->list);
2808 /* This function requires the caller holds hdev->lock */
/* Find the LE connection parameters stored for @addr/@addr_type. */
2809 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2810 bdaddr_t *addr, u8 addr_type)
2812 struct hci_conn_params *params;
2814 list_for_each_entry(params, &hdev->le_conn_params, list) {
2815 if (bacmp(&params->addr, addr) == 0 &&
2816 params->addr_type == addr_type) {
2824 /* This function requires the caller holds hdev->lock */
/* Find @addr/@addr_type in a pending-action list (walks the 'action'
 * links, not the main params list). */
2825 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2826 bdaddr_t *addr, u8 addr_type)
2828 struct hci_conn_params *param;
2830 list_for_each_entry(param, list, action) {
2831 if (bacmp(&param->addr, addr) == 0 &&
2832 param->addr_type == addr_type)
2839 /* This function requires the caller holds hdev->lock */
/* Get-or-create the LE connection parameters for @addr/@addr_type. A new
 * entry is seeded from the controller-wide defaults with auto-connect
 * disabled. Returns the params, or NULL on allocation failure.
 */
2840 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2841 bdaddr_t *addr, u8 addr_type)
2843 struct hci_conn_params *params;
2845 params = hci_conn_params_lookup(hdev, addr, addr_type);
2849 params = kzalloc(sizeof(*params), GFP_KERNEL);
2851 bt_dev_err(hdev, "out of memory");
2855 bacpy(&params->addr, addr);
2856 params->addr_type = addr_type;
2858 list_add(&params->list, &hdev->le_conn_params);
2859 INIT_LIST_HEAD(&params->action);
/* Seed per-connection settings from the device-wide defaults. */
2861 params->conn_min_interval = hdev->le_conn_min_interval;
2862 params->conn_max_interval = hdev->le_conn_max_interval;
2863 params->conn_latency = hdev->le_conn_latency;
2864 params->supervision_timeout = hdev->le_supv_timeout;
2865 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2867 BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* Drop the connection reference held by @params (if any), unlink the entry
 * from both the action list and the main le_conn_params list, and free it.
 * NOTE(review): "¶ms" below is mojibake for "&params"; the guard and
 * kfree lines are missing from this extract.
 */
2872 static void hci_conn_params_free(struct hci_conn_params *params)
2875 hci_conn_drop(params->conn);
2876 hci_conn_put(params->conn);
2879 list_del(¶ms->action);
2880 list_del(¶ms->list);
2884 /* This function requires the caller holds hdev->lock */
/* Look up and free the conn params for @addr/@addr_type, then refresh the
 * background scan since the pending-connection set may have changed.
 */
2885 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2887 struct hci_conn_params *params;
2889 params = hci_conn_params_lookup(hdev, addr, addr_type);
2893 hci_conn_params_free(params);
2895 hci_update_background_scan(hdev);
2897 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2900 /* This function requires the caller holds hdev->lock */
/* Remove all conn params whose auto_connect is DISABLED, except entries
 * with a pending explicit connect, which are converted to one-shot
 * (HCI_AUTO_CONN_EXPLICIT) instead of being deleted.
 * NOTE(review): "¶ms" below is mojibake for "&params".
 */
2901 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2903 struct hci_conn_params *params, *tmp;
2905 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2906 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2909 /* If trying to establish one time connection to disabled
2910 * device, leave the params, but mark them as just once.
2912 if (params->explicit_connect) {
2913 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2917 list_del(¶ms->list);
2921 BT_DBG("All LE disabled connection parameters were removed");
2924 /* This function requires the caller holds hdev->lock */
/* Free every LE conn params entry; used on device teardown. */
2925 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2927 struct hci_conn_params *params, *tmp;
2929 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2930 hci_conn_params_free(params);
2932 BT_DBG("All LE connection parameters were removed");
2935 /* Copy the Identity Address of the controller.
2937 * If the controller has a public BD_ADDR, then by default use that one.
2938 * If this is a LE only controller without a public address, default to
2939 * the static random address.
2941 * For debugging purposes it is possible to force controllers with a
2942 * public address to use the static random address instead.
2944 * In case BR/EDR has been disabled on a dual-mode controller and
2945 * userspace has configured a static address, then that address
2946 * becomes the identity address instead of the public BR/EDR address.
2948 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
/* Three cases select the static random address: forced via debugfs, no
 * public address at all, or BR/EDR disabled with a static address set.
 */
2951 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2952 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2953 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2954 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2955 bacpy(bdaddr, &hdev->static_addr);
2956 *bdaddr_type = ADDR_LE_DEV_RANDOM;
/* otherwise the public BD_ADDR is the identity address */
2958 bacpy(bdaddr, &hdev->bdaddr);
2959 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2963 /* Alloc HCI device */
/* Allocate and initialize a struct hci_dev with spec/implementation
 * default values, empty lists, work items and queues. Returns the new
 * device (NULL-check line is missing from this extract).
 */
2964 struct hci_dev *hci_alloc_dev(void)
2966 struct hci_dev *hdev;
2968 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
/* conservative baseline packet types every controller supports */
2972 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2973 hdev->esco_type = (ESCO_HV1);
2974 hdev->link_mode = (HCI_LM_ACCEPT);
2975 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2976 hdev->io_capability = 0x03; /* No Input No Output */
2977 hdev->manufacturer = 0xffff; /* Default to internal use */
2978 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2979 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2980 hdev->adv_instance_cnt = 0;
2981 hdev->cur_adv_instance = 0x00;
2982 hdev->adv_instance_timeout = 0;
2984 hdev->sniff_max_interval = 800;
2985 hdev->sniff_min_interval = 80;
/* LE defaults: all three advertising channels, 1.28s adv interval */
2987 hdev->le_adv_channel_map = 0x07;
2988 hdev->le_adv_min_interval = 0x0800;
2989 hdev->le_adv_max_interval = 0x0800;
2990 hdev->le_scan_interval = 0x0060;
2991 hdev->le_scan_window = 0x0030;
2992 hdev->le_conn_min_interval = 0x0018;
2993 hdev->le_conn_max_interval = 0x0028;
2994 hdev->le_conn_latency = 0x0000;
2995 hdev->le_supv_timeout = 0x002a;
/* LE data length defaults: 27 bytes / 328 us (4.2 minimums) */
2996 hdev->le_def_tx_len = 0x001b;
2997 hdev->le_def_tx_time = 0x0148;
2998 hdev->le_max_tx_len = 0x001b;
2999 hdev->le_max_tx_time = 0x0148;
3000 hdev->le_max_rx_len = 0x001b;
3001 hdev->le_max_rx_time = 0x0148;
3003 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3004 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3005 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3006 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3008 mutex_init(&hdev->lock);
3009 mutex_init(&hdev->req_lock);
3011 INIT_LIST_HEAD(&hdev->mgmt_pending);
3012 INIT_LIST_HEAD(&hdev->blacklist);
3013 INIT_LIST_HEAD(&hdev->whitelist);
3014 INIT_LIST_HEAD(&hdev->uuids);
3015 INIT_LIST_HEAD(&hdev->link_keys);
3016 INIT_LIST_HEAD(&hdev->long_term_keys);
3017 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3018 INIT_LIST_HEAD(&hdev->remote_oob_data);
3019 INIT_LIST_HEAD(&hdev->le_white_list);
3020 INIT_LIST_HEAD(&hdev->le_conn_params);
3021 INIT_LIST_HEAD(&hdev->pend_le_conns);
3022 INIT_LIST_HEAD(&hdev->pend_le_reports);
3023 INIT_LIST_HEAD(&hdev->conn_hash.list);
3024 INIT_LIST_HEAD(&hdev->adv_instances);
/* deferred work: rx/tx/cmd processing plus power and error handling */
3026 INIT_WORK(&hdev->rx_work, hci_rx_work);
3027 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3028 INIT_WORK(&hdev->tx_work, hci_tx_work);
3029 INIT_WORK(&hdev->power_on, hci_power_on);
3030 INIT_WORK(&hdev->error_reset, hci_error_reset);
3032 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3034 skb_queue_head_init(&hdev->rx_q);
3035 skb_queue_head_init(&hdev->cmd_q);
3036 skb_queue_head_init(&hdev->raw_q);
3038 init_waitqueue_head(&hdev->req_wait_q);
3040 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3042 hci_request_setup(hdev);
3044 hci_init_sysfs(hdev);
3045 discovery_init(hdev);
3051 /* Free HCI device */
/* Drop the device reference; the actual kfree happens in the struct
 * device release callback once the last reference is gone.
 */
3052 void hci_free_dev(struct hci_dev *hdev)
3054 /* will free via device release */
3055 put_device(&hdev->dev);
3057 EXPORT_SYMBOL(hci_free_dev);
3059 /* Register HCI device */
/* Register @hdev with the core: allocate an index, create workqueues,
 * debugfs, sysfs device, rfkill, set initial flags and kick power-on.
 * NOTE(review): error-path lines (gotos, returns) are missing from this
 * extract; code lines kept byte-identical.
 */
3060 int hci_register_dev(struct hci_dev *hdev)
/* driver must provide the minimal transport ops */
3064 if (!hdev->open || !hdev->close || !hdev->send)
3067 /* Do not allow HCI_AMP devices to register at index 0,
3068 * so the index can be used as the AMP controller ID.
3070 switch (hdev->dev_type) {
3072 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3075 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3084 sprintf(hdev->name, "hci%d", id);
3087 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3089 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3090 if (!hdev->workqueue) {
3095 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3097 if (!hdev->req_workqueue) {
3098 destroy_workqueue(hdev->workqueue);
/* debugfs may be unavailable (e.g. CONFIG_DEBUG_FS=n) */
3103 if (!IS_ERR_OR_NULL(bt_debugfs))
3104 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3106 dev_set_name(&hdev->dev, "%s", hdev->name);
3108 error = device_add(&hdev->dev);
3112 hci_leds_init(hdev);
3114 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3115 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3118 if (rfkill_register(hdev->rfkill) < 0) {
3119 rfkill_destroy(hdev->rfkill);
3120 hdev->rfkill = NULL;
/* honor a pre-existing soft/hard block */
3124 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3125 hci_dev_set_flag(hdev, HCI_RFKILLED);
3127 hci_dev_set_flag(hdev, HCI_SETUP);
3128 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3130 if (hdev->dev_type == HCI_PRIMARY) {
3131 /* Assume BR/EDR support until proven otherwise (such as
3132 * through reading supported features during init.
3134 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3137 write_lock(&hci_dev_list_lock);
3138 list_add(&hdev->list, &hci_dev_list);
3139 write_unlock(&hci_dev_list_lock);
3141 /* Devices that are marked for raw-only usage are unconfigured
3142 * and should not be included in normal operation.
3144 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3145 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3147 hci_sock_dev_event(hdev, HCI_DEV_REG);
3150 queue_work(hdev->req_workqueue, &hdev->power_on);
/* error unwind: tear down workqueues and release the index */
3155 destroy_workqueue(hdev->workqueue);
3156 destroy_workqueue(hdev->req_workqueue);
3158 ida_simple_remove(&hci_index_ida, hdev->id);
3164 /* Unregister HCI device */
/* Tear down everything hci_register_dev() set up: close the device,
 * notify mgmt/sockets, unregister rfkill and sysfs, free per-device
 * state under hdev->lock, and release the index.
 */
3165 void hci_unregister_dev(struct hci_dev *hdev)
3169 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3171 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3175 write_lock(&hci_dev_list_lock);
3176 list_del(&hdev->list);
3177 write_unlock(&hci_dev_list_lock);
/* make sure the delayed power-on cannot run after removal */
3179 cancel_work_sync(&hdev->power_on);
3181 hci_dev_do_close(hdev);
3183 if (!test_bit(HCI_INIT, &hdev->flags) &&
3184 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3185 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3187 mgmt_index_removed(hdev);
3188 hci_dev_unlock(hdev);
3191 /* mgmt_index_removed should take care of emptying the
3193 BUG_ON(!list_empty(&hdev->mgmt_pending));
3195 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3198 rfkill_unregister(hdev->rfkill);
3199 rfkill_destroy(hdev->rfkill);
3202 device_del(&hdev->dev);
3204 debugfs_remove_recursive(hdev->debugfs);
3205 kfree_const(hdev->hw_info);
3206 kfree_const(hdev->fw_info);
3208 destroy_workqueue(hdev->workqueue);
3209 destroy_workqueue(hdev->req_workqueue);
/* free all per-device lists/keys; must hold hdev->lock here */
3212 hci_bdaddr_list_clear(&hdev->blacklist);
3213 hci_bdaddr_list_clear(&hdev->whitelist);
3214 hci_uuids_clear(hdev);
3215 hci_link_keys_clear(hdev);
3216 hci_smp_ltks_clear(hdev);
3217 hci_smp_irks_clear(hdev);
3218 hci_remote_oob_data_clear(hdev);
3219 hci_adv_instances_clear(hdev);
3220 hci_bdaddr_list_clear(&hdev->le_white_list);
3221 hci_conn_params_clear_all(hdev);
3222 hci_discovery_filter_clear(hdev);
3223 hci_dev_unlock(hdev);
3227 ida_simple_remove(&hci_index_ida, id);
3231 /* Suspend HCI device */
/* Notify HCI sockets (monitor) that the device is suspending. */
3232 int hci_suspend_dev(struct hci_dev *hdev)
3234 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3237 EXPORT_SYMBOL(hci_suspend_dev);
3239 /* Resume HCI device */
/* Notify HCI sockets (monitor) that the device has resumed. */
3240 int hci_resume_dev(struct hci_dev *hdev)
3242 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3245 EXPORT_SYMBOL(hci_resume_dev);
3247 /* Reset HCI device */
/* Inject a synthetic HCI Hardware Error event (code 0x01) into the RX
 * path so the upper stack performs a full reset of the controller state.
 */
3248 int hci_reset_dev(struct hci_dev *hdev)
3250 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3251 struct sk_buff *skb;
/* GFP_ATOMIC: may be called from non-sleepable driver context */
3253 skb = bt_skb_alloc(3, GFP_ATOMIC);
3257 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3258 skb_put_data(skb, hw_err, 3);
3260 /* Send Hardware Error to upper stack */
3261 return hci_recv_frame(hdev, skb);
3263 EXPORT_SYMBOL(hci_reset_dev);
3265 /* Receive frame from HCI drivers */
/* Entry point for drivers delivering a frame to the core. Drops frames
 * when the device is neither UP nor in INIT, or when the packet type is
 * not event/ACL/SCO; otherwise timestamps and queues on rx_q for
 * hci_rx_work. (Drop/kfree_skb lines are missing from this extract.)
 */
3266 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3268 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3269 && !test_bit(HCI_INIT, &hdev->flags))) {
3274 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3275 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3276 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3282 bt_cb(skb)->incoming = 1;
3285 __net_timestamp(skb);
3287 skb_queue_tail(&hdev->rx_q, skb);
3288 queue_work(hdev->workqueue, &hdev->rx_work);
3292 EXPORT_SYMBOL(hci_recv_frame);
3294 /* Receive diagnostic message from HCI drivers */
/* Queue a vendor diagnostic packet on rx_q; unlike hci_recv_frame()
 * there is no device-state or packet-type filtering here.
 */
3295 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3297 /* Mark as diagnostic packet */
3298 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3301 __net_timestamp(skb);
3303 skb_queue_tail(&hdev->rx_q, skb);
3304 queue_work(hdev->workqueue, &hdev->rx_work);
3308 EXPORT_SYMBOL(hci_recv_diag);
/* Record a printf-formatted hardware description string on @hdev,
 * replacing any previous one. (va_end line missing from this extract.)
 */
3310 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3314 va_start(vargs, fmt);
3315 kfree_const(hdev->hw_info);
3316 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3319 EXPORT_SYMBOL(hci_set_hw_info);
/* Record a printf-formatted firmware description string on @hdev,
 * replacing any previous one. (va_end line missing from this extract.)
 */
3321 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3325 va_start(vargs, fmt);
3326 kfree_const(hdev->fw_info);
3327 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3330 EXPORT_SYMBOL(hci_set_fw_info);
3332 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback set (L2CAP, SCO, ...) on the
 * global hci_cb_list, serialized by hci_cb_list_lock.
 */
3334 int hci_register_cb(struct hci_cb *cb)
3336 BT_DBG("%p name %s", cb, cb->name);
3338 mutex_lock(&hci_cb_list_lock);
3339 list_add_tail(&cb->list, &hci_cb_list);
3340 mutex_unlock(&hci_cb_list_lock);
3344 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered upper-protocol callback set. */
3346 int hci_unregister_cb(struct hci_cb *cb)
3348 BT_DBG("%p name %s", cb, cb->name);
3350 mutex_lock(&hci_cb_list_lock);
3351 list_del(&cb->list);
3352 mutex_unlock(&hci_cb_list_lock);
3356 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one frame to the driver: timestamp it, mirror it to the monitor
 * (and to promiscuous sockets), then call hdev->send(). Frames are
 * dropped when HCI_RUNNING is not set. (kfree_skb/return lines are
 * missing from this extract.)
 */
3358 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3362 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3366 __net_timestamp(skb);
3368 /* Send copy to monitor */
3369 hci_send_to_monitor(hdev, skb);
3371 if (atomic_read(&hdev->promisc)) {
3372 /* Send copy to the sockets */
3373 hci_send_to_sock(hdev, skb);
3376 /* Get rid of skb owner, prior to sending to the driver. */
3379 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3384 err = hdev->send(hdev, skb);
3386 bt_dev_err(hdev, "sending frame failed (%d)", err);
3391 /* Send HCI command */
/* Build a command skb for @opcode/@param and queue it on cmd_q; the
 * command work item serializes actual transmission against credits.
 */
3392 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3395 struct sk_buff *skb;
3397 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3399 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3401 bt_dev_err(hdev, "no memory for command");
3405 /* Stand-alone HCI commands must be flagged as
3406 * single-command requests.
3408 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3410 skb_queue_tail(&hdev->cmd_q, skb);
3411 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Send a command directly to the driver, bypassing the command queue and
 * without waiting for any completion event. Restricted to vendor OGF
 * (0x3f) commands, since only those may legitimately go unanswered.
 */
3416 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3419 struct sk_buff *skb;
3421 if (hci_opcode_ogf(opcode) != 0x3f) {
3422 /* A controller receiving a command shall respond with either
3423 * a Command Status Event or a Command Complete Event.
3424 * Therefore, all standard HCI commands must be sent via the
3425 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3426 * Some vendors do not comply with this rule for vendor-specific
3427 * commands and do not return any event. We want to support
3428 * unresponded commands for such cases only.
3430 bt_dev_err(hdev, "unresponded command not supported");
3434 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3436 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
/* direct transmit: no credit accounting, no cmd_timer */
3441 hci_send_frame(hdev, skb);
3445 EXPORT_SYMBOL(__hci_cmd_send);
3447 /* Get data from the previously sent command */
/* Return a pointer to the parameter payload of the last sent command,
 * but only if its opcode matches @opcode; NULL-return lines are missing
 * from this extract.
 */
3448 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3450 struct hci_command_hdr *hdr;
3452 if (!hdev->sent_cmd)
3455 hdr = (void *) hdev->sent_cmd->data;
3457 if (hdr->opcode != cpu_to_le16(opcode))
3460 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
/* payload starts right after the 3-byte command header */
3462 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3465 /* Send HCI command and wait for command complete event */
/* Synchronous command helper: refuses when the device is down, then
 * runs __hci_cmd_sync() under the request lock and returns its skb
 * (or an ERR_PTR).
 */
3466 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3467 const void *param, u32 timeout)
3469 struct sk_buff *skb;
3471 if (!test_bit(HCI_UP, &hdev->flags))
3472 return ERR_PTR(-ENETDOWN);
3474 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3476 hci_req_sync_lock(hdev);
3477 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3478 hci_req_sync_unlock(hdev);
3482 EXPORT_SYMBOL(hci_cmd_sync);
/* Prepend an ACL data header (handle+flags packed, little-endian length)
 * to @skb. (The local 'len' capture line is missing from this extract.)
 */
3485 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3487 struct hci_acl_hdr *hdr;
3490 skb_push(skb, HCI_ACL_HDR_SIZE);
3491 skb_reset_transport_header(skb);
3492 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3493 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3494 hdr->dlen = cpu_to_le16(len);
/* Queue an (optionally fragmented) ACL packet on @queue. The first
 * fragment keeps ACL_START in @flags; continuation fragments get the
 * same flags with ACL_START cleared. All fragments are queued under
 * the queue lock so they stay contiguous.
 */
3497 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3498 struct sk_buff *skb, __u16 flags)
3500 struct hci_conn *conn = chan->conn;
3501 struct hci_dev *hdev = conn->hdev;
3502 struct sk_buff *list;
3504 skb->len = skb_headlen(skb);
3507 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
/* PRIMARY uses the connection handle, AMP uses the channel handle */
3509 switch (hdev->dev_type) {
3511 hci_add_acl_hdr(skb, conn->handle, flags);
3514 hci_add_acl_hdr(skb, chan->handle, flags);
3517 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3521 list = skb_shinfo(skb)->frag_list;
3523 /* Non fragmented */
3524 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3526 skb_queue_tail(queue, skb);
3529 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* detach the frag list; fragments are queued individually below */
3531 skb_shinfo(skb)->frag_list = NULL;
3533 /* Queue all fragments atomically. We need to use spin_lock_bh
3534 * here because of 6LoWPAN links, as there this function is
3535 * called from softirq and using normal spin lock could cause
3538 spin_lock_bh(&queue->lock);
3540 __skb_queue_tail(queue, skb);
3542 flags &= ~ACL_START;
3545 skb = list; list = list->next;
3547 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3548 hci_add_acl_hdr(skb, conn->handle, flags);
3550 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3552 __skb_queue_tail(queue, skb);
3555 spin_unlock_bh(&queue->lock);
/* Public ACL send: queue on the channel's data_q and schedule TX work. */
3559 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3561 struct hci_dev *hdev = chan->conn->hdev;
3563 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3565 hci_queue_acl(chan, &chan->data_q, skb, flags);
3567 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (handle + 8-bit length) and queue the packet on
 * the connection's data_q, then schedule TX work.
 */
3571 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3573 struct hci_dev *hdev = conn->hdev;
3574 struct hci_sco_hdr hdr;
3576 BT_DBG("%s len %d", hdev->name, skb->len);
3578 hdr.handle = cpu_to_le16(conn->handle);
3579 hdr.dlen = skb->len;
3581 skb_push(skb, HCI_SCO_HDR_SIZE);
3582 skb_reset_transport_header(skb);
3583 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3585 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3587 skb_queue_tail(&conn->data_q, skb);
3588 queue_work(hdev->workqueue, &hdev->tx_work);
3591 /* ---- HCI TX task (outgoing data) ---- */
3593 /* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest
 * outstanding packets ('sent'), and compute a fair quote from the
 * remaining controller credits. (Quote computation and rcu lock lines
 * are partly missing from this extract.)
 */
3594 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3597 struct hci_conn_hash *h = &hdev->conn_hash;
3598 struct hci_conn *conn = NULL, *c;
3599 unsigned int num = 0, min = ~0;
3601 /* We don't have to lock device here. Connections are always
3602 * added and removed with TX task disabled. */
3606 list_for_each_entry_rcu(c, &h->list, list) {
3607 if (c->type != type || skb_queue_empty(&c->data_q))
3610 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* track the least-loaded candidate */
3615 if (c->sent < min) {
3620 if (hci_conn_num(hdev, type) == num)
3629 switch (conn->type) {
3631 cnt = hdev->acl_cnt;
3635 cnt = hdev->sco_cnt;
/* LE falls back to ACL credits when there is no separate LE buffer */
3638 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3642 bt_dev_err(hdev, "unknown link type %d", conn->type);
3650 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler: disconnect every connection of @type that still
 * has unacknowledged packets outstanding.
 */
3654 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3656 struct hci_conn_hash *h = &hdev->conn_hash;
3659 bt_dev_err(hdev, "link tx timeout");
3663 /* Kill stalled connections */
3664 list_for_each_entry_rcu(c, &h->list, list) {
3665 if (c->type == type && c->sent) {
3666 bt_dev_err(hdev, "killing stalled connection %pMR",
3668 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among all channels on connections of @type,
 * pick one carrying the highest skb priority, breaking ties by fewest
 * packets outstanding on the connection; compute a credit-based quote.
 */
3675 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3678 struct hci_conn_hash *h = &hdev->conn_hash;
3679 struct hci_chan *chan = NULL;
3680 unsigned int num = 0, min = ~0, cur_prio = 0;
3681 struct hci_conn *conn;
3682 int cnt, q, conn_num = 0;
3684 BT_DBG("%s", hdev->name);
3688 list_for_each_entry_rcu(conn, &h->list, list) {
3689 struct hci_chan *tmp;
3691 if (conn->type != type)
3694 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3699 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3700 struct sk_buff *skb;
3702 if (skb_queue_empty(&tmp->data_q))
/* peek only: priority of the next packet decides scheduling */
3705 skb = skb_peek(&tmp->data_q);
3706 if (skb->priority < cur_prio)
3709 if (skb->priority > cur_prio) {
3712 cur_prio = skb->priority;
3717 if (conn->sent < min) {
3723 if (hci_conn_num(hdev, type) == conn_num)
3732 switch (chan->conn->type) {
3734 cnt = hdev->acl_cnt;
3737 cnt = hdev->block_cnt;
3741 cnt = hdev->sco_cnt;
3744 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3748 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3753 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation: after sending, bump the priority of queued packets on
 * other channels of @type toward HCI_PRIO_MAX - 1 so low-priority
 * traffic eventually gets scheduled.
 */
3757 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3759 struct hci_conn_hash *h = &hdev->conn_hash;
3760 struct hci_conn *conn;
3763 BT_DBG("%s", hdev->name);
3767 list_for_each_entry_rcu(conn, &h->list, list) {
3768 struct hci_chan *chan;
3770 if (conn->type != type)
3773 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3778 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3779 struct sk_buff *skb;
3786 if (skb_queue_empty(&chan->data_q))
3789 skb = skb_peek(&chan->data_q);
/* already at the promotion ceiling */
3790 if (skb->priority >= HCI_PRIO_MAX - 1)
3793 skb->priority = HCI_PRIO_MAX - 1;
3795 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3799 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by @skb's payload
 * (header excluded), for block-based flow control.
 */
3807 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3809 /* Calculate count of blocks used by this packet */
3810 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If ACL credits are exhausted (@cnt == 0) and no ACL TX has completed
 * within HCI_ACL_TX_TIMEOUT, treat the link as stalled. Skipped on
 * unconfigured controllers.
 */
3813 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3815 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3816 /* ACL tx timeout must be longer than maximum
3817 * link supervision timeout (40.9 seconds) */
3818 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3819 HCI_ACL_TX_TIMEOUT))
3820 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while credits remain, drain the selected
 * channel up to its quote, stopping early if a lower-priority packet is
 * reached; recalculate priorities if anything was sent.
 */
3824 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3826 unsigned int cnt = hdev->acl_cnt;
3827 struct hci_chan *chan;
3828 struct sk_buff *skb;
3831 __check_timeout(hdev, cnt);
3833 while (hdev->acl_cnt &&
3834 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3835 u32 priority = (skb_peek(&chan->data_q))->priority;
3836 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3837 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3838 skb->len, skb->priority);
3840 /* Stop if priority has changed */
3841 if (skb->priority < priority)
3844 skb = skb_dequeue(&chan->data_q);
3846 hci_conn_enter_active_mode(chan->conn,
3847 bt_cb(skb)->force_active);
3849 hci_send_frame(hdev, skb);
3850 hdev->acl_last_tx = jiffies;
/* only rebalance if at least one packet went out */
3858 if (cnt != hdev->acl_cnt)
3859 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (flow control counted in data blocks rather
 * than packets; used with AMP controllers). Stops a channel when the
 * next packet would need more blocks than remain.
 */
3862 static void hci_sched_acl_blk(struct hci_dev *hdev)
3864 unsigned int cnt = hdev->block_cnt;
3865 struct hci_chan *chan;
3866 struct sk_buff *skb;
3870 __check_timeout(hdev, cnt);
3872 BT_DBG("%s", hdev->name);
/* AMP controllers schedule AMP_LINK traffic here */
3874 if (hdev->dev_type == HCI_AMP)
3879 while (hdev->block_cnt > 0 &&
3880 (chan = hci_chan_sent(hdev, type, "e))) {
3881 u32 priority = (skb_peek(&chan->data_q))->priority;
3882 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3885 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3886 skb->len, skb->priority);
3888 /* Stop if priority has changed */
3889 if (skb->priority < priority)
3892 skb = skb_dequeue(&chan->data_q);
3894 blocks = __get_blocks(hdev, skb);
/* not enough block credits for this packet; try later */
3895 if (blocks > hdev->block_cnt)
3898 hci_conn_enter_active_mode(chan->conn,
3899 bt_cb(skb)->force_active);
3901 hci_send_frame(hdev, skb);
3902 hdev->acl_last_tx = jiffies;
3904 hdev->block_cnt -= blocks;
3907 chan->sent += blocks;
3908 chan->conn->sent += blocks;
3912 if (cnt != hdev->block_cnt)
3913 hci_prio_recalculate(hdev, type);
/* Dispatch to the packet- or block-based ACL scheduler according to the
 * controller's flow control mode; bail out early when there is no
 * relevant link for this controller type.
 */
3916 static void hci_sched_acl(struct hci_dev *hdev)
3918 BT_DBG("%s", hdev->name);
3920 /* No ACL link over BR/EDR controller */
3921 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3924 /* No AMP link over AMP controller */
3925 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3928 switch (hdev->flow_ctl_mode) {
3929 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3930 hci_sched_acl_pkt(hdev);
3933 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3934 hci_sched_acl_blk(hdev);
/* SCO scheduler: round through SCO connections while sco_cnt credits
 * remain, sending up to the per-connection quote.
 */
3940 static void hci_sched_sco(struct hci_dev *hdev)
3942 struct hci_conn *conn;
3943 struct sk_buff *skb;
3946 BT_DBG("%s", hdev->name);
3948 if (!hci_conn_num(hdev, SCO_LINK))
3951 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3952 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3953 BT_DBG("skb %p len %d", skb, skb->len);
3954 hci_send_frame(hdev, skb);
/* guard against 'sent' counter wrap */
3957 if (conn->sent == ~0)
/* eSCO scheduler: identical structure to hci_sched_sco() but for
 * ESCO_LINK connections (shares the sco_cnt credit pool).
 */
3963 static void hci_sched_esco(struct hci_dev *hdev)
3965 struct hci_conn *conn;
3966 struct sk_buff *skb;
3969 BT_DBG("%s", hdev->name);
3971 if (!hci_conn_num(hdev, ESCO_LINK))
3974 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3976 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3977 BT_DBG("skb %p len %d", skb, skb->len);
3978 hci_send_frame(hdev, skb);
/* guard against 'sent' counter wrap */
3981 if (conn->sent == ~0)
/* LE scheduler: uses the LE credit pool when the controller has separate
 * LE buffers (le_pkts), otherwise borrows ACL credits; applies the same
 * priority-quote draining as the ACL path.
 */
3987 static void hci_sched_le(struct hci_dev *hdev)
3989 struct hci_chan *chan;
3990 struct sk_buff *skb;
3991 int quote, cnt, tmp;
3993 BT_DBG("%s", hdev->name);
3995 if (!hci_conn_num(hdev, LE_LINK))
3998 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3999 /* LE tx timeout must be longer than maximum
4000 * link supervision timeout (40.9 seconds) */
4001 if (!hdev->le_cnt && hdev->le_pkts &&
4002 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4003 hci_link_tx_to(hdev, LE_LINK);
4006 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4008 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
4009 u32 priority = (skb_peek(&chan->data_q))->priority;
4010 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4011 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4012 skb->len, skb->priority);
4014 /* Stop if priority has changed */
4015 if (skb->priority < priority)
4018 skb = skb_dequeue(&chan->data_q);
4020 hci_send_frame(hdev, skb);
4021 hdev->le_last_tx = jiffies;
/* when borrowing ACL credits, write the remainder back */
4032 hdev->acl_cnt = cnt;
4035 hci_prio_recalculate(hdev, LE_LINK);
/* TX work item: run each per-type scheduler (unless the device is in
 * user-channel mode, where userspace owns the transport) and then flush
 * any raw-queued packets straight to the driver.
 */
4038 static void hci_tx_work(struct work_struct *work)
4040 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4041 struct sk_buff *skb;
4043 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4044 hdev->sco_cnt, hdev->le_cnt);
4046 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4047 /* Schedule queues and send stuff to HCI driver */
4048 hci_sched_acl(hdev);
4049 hci_sched_sco(hdev);
4050 hci_sched_esco(hdev);
4054 /* Send next queued raw (unknown type) packet */
4055 while ((skb = skb_dequeue(&hdev->raw_q)))
4056 hci_send_frame(hdev, skb);
4059 /* ----- HCI RX task (incoming data processing) ----- */
4061 /* ACL data packet */
/* Parse the ACL header (handle/flags), look up the connection, and hand
 * the payload to L2CAP; unknown handles are logged and the skb dropped
 * (kfree_skb line missing from this extract).
 */
4062 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4064 struct hci_acl_hdr *hdr = (void *) skb->data;
4065 struct hci_conn *conn;
4066 __u16 handle, flags;
4068 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* handle field packs 12-bit handle plus PB/BC flags */
4070 handle = __le16_to_cpu(hdr->handle);
4071 flags = hci_flags(handle);
4072 handle = hci_handle(handle);
4074 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4077 hdev->stat.acl_rx++;
4080 conn = hci_conn_hash_lookup_handle(hdev, handle);
4081 hci_dev_unlock(hdev);
4084 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4086 /* Send to upper protocol */
4087 l2cap_recv_acldata(conn, skb, flags);
4090 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4097 /* SCO data packet */
/* Parse the SCO header, look up the connection, and hand the payload to
 * the SCO layer; unknown handles are logged and the skb dropped.
 */
4098 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4100 struct hci_sco_hdr *hdr = (void *) skb->data;
4101 struct hci_conn *conn;
4104 skb_pull(skb, HCI_SCO_HDR_SIZE);
4106 handle = __le16_to_cpu(hdr->handle);
4108 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4110 hdev->stat.sco_rx++;
4113 conn = hci_conn_hash_lookup_handle(hdev, handle);
4114 hci_dev_unlock(hdev);
4117 /* Send to upper protocol */
4118 sco_recv_scodata(conn, skb);
4121 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
/* A request is complete when the next queued command starts a new
 * request (HCI_REQ_START) — or, per the original, when the queue is
 * empty (that branch is missing from this extract).
 */
4128 static bool hci_req_is_complete(struct hci_dev *hdev)
4130 struct sk_buff *skb;
4132 skb = skb_peek(&hdev->cmd_q);
4136 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
/* Re-queue a clone of the last sent command at the head of cmd_q —
 * except HCI_Reset, which must not be resent. Used to recover from
 * spontaneous controller resets during init.
 */
4139 static void hci_resend_last(struct hci_dev *hdev)
4141 struct hci_command_hdr *sent;
4142 struct sk_buff *skb;
4145 if (!hdev->sent_cmd)
4148 sent = (void *) hdev->sent_cmd->data;
4149 opcode = __le16_to_cpu(sent->opcode);
4150 if (opcode == HCI_OP_RESET)
4153 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
/* head of queue: it must go out before anything queued after it */
4157 skb_queue_head(&hdev->cmd_q, skb);
4158 queue_work(hdev->workqueue, &hdev->cmd_work);
/* On command completion, determine whether the enclosing request is
 * finished and return its completion callback via @req_complete /
 * @req_complete_skb; on request failure, flush the request's remaining
 * queued commands from cmd_q.
 */
4161 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4162 hci_req_complete_t *req_complete,
4163 hci_req_complete_skb_t *req_complete_skb)
4165 struct sk_buff *skb;
4166 unsigned long flags;
4168 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4170 /* If the completed command doesn't match the last one that was
4171 * sent we need to do special handling of it.
4173 if (!hci_sent_cmd_data(hdev, opcode)) {
4174 /* Some CSR based controllers generate a spontaneous
4175 * reset complete event during init and any pending
4176 * command will never be completed. In such a case we
4177 * need to resend whatever was the last sent
4180 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4181 hci_resend_last(hdev);
4186 /* If the command succeeded and there's still more commands in
4187 * this request the request is not yet complete.
4189 if (!status && !hci_req_is_complete(hdev))
4192 /* If this was the last command in a request the complete
4193 * callback would be found in hdev->sent_cmd instead of the
4194 * command queue (hdev->cmd_q).
4196 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4197 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4201 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4202 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4206 /* Remove all pending commands belonging to this request */
4207 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4208 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* the start of the next request marks the flush boundary */
4209 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4210 __skb_queue_head(&hdev->cmd_q, skb);
4214 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4215 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4217 *req_complete = bt_cb(skb)->hci.req_complete;
4220 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
/* RX work item: drain rx_q, mirroring each frame to the monitor and
 * promiscuous sockets, then dispatch by packet type. Frames are dropped
 * (not dispatched) in user-channel mode, and data packets are dropped
 * while HCI_INIT is set.
 */
4223 static void hci_rx_work(struct work_struct *work)
4225 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4226 struct sk_buff *skb;
4228 BT_DBG("%s", hdev->name);
4230 while ((skb = skb_dequeue(&hdev->rx_q))) {
4231 /* Send copy to monitor */
4232 hci_send_to_monitor(hdev, skb);
4234 if (atomic_read(&hdev->promisc)) {
4235 /* Send copy to the sockets */
4236 hci_send_to_sock(hdev, skb);
/* userspace owns the transport: do not process in the kernel */
4239 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4244 if (test_bit(HCI_INIT, &hdev->flags)) {
4245 /* Don't process data packets in this states. */
4246 switch (hci_skb_pkt_type(skb)) {
4247 case HCI_ACLDATA_PKT:
4248 case HCI_SCODATA_PKT:
4255 switch (hci_skb_pkt_type(skb)) {
4257 BT_DBG("%s Event packet", hdev->name);
4258 hci_event_packet(hdev, skb);
4261 case HCI_ACLDATA_PKT:
4262 BT_DBG("%s ACL data packet", hdev->name);
4263 hci_acldata_packet(hdev, skb);
4266 case HCI_SCODATA_PKT:
4267 BT_DBG("%s SCO data packet", hdev->name);
4268 hci_scodata_packet(hdev, skb);
4278 static void hci_cmd_work(struct work_struct *work)
4280 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4281 struct sk_buff *skb;
4283 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4284 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4286 /* Send queued commands */
4287 if (atomic_read(&hdev->cmd_cnt)) {
4288 skb = skb_dequeue(&hdev->cmd_q);
4292 kfree_skb(hdev->sent_cmd);
4294 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4295 if (hdev->sent_cmd) {
4296 atomic_dec(&hdev->cmd_cnt);
4297 hci_send_frame(hdev, skb);
4298 if (test_bit(HCI_RESET, &hdev->flags))
4299 cancel_delayed_work(&hdev->cmd_timer);
4301 schedule_delayed_work(&hdev->cmd_timer,
4304 skb_queue_head(&hdev->cmd_q, skb);
4305 queue_work(hdev->workqueue, &hdev->cmd_work);