2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
40 #include <asm/system.h>
41 #include <linux/uaccess.h>
42 #include <asm/unaligned.h>
44 #include <net/bluetooth/bluetooth.h>
45 #include <net/bluetooth/hci_core.h>
47 /* Handle HCI Event packets */
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 * On failure it reports the failed discovery stop to the management
 * interface; on success it clears the in-progress inquiry flag and marks
 * discovery stopped, then completes the request and resumes any pending
 * outgoing connection attempts.
 * NOTE(review): this excerpt elides the braces and the status guard
 * between these statements — confirm control flow against the full file.
 */
49 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
51 __u8 status = *((__u8 *) skb->data);
53 BT_DBG("%s status 0x%x", hdev->name, status);
57 mgmt_stop_discovery_failed(hdev, status);
62 clear_bit(HCI_INQUIRY, &hdev->flags);
65 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
68 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
70 hci_conn_check_pending(hdev);
/* Command Complete handler for HCI_OP_EXIT_PERIODIC_INQ: after periodic
 * inquiry stops, retry any connection attempts that were deferred while
 * the inquiry was running.
 * NOTE(review): a status guard between BT_DBG and the call is elided in
 * this excerpt.
 */
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
75 __u8 status = *((__u8 *) skb->data);
77 BT_DBG("%s status 0x%x", hdev->name, status);
82 hci_conn_check_pending(hdev);
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
87 BT_DBG("%s", hdev->name);
90 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 struct hci_rp_role_discovery *rp = (void *) skb->data;
93 struct hci_conn *conn;
95 BT_DBG("%s status 0x%x", hdev->name, rp->status);
102 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
105 conn->link_mode &= ~HCI_LM_MASTER;
107 conn->link_mode |= HCI_LM_MASTER;
110 hci_dev_unlock(hdev);
113 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
115 struct hci_rp_read_link_policy *rp = (void *) skb->data;
116 struct hci_conn *conn;
118 BT_DBG("%s status 0x%x", hdev->name, rp->status);
125 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
127 conn->link_policy = __le16_to_cpu(rp->policy);
129 hci_dev_unlock(hdev);
132 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
134 struct hci_rp_write_link_policy *rp = (void *) skb->data;
135 struct hci_conn *conn;
138 BT_DBG("%s status 0x%x", hdev->name, rp->status);
143 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
149 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
151 conn->link_policy = get_unaligned_le16(sent + 2);
153 hci_dev_unlock(hdev);
156 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
158 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
160 BT_DBG("%s status 0x%x", hdev->name, rp->status);
165 hdev->link_policy = __le16_to_cpu(rp->policy);
168 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
170 __u8 status = *((__u8 *) skb->data);
173 BT_DBG("%s status 0x%x", hdev->name, status);
175 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
180 hdev->link_policy = get_unaligned_le16(sent);
182 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
/* Command Complete handler for HCI_OP_RESET: clear the in-progress reset
 * flag, complete the request, and drop all non-persistent device state
 * (LE scan / pending-class flags, discovery state).
 */
185 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
187 __u8 status = *((__u8 *) skb->data);
189 BT_DBG("%s status 0x%x", hdev->name, status);
191 clear_bit(HCI_RESET, &hdev->flags);
193 hci_req_complete(hdev, HCI_OP_RESET, status);
195 /* Reset all non-persistent flags */
196 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));
198 hdev->discovery.state = DISCOVERY_STOPPED;
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME: notify mgmt of
 * the outcome and, on success, cache the name we sent in hdev->dev_name.
 * NOTE(review): the declaration of `sent`, its NULL check, hci_dev_lock()
 * and the status guard before the memcpy are elided in this excerpt.
 */
201 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
203 __u8 status = *((__u8 *) skb->data);
206 BT_DBG("%s status 0x%x", hdev->name, status);
208 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
214 if (test_bit(HCI_MGMT, &hdev->dev_flags))
215 mgmt_set_local_name_complete(hdev, sent, status);
217 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
219 hci_dev_unlock(hdev);
221 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
/* Command Complete handler for HCI_OP_READ_LOCAL_NAME: during initial
 * setup only, cache the controller's reported name in hdev->dev_name.
 * NOTE(review): the status guard before the HCI_SETUP test is elided in
 * this excerpt.
 */
224 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
226 struct hci_rp_read_local_name *rp = (void *) skb->data;
228 BT_DBG("%s status 0x%x", hdev->name, rp->status);
233 if (test_bit(HCI_SETUP, &hdev->dev_flags))
234 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE: on success,
 * sync the HCI_AUTH flag with the parameter we sent; notify mgmt either
 * way; complete the request.
 * NOTE(review): the `sent` NULL check, the status guard and the else
 * branch bracing are elided in this excerpt.
 */
237 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
239 __u8 status = *((__u8 *) skb->data);
242 BT_DBG("%s status 0x%x", hdev->name, status);
244 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
249 __u8 param = *((__u8 *) sent);
251 if (param == AUTH_ENABLED)
252 set_bit(HCI_AUTH, &hdev->flags);
254 clear_bit(HCI_AUTH, &hdev->flags);
257 if (test_bit(HCI_MGMT, &hdev->dev_flags))
258 mgmt_auth_enable_complete(hdev, status);
260 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
/* Command Complete handler for HCI_OP_WRITE_ENCRYPT_MODE: on success,
 * set or clear HCI_ENCRYPT according to the parameter we sent, then
 * complete the request.
 * NOTE(review): the `sent` NULL check, status guard and the condition
 * selecting between set_bit/clear_bit are elided in this excerpt.
 */
263 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
265 __u8 status = *((__u8 *) skb->data);
268 BT_DBG("%s status 0x%x", hdev->name, status);
270 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
275 __u8 param = *((__u8 *) sent);
278 set_bit(HCI_ENCRYPT, &hdev->flags);
280 clear_bit(HCI_ENCRYPT, &hdev->flags);
283 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
286 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
288 __u8 param, status = *((__u8 *) skb->data);
289 int old_pscan, old_iscan;
292 BT_DBG("%s status 0x%x", hdev->name, status);
294 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
298 param = *((__u8 *) sent);
303 mgmt_write_scan_failed(hdev, param, status);
304 hdev->discov_timeout = 0;
308 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
309 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
311 if (param & SCAN_INQUIRY) {
312 set_bit(HCI_ISCAN, &hdev->flags);
314 mgmt_discoverable(hdev, 1);
315 if (hdev->discov_timeout > 0) {
316 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
317 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
320 } else if (old_iscan)
321 mgmt_discoverable(hdev, 0);
323 if (param & SCAN_PAGE) {
324 set_bit(HCI_PSCAN, &hdev->flags);
326 mgmt_connectable(hdev, 1);
327 } else if (old_pscan)
328 mgmt_connectable(hdev, 0);
331 hci_dev_unlock(hdev);
332 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
335 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
337 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
339 BT_DBG("%s status 0x%x", hdev->name, rp->status);
344 memcpy(hdev->dev_class, rp->dev_class, 3);
346 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
347 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV: on success,
 * mirror the class we sent into hdev->dev_class and notify mgmt.
 * NOTE(review): the `sent` NULL check, status guard and hci_dev_lock()
 * are elided in this excerpt.
 */
350 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
352 __u8 status = *((__u8 *) skb->data);
355 BT_DBG("%s status 0x%x", hdev->name, status);
357 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
364 memcpy(hdev->dev_class, sent, 3);
366 if (test_bit(HCI_MGMT, &hdev->dev_flags))
367 mgmt_set_class_of_dev_complete(hdev, sent, status);
369 hci_dev_unlock(hdev);
372 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
374 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
377 BT_DBG("%s status 0x%x", hdev->name, rp->status);
382 setting = __le16_to_cpu(rp->voice_setting);
384 if (hdev->voice_setting == setting)
387 hdev->voice_setting = setting;
389 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
392 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
395 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
397 __u8 status = *((__u8 *) skb->data);
401 BT_DBG("%s status 0x%x", hdev->name, status);
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
410 setting = get_unaligned_le16(sent);
412 if (hdev->voice_setting == setting)
415 hdev->voice_setting = setting;
417 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
423 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
425 __u8 status = *((__u8 *) skb->data);
427 BT_DBG("%s status 0x%x", hdev->name, status);
429 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE: notify mgmt of the
 * outcome and, on success, sync HCI_SSP_ENABLED with the mode we sent.
 * NOTE(review): the `sent` NULL check, status guard and the condition
 * selecting set_bit vs clear_bit are elided in this excerpt.
 */
432 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
434 __u8 status = *((__u8 *) skb->data);
437 BT_DBG("%s status 0x%x", hdev->name, status);
439 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
443 if (test_bit(HCI_MGMT, &hdev->dev_flags))
444 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
447 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
449 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* Pick the best supported inquiry mode for this controller: extended
 * inquiry if the LMP feature bit is set, otherwise RSSI inquiry, with
 * quirks for specific manufacturer/revision combinations known to
 * misreport their capabilities.
 * NOTE(review): the return statements between the feature/quirk checks
 * are elided in this excerpt, so the exact mode values returned for each
 * branch are not visible here.
 */
453 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
455 if (hdev->features[6] & LMP_EXT_INQ)
458 if (hdev->features[3] & LMP_RSSI_INQ)
461 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
462 hdev->lmp_subver == 0x0757)
465 if (hdev->manufacturer == 15) {
466 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
468 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
470 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
474 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
475 hdev->lmp_subver == 0x1805)
/* Query the preferred inquiry mode for this controller and write it with
 * HCI_OP_WRITE_INQUIRY_MODE (1-byte parameter).
 */
481 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
485 mode = hci_get_inquiry_mode(hdev);
487 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
/* Build and send the HCI event mask for this controller.  Starts from a
 * baseline covering BR/EDR 1.1 events, then enables additional event
 * bits only for features the controller's LMP feature bytes advertise
 * (RSSI inquiry, sniff subrating, SSP, LE, etc.).
 * NOTE(review): some lines between the baseline and the feature checks
 * (including an apparent clearing of bits for non-1.2 features) are
 * elided in this excerpt.
 */
490 static void hci_setup_event_mask(struct hci_dev *hdev)
492 /* The second byte is 0xff instead of 0x9f (two reserved bits
493 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
494 * command otherwise */
495 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
497 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
498 * any event mask for pre 1.2 devices */
499 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
502 events[4] |= 0x01; /* Flow Specification Complete */
503 events[4] |= 0x02; /* Inquiry Result with RSSI */
504 events[4] |= 0x04; /* Read Remote Extended Features Complete */
505 events[5] |= 0x08; /* Synchronous Connection Complete */
506 events[5] |= 0x10; /* Synchronous Connection Changed */
508 if (hdev->features[3] & LMP_RSSI_INQ)
509 events[4] |= 0x04; /* Inquiry Result with RSSI */
511 if (hdev->features[5] & LMP_SNIFF_SUBR)
512 events[5] |= 0x20; /* Sniff Subrating */
514 if (hdev->features[5] & LMP_PAUSE_ENC)
515 events[5] |= 0x80; /* Encryption Key Refresh Complete */
517 if (hdev->features[6] & LMP_EXT_INQ)
518 events[5] |= 0x40; /* Extended Inquiry Result */
520 if (hdev->features[6] & LMP_NO_FLUSH)
521 events[7] |= 0x01; /* Enhanced Flush Complete */
523 if (hdev->features[7] & LMP_LSTO)
524 events[6] |= 0x80; /* Link Supervision Timeout Changed */
526 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
527 events[6] |= 0x01; /* IO Capability Request */
528 events[6] |= 0x02; /* IO Capability Response */
529 events[6] |= 0x04; /* User Confirmation Request */
530 events[6] |= 0x08; /* User Passkey Request */
531 events[6] |= 0x10; /* Remote OOB Data Request */
532 events[6] |= 0x20; /* Simple Pairing Complete */
533 events[7] |= 0x04; /* User Passkey Notification */
534 events[7] |= 0x08; /* Keypress Notification */
535 events[7] |= 0x10; /* Remote Host Supported
536 * Features Notification */
539 if (hdev->features[4] & LMP_LE)
540 events[7] |= 0x20; /* LE Meta-Event */
542 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* BR/EDR controller bring-up: program the event mask, then issue the
 * conditional setup commands — read supported commands (1.2+), enable
 * SSP or clear EIR depending on HCI_SSP_ENABLED, configure inquiry mode,
 * read inquiry TX power, read extended features, and re-enable link
 * security if it was requested.
 * NOTE(review): several lines are elided in this excerpt, including the
 * `mode`/`enable` local declarations, cp initialization for the extended
 * features read, and the trailing arguments of two hci_send_cmd calls.
 */
545 static void hci_setup(struct hci_dev *hdev)
547 if (hdev->dev_type != HCI_BREDR)
550 hci_setup_event_mask(hdev);
552 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
553 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
555 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
556 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
558 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
559 sizeof(mode), &mode);
561 struct hci_cp_write_eir cp;
563 memset(hdev->eir, 0, sizeof(hdev->eir));
564 memset(&cp, 0, sizeof(cp));
566 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
570 if (hdev->features[3] & LMP_RSSI_INQ)
571 hci_setup_inquiry_mode(hdev);
573 if (hdev->features[7] & LMP_INQ_TX_PWR)
574 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
576 if (hdev->features[7] & LMP_EXTFEATURES) {
577 struct hci_cp_read_local_ext_features cp;
580 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
584 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
586 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Command Complete handler for HCI_OP_READ_LOCAL_VERSION: cache HCI/LMP
 * versions, revision, subversion and manufacturer on hdev; during the
 * HCI_INIT phase this triggers further setup (elided here), then the
 * request is completed.
 * NOTE(review): the status guard and the body of the HCI_INIT branch are
 * elided in this excerpt.
 */
591 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
593 struct hci_rp_read_local_version *rp = (void *) skb->data;
595 BT_DBG("%s status 0x%x", hdev->name, rp->status);
600 hdev->hci_ver = rp->hci_ver;
601 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
602 hdev->lmp_ver = rp->lmp_ver;
603 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
604 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
606 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
608 hdev->hci_ver, hdev->hci_rev);
610 if (test_bit(HCI_INIT, &hdev->flags))
614 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
/* Build the default link policy from the controller's LMP feature bits
 * (role switch, hold, sniff, park) and write it with
 * HCI_OP_WRITE_DEF_LINK_POLICY.
 * NOTE(review): the declaration of `link_policy` and the final argument
 * of hci_send_cmd are elided in this excerpt.
 */
617 static void hci_setup_link_policy(struct hci_dev *hdev)
621 if (hdev->features[0] & LMP_RSWITCH)
622 link_policy |= HCI_LP_RSWITCH;
623 if (hdev->features[0] & LMP_HOLD)
624 link_policy |= HCI_LP_HOLD;
625 if (hdev->features[0] & LMP_SNIFF)
626 link_policy |= HCI_LP_SNIFF;
627 if (hdev->features[1] & LMP_PARK)
628 link_policy |= HCI_LP_PARK;
630 link_policy = cpu_to_le16(link_policy);
631 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
/* Command Complete handler for HCI_OP_READ_LOCAL_COMMANDS: cache the
 * supported-commands bitmap; during init, if Write Default Link Policy
 * is supported (commands[5] bit 0x10), program the default link policy.
 * NOTE(review): the status guard before the memcpy is elided in this
 * excerpt.
 */
635 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
637 struct hci_rp_read_local_commands *rp = (void *) skb->data;
639 BT_DBG("%s status 0x%x", hdev->name, rp->status);
644 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
646 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
647 hci_setup_link_policy(hdev);
650 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES: cache the
 * 8-byte LMP feature page and derive the default ACL packet types and
 * supported (e)SCO packet types from the advertised feature bits.
 * NOTE(review): the status guard before the memcpy is elided in this
 * excerpt.
 */
653 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
655 struct hci_rp_read_local_features *rp = (void *) skb->data;
657 BT_DBG("%s status 0x%x", hdev->name, rp->status);
662 memcpy(hdev->features, rp->features, 8);
664 /* Adjust default settings according to features
665 * supported by device. */
667 if (hdev->features[0] & LMP_3SLOT)
668 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
670 if (hdev->features[0] & LMP_5SLOT)
671 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
673 if (hdev->features[1] & LMP_HV2) {
674 hdev->pkt_type |= (HCI_HV2);
675 hdev->esco_type |= (ESCO_HV2);
678 if (hdev->features[1] & LMP_HV3) {
679 hdev->pkt_type |= (HCI_HV3);
680 hdev->esco_type |= (ESCO_HV3);
683 if (hdev->features[3] & LMP_ESCO)
684 hdev->esco_type |= (ESCO_EV3);
686 if (hdev->features[4] & LMP_EV4)
687 hdev->esco_type |= (ESCO_EV4);
689 if (hdev->features[4] & LMP_EV5)
690 hdev->esco_type |= (ESCO_EV5);
692 if (hdev->features[5] & LMP_EDR_ESCO_2M)
693 hdev->esco_type |= (ESCO_2EV3);
695 if (hdev->features[5] & LMP_EDR_ESCO_3M)
696 hdev->esco_type |= (ESCO_3EV3);
698 if (hdev->features[5] & LMP_EDR_3S_ESCO)
699 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
701 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
702 hdev->features[0], hdev->features[1],
703 hdev->features[2], hdev->features[3],
704 hdev->features[4], hdev->features[5],
705 hdev->features[6], hdev->features[7]);
/* If LE is enabled (module parameter and per-device flag), request host
 * LE support via HCI_OP_WRITE_LE_HOST_SUPPORTED — but only when the
 * desired state differs from the current host_features LMP_HOST_LE bit,
 * to avoid a redundant command.
 * NOTE(review): the assignment to cp.le and the final hci_send_cmd
 * argument are elided in this excerpt.
 */
708 static void hci_set_le_support(struct hci_dev *hdev)
710 struct hci_cp_write_le_host_supported cp;
712 memset(&cp, 0, sizeof(cp));
714 if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
716 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
719 if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
720 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Command Complete handler for HCI_OP_READ_LOCAL_EXT_FEATURES: cache the
 * returned feature page (page 0 → hdev->features, page 1 →
 * hdev->host_features), and during init kick off LE host support
 * configuration when the controller advertises LE.
 * NOTE(review): the status guard and the page-number switch selecting
 * between the two memcpy destinations are elided in this excerpt.
 */
724 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
727 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
729 BT_DBG("%s status 0x%x", hdev->name, rp->status);
736 memcpy(hdev->features, rp->features, 8);
739 memcpy(hdev->host_features, rp->features, 8);
743 if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
744 hci_set_le_support(hdev);
747 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
/* Command Complete handler for HCI_OP_READ_FLOW_CONTROL_MODE: cache the
 * controller's flow control mode and complete the request.
 * NOTE(review): the status guard before the assignment is elided in this
 * excerpt.
 */
750 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
753 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
755 BT_DBG("%s status 0x%x", hdev->name, rp->status);
760 hdev->flow_ctl_mode = rp->mode;
762 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE: cache ACL/SCO
 * MTUs and packet counts, apply the fixup quirk for controllers that
 * misreport buffer sizes, and initialise the flow-control counters.
 * NOTE(review): the status guard and the body of the quirk branch are
 * elided in this excerpt.
 */
765 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
767 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
769 BT_DBG("%s status 0x%x", hdev->name, rp->status);
774 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
775 hdev->sco_mtu = rp->sco_mtu;
776 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
777 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
779 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
784 hdev->acl_cnt = hdev->acl_pkts;
785 hdev->sco_cnt = hdev->sco_pkts;
787 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
788 hdev->acl_mtu, hdev->acl_pkts,
789 hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete handler for HCI_OP_READ_BD_ADDR: cache the public
 * Bluetooth device address and complete the request.
 * NOTE(review): the status guard before bacpy is elided in this excerpt.
 */
792 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
794 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
796 BT_DBG("%s status 0x%x", hdev->name, rp->status);
799 bacpy(&hdev->bdaddr, &rp->bdaddr);
801 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE (block-based
 * flow control): cache block MTU/length/count and initialise the free
 * block counter.
 * NOTE(review): the status guard before the assignments is elided in
 * this excerpt.
 */
804 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
807 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
809 BT_DBG("%s status 0x%x", hdev->name, rp->status);
814 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
815 hdev->block_len = __le16_to_cpu(rp->block_len);
816 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
818 hdev->block_cnt = hdev->num_blocks;
820 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
821 hdev->block_cnt, hdev->block_len);
823 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
826 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
828 __u8 status = *((__u8 *) skb->data);
830 BT_DBG("%s status 0x%x", hdev->name, status);
832 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO: cache all AMP
 * controller capabilities (status, bandwidths, latency, PDU size, type,
 * PAL capabilities, assoc size, flush timeouts) and complete the request.
 * NOTE(review): the status guard before the assignments is elided in
 * this excerpt.
 */
835 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
838 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
840 BT_DBG("%s status 0x%x", hdev->name, rp->status);
845 hdev->amp_status = rp->amp_status;
846 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
847 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
848 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
849 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
850 hdev->amp_type = rp->amp_type;
851 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
852 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
853 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
854 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
856 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
859 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
862 __u8 status = *((__u8 *) skb->data);
864 BT_DBG("%s status 0x%x", hdev->name, status);
866 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
869 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
871 __u8 status = *((__u8 *) skb->data);
873 BT_DBG("%s status 0x%x", hdev->name, status);
875 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
878 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
881 __u8 status = *((__u8 *) skb->data);
883 BT_DBG("%s status 0x%x", hdev->name, status);
885 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
888 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
891 __u8 status = *((__u8 *) skb->data);
893 BT_DBG("%s status 0x%x", hdev->name, status);
895 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
898 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
900 __u8 status = *((__u8 *) skb->data);
902 BT_DBG("%s status 0x%x", hdev->name, status);
904 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY: report the result
 * to mgmt and, on success, record the PIN length (recovered from the
 * sent command) on the matching ACL connection.
 * NOTE(review): hci_dev_lock(), the status guard and the NULL checks on
 * `cp`/`conn` are elided in this excerpt.
 */
907 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
909 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
910 struct hci_cp_pin_code_reply *cp;
911 struct hci_conn *conn;
913 BT_DBG("%s status 0x%x", hdev->name, rp->status);
917 if (test_bit(HCI_MGMT, &hdev->dev_flags))
918 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
923 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
927 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
929 conn->pin_length = cp->pin_len;
932 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY: forward the
 * result to mgmt under the device lock.
 * NOTE(review): hci_dev_lock() and the trailing argument of the mgmt
 * call are elided in this excerpt.
 */
935 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
937 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
939 BT_DBG("%s status 0x%x", hdev->name, rp->status);
943 if (test_bit(HCI_MGMT, &hdev->dev_flags))
944 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
947 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE: cache the LE
 * ACL MTU and packet count and initialise the LE flow-control counter.
 * NOTE(review): the status guard before the assignments is elided in
 * this excerpt.
 */
950 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
953 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
955 BT_DBG("%s status 0x%x", hdev->name, rp->status);
960 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
961 hdev->le_pkts = rp->le_max_pkt;
963 hdev->le_cnt = hdev->le_pkts;
965 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
967 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY: forward the
 * result to mgmt under the device lock.
 * NOTE(review): hci_dev_lock() and the trailing argument of the mgmt
 * call are elided in this excerpt.
 */
970 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
972 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
974 BT_DBG("%s status 0x%x", hdev->name, rp->status);
978 if (test_bit(HCI_MGMT, &hdev->dev_flags))
979 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
982 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY: forward
 * the result to mgmt under the device lock.
 * NOTE(review): hci_dev_lock() is elided in this excerpt.
 */
985 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
988 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
990 BT_DBG("%s status 0x%x", hdev->name, rp->status);
994 if (test_bit(HCI_MGMT, &hdev->dev_flags))
995 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
996 ACL_LINK, 0, rp->status);
998 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY: forward the
 * result to mgmt under the device lock.
 * NOTE(review): hci_dev_lock() and the trailing argument of the mgmt
 * call are elided in this excerpt.
 */
1001 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1003 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1005 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1009 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1010 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1013 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY: forward
 * the result to mgmt under the device lock.
 * NOTE(review): hci_dev_lock() is elided in this excerpt.
 */
1016 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1017 struct sk_buff *skb)
1019 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1021 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1025 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1026 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1027 ACL_LINK, 0, rp->status);
1029 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA: hand the OOB
 * hash and randomizer to mgmt under the device lock.
 * NOTE(review): hci_dev_lock() is elided in this excerpt.
 */
1032 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1033 struct sk_buff *skb)
1035 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1037 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1040 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1041 rp->randomizer, rp->status);
1042 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SET_SCAN_PARAM: complete the
 * request; on failure, report the failed discovery start to mgmt.
 * NOTE(review): the status guard and hci_dev_lock() around the mgmt call
 * are elided in this excerpt.
 */
1045 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1047 __u8 status = *((__u8 *) skb->data);
1049 BT_DBG("%s status 0x%x", hdev->name, status);
1051 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1055 mgmt_start_discovery_failed(hdev, status);
1056 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.  The requested
 * enable value is recovered from the sent command:
 *  - ENABLED: complete the request; on failure notify mgmt, otherwise set
 *    HCI_LE_SCAN, cancel the pending advertising-cache expiry, clear the
 *    cache and mark discovery as FINDING.
 *  - DISABLED: clear HCI_LE_SCAN, re-arm the advertising-cache expiry,
 *    and either continue with interleaved discovery or mark discovery
 *    STOPPED.
 *  - anything else is a reserved value and only logged.
 * NOTE(review): the `cp` NULL check, status guards, hci_dev_lock()
 * calls, `break` statements and several closing braces are elided in
 * this excerpt.
 */
1061 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1062 struct sk_buff *skb)
1064 struct hci_cp_le_set_scan_enable *cp;
1065 __u8 status = *((__u8 *) skb->data);
1067 BT_DBG("%s status 0x%x", hdev->name, status);
1069 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1073 switch (cp->enable) {
1074 case LE_SCANNING_ENABLED:
1075 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1079 mgmt_start_discovery_failed(hdev, status);
1080 hci_dev_unlock(hdev);
1084 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1086 cancel_delayed_work_sync(&hdev->adv_work);
1089 hci_adv_entries_clear(hdev);
1090 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1091 hci_dev_unlock(hdev);
1094 case LE_SCANNING_DISABLED:
1098 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1100 schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1102 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
1103 mgmt_interleaved_discovery(hdev);
1106 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1107 hci_dev_unlock(hdev);
1113 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
/* Command Complete handler for HCI_OP_LE_LTK_REPLY: nothing to cache —
 * complete the request.
 * NOTE(review): a status guard between BT_DBG and hci_req_complete is
 * elided in this excerpt.
 */
1118 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1120 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1122 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1127 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
/* Command Complete handler for HCI_OP_LE_LTK_NEG_REPLY: nothing to
 * cache — complete the request.
 * NOTE(review): a status guard between BT_DBG and hci_req_complete is
 * elided in this excerpt.
 */
1130 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1132 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1134 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1139 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED: on
 * success, sync the LMP_HOST_LE bit in host_features with the `le` value
 * we sent; notify mgmt unless still initialising; complete the request.
 * NOTE(review): the `sent` NULL check, status guard and the condition
 * selecting between the |= and &= lines are elided in this excerpt.
 */
1142 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1143 struct sk_buff *skb)
1145 struct hci_cp_write_le_host_supported *sent;
1146 __u8 status = *((__u8 *) skb->data);
1148 BT_DBG("%s status 0x%x", hdev->name, status);
1150 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1156 hdev->host_features[0] |= LMP_HOST_LE;
1158 hdev->host_features[0] &= ~LMP_HOST_LE;
1161 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1162 !test_bit(HCI_INIT, &hdev->flags))
1163 mgmt_le_enable_complete(hdev, sent->le, status);
1165 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
/* Command Status handler for HCI_OP_INQUIRY.  On failure: complete the
 * request, resume pending connections and report the failed discovery
 * start to mgmt.  On success: mark an inquiry as in progress and move
 * discovery to the FINDING state.
 * NOTE(review): the status branch separating the failure and success
 * paths, a HCI_INQUIRY test_and_set guard and hci_dev_lock() calls are
 * elided in this excerpt.
 */
1168 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1170 BT_DBG("%s status 0x%x", hdev->name, status);
1173 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1174 hci_conn_check_pending(hdev);
1176 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1177 mgmt_start_discovery_failed(hdev, status);
1178 hci_dev_unlock(hdev);
1182 set_bit(HCI_INQUIRY, &hdev->flags);
1185 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1186 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_CREATE_CONN.  On failure for an
 * outgoing connection in BT_CONNECT: close it and notify the protocol
 * layer — unless the error is 0x0c (command disallowed, i.e. inquiry in
 * progress) and fewer than three attempts have been made, in which case
 * the connection is parked in BT_CONNECT2 for a later retry.  On success
 * with no tracked connection yet, a new master-role hci_conn is added.
 * NOTE(review): the `cp` NULL check, the status branch, hci_dev_lock()
 * and the NULL check after hci_conn_add are elided in this excerpt.
 */
1189 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1191 struct hci_cp_create_conn *cp;
1192 struct hci_conn *conn;
1194 BT_DBG("%s status 0x%x", hdev->name, status);
1196 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1202 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1204 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1207 if (conn && conn->state == BT_CONNECT) {
1208 if (status != 0x0c || conn->attempt > 2) {
1209 conn->state = BT_CLOSED;
1210 hci_proto_connect_cfm(conn, status);
1213 conn->state = BT_CONNECT2;
1217 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1220 conn->link_mode |= HCI_LM_MASTER;
1222 BT_ERR("No memory for new connection");
1226 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_ADD_SCO.  On failure, find the ACL
 * link the SCO was being added to, close the attached SCO link and
 * inform the protocol layer.
 * NOTE(review): the early return on success, the `cp` NULL check, the
 * assignment of `sco` from the ACL's link, and hci_dev_lock() are elided
 * in this excerpt.
 */
1229 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1231 struct hci_cp_add_sco *cp;
1232 struct hci_conn *acl, *sco;
1235 BT_DBG("%s status 0x%x", hdev->name, status);
1240 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1244 handle = __le16_to_cpu(cp->handle);
1246 BT_DBG("%s handle %d", hdev->name, handle);
1250 acl = hci_conn_hash_lookup_handle(hdev, handle);
1254 sco->state = BT_CLOSED;
1256 hci_proto_connect_cfm(sco, status);
1261 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_AUTH_REQUESTED.  On failure, notify
 * the protocol layer for a connection still in BT_CONFIG so the connect
 * attempt can be aborted.
 * NOTE(review): the early return on success, `cp`/`conn` NULL checks,
 * hci_dev_lock() and the hci_conn_put after the cfm are elided in this
 * excerpt.
 */
1264 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1266 struct hci_cp_auth_requested *cp;
1267 struct hci_conn *conn;
1269 BT_DBG("%s status 0x%x", hdev->name, status);
1274 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1280 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1282 if (conn->state == BT_CONFIG) {
1283 hci_proto_connect_cfm(conn, status);
1288 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SET_CONN_ENCRYPT.  On failure,
 * notify the protocol layer for a connection still in BT_CONFIG.
 * NOTE(review): the early return on success, `cp`/`conn` NULL checks and
 * hci_dev_lock() are elided in this excerpt.
 */
1291 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1293 struct hci_cp_set_conn_encrypt *cp;
1294 struct hci_conn *conn;
1296 BT_DBG("%s status 0x%x", hdev->name, status);
1301 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1307 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1309 if (conn->state == BT_CONFIG) {
1310 hci_proto_connect_cfm(conn, status);
1315 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still in BT_CONFIG needs an
 * explicit authentication request: not for SDP-only security, and for
 * non-SSP peers only when high security or MITM protection (auth_type
 * bit 0) was requested.
 * NOTE(review): the return statements for each branch are elided in this
 * excerpt.
 */
1318 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1319 struct hci_conn *conn)
1321 if (conn->state != BT_CONFIG || !conn->out)
1324 if (conn->pending_sec_level == BT_SECURITY_SDP)
1327 /* Only request authentication for SSP connections or non-SSP
1328 * devices with sec_level HIGH or if MITM protection is requested */
1329 if (!hci_conn_ssp_enabled(conn) &&
1330 conn->pending_sec_level != BT_SECURITY_HIGH &&
1331 !(conn->auth_type & 0x01))
/* Issue a Remote Name Request for an inquiry-cache entry, reusing the
 * page-scan parameters and clock offset captured during inquiry so the
 * controller can page the device faster.  Returns the hci_send_cmd
 * result (0 on success).
 */
1337 static inline int hci_resolve_name(struct hci_dev *hdev,
1338 struct inquiry_entry *e)
1340 struct hci_cp_remote_name_req cp;
1342 memset(&cp, 0, sizeof(cp));
1344 bacpy(&cp.bdaddr, &e->data.bdaddr);
1345 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1346 cp.pscan_mode = e->data.pscan_mode;
1347 cp.clock_offset = e->data.clock_offset;
1349 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Pick the next inquiry-cache entry that still needs its name resolved
 * and fire off a remote name request for it; the entry is marked
 * NAME_PENDING when the request was sent.  Returns whether a resolution
 * is now in flight.
 * NOTE(review): the return statements and the NULL check on `e` are
 * elided in this excerpt.
 */
1352 static bool hci_resolve_next_name(struct hci_dev *hdev)
1354 struct discovery_state *discov = &hdev->discovery;
1355 struct inquiry_entry *e;
1357 if (list_empty(&discov->resolve))
1360 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1361 if (hci_resolve_name(hdev, e) == 0) {
1362 e->name_state = NAME_PENDING;
/* Handle a resolved (or failed) remote name during discovery.  If the
 * name arrived on a live connection that mgmt has not yet been told
 * about, emit a device-connected event carrying the name.  Then, while
 * in the RESOLVING state, mark the matching inquiry-cache entry
 * NAME_KNOWN, report the name to mgmt, and either start resolving the
 * next pending name or declare discovery stopped.
 * NOTE(review): the early returns for STOPPED/RESOLVING states, the
 * NULL check on `e` and the discov_complete label are elided in this
 * excerpt.
 */
1369 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1370 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1372 struct discovery_state *discov = &hdev->discovery;
1373 struct inquiry_entry *e;
1375 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1376 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1377 name_len, conn->dev_class);
1379 if (discov->state == DISCOVERY_STOPPED)
1382 if (discov->state == DISCOVERY_STOPPING)
1383 goto discov_complete;
1385 if (discov->state != DISCOVERY_RESOLVING)
1388 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING)
1390 e->name_state = NAME_KNOWN;
1393 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1394 e->data.rssi, name, name_len);
1397 if (hci_resolve_next_name(hdev))
1401 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.  On failure (a
 * success waits for the Remote Name Request Complete event), report the
 * pending-name failure to mgmt, then — if an ACL connection to the peer
 * exists and outgoing authentication is still needed — issue the
 * deferred HCI_OP_AUTH_REQUESTED.
 * NOTE(review): the status early-return, `cp`/`conn` NULL checks and
 * hci_dev_lock() are elided in this excerpt.  The inner `cp` shadows the
 * outer `cp` pointer.
 */
1404 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1406 struct hci_cp_remote_name_req *cp;
1407 struct hci_conn *conn;
1409 BT_DBG("%s status 0x%x", hdev->name, status);
1411 /* If successful wait for the name req complete event before
1412 * checking for the need to do authentication */
1416 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1422 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1424 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1425 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1430 if (!hci_outgoing_auth_needed(hdev, conn))
1433 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1434 struct hci_cp_auth_requested cp;
1435 cp.handle = __cpu_to_le16(conn->handle);
1436 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1440 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_READ_REMOTE_FEATURES: on failure,
 * complete the connection setup with the error status.  NOTE(review):
 * extraction is missing guard lines — verify upstream. */
1443 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1445 struct hci_cp_read_remote_features *cp;
1446 struct hci_conn *conn;
1448 BT_DBG("%s status 0x%x", hdev->name, status);
1453 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1459 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
/* Connection still in setup: propagate the failure to the protocol. */
1461 if (conn->state == BT_CONFIG) {
1462 hci_proto_connect_cfm(conn, status);
1467 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_READ_REMOTE_EXT_FEATURES: mirror of
 * hci_cs_read_remote_features for the extended-features command.
 * NOTE(review): extraction is missing guard lines — verify upstream. */
1470 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1472 struct hci_cp_read_remote_ext_features *cp;
1473 struct hci_conn *conn;
1475 BT_DBG("%s status 0x%x", hdev->name, status);
1480 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1486 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
/* Fail the in-progress connection setup with the command status. */
1488 if (conn->state == BT_CONFIG) {
1489 hci_proto_connect_cfm(conn, status);
1494 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_SETUP_SYNC_CONN: on failure, close
 * the SCO link hanging off the ACL connection identified by the command's
 * handle.  NOTE(review): extraction is missing lines (handle declaration,
 * acl->link dereference, NULL checks) — verify upstream. */
1497 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1499 struct hci_cp_setup_sync_conn *cp;
1500 struct hci_conn *acl, *sco;
1503 BT_DBG("%s status 0x%x", hdev->name, status);
1508 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1512 handle = __le16_to_cpu(cp->handle);
1514 BT_DBG("%s handle %d", hdev->name, handle);
/* The handle in the command is the ACL handle; the SCO conn is reached
 * from it (presumably via acl->link — line missing in this extraction). */
1518 acl = hci_conn_hash_lookup_handle(hdev, handle);
1522 sco->state = BT_CLOSED;
1524 hci_proto_connect_cfm(sco, status);
1529 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_SNIFF_MODE: on failure, clear the
 * pending mode-change flag and run any deferred SCO setup.
 * NOTE(review): extraction is missing guard lines — verify upstream. */
1532 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1534 struct hci_cp_sniff_mode *cp;
1535 struct hci_conn *conn;
1537 BT_DBG("%s status 0x%x", hdev->name, status);
1542 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1548 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1550 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
/* SCO setup was deferred until the mode change settled; do it now. */
1552 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1553 hci_sco_setup(conn, status);
1556 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_EXIT_SNIFF_MODE: identical recovery
 * path to hci_cs_sniff_mode — clear the pending mode change and run any
 * deferred SCO setup.  NOTE(review): extraction is missing guard lines. */
1559 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1561 struct hci_cp_exit_sniff_mode *cp;
1562 struct hci_conn *conn;
1564 BT_DBG("%s status 0x%x", hdev->name, status);
1569 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1575 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1577 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
/* Run any SCO setup that was waiting on the mode change. */
1579 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1580 hci_sco_setup(conn, status);
1583 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_DISCONNECT: on failure, tell mgmt
 * the disconnect attempt failed so userspace can react.
 * NOTE(review): extraction is missing status/NULL checks — verify
 * upstream. */
1586 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1588 struct hci_cp_disconnect *cp;
1589 struct hci_conn *conn;
1594 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1600 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1602 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1603 conn->dst_type, status);
1605 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_LE_CREATE_CONN.  On failure, close
 * the in-flight connection; on success, make sure a conn object exists
 * for the peer and record its address type.  NOTE(review): extraction is
 * missing the status branching and NULL checks — verify upstream. */
1608 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1610 struct hci_cp_le_create_conn *cp;
1611 struct hci_conn *conn;
1613 BT_DBG("%s status 0x%x", hdev->name, status);
1615 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1621 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1623 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
/* Failure path: abort the pending outgoing LE connection. */
1627 if (conn && conn->state == BT_CONNECT) {
1628 conn->state = BT_CLOSED;
1629 hci_proto_connect_cfm(conn, status);
/* Success path with no existing conn: create one now so the later
 * connection-complete event has an object to attach to. */
1634 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1636 conn->dst_type = cp->peer_addr_type;
1639 BT_ERR("No memory for new connection");
1644 hci_dev_unlock(hdev);
/* Command-status handler for HCI_OP_LE_START_ENC: debug trace only; the
 * real work happens in the encryption-change event handler. */
1647 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1649 BT_DBG("%s status 0x%x", hdev->name, status);
/* Inquiry Complete event: finish the inquiry phase of discovery and, if
 * mgmt-driven discovery is active, either move to name resolution or stop.
 * NOTE(review): extraction is missing goto targets/returns and the lock
 * call — verify upstream. */
1652 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1654 __u8 status = *((__u8 *) skb->data);
1655 struct discovery_state *discov = &hdev->discovery;
1656 struct inquiry_entry *e;
1658 BT_DBG("%s status %d", hdev->name, status);
1660 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1662 hci_conn_check_pending(hdev);
/* Only act if an inquiry was actually in progress. */
1664 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1667 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1672 if (discov->state != DISCOVERY_FINDING)
/* No names to resolve: discovery is done. */
1675 if (list_empty(&discov->resolve)) {
1676 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Otherwise start resolving the first NAME_NEEDED entry. */
1680 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1681 if (e && hci_resolve_name(hdev, e) == 0) {
1682 e->name_state = NAME_PENDING;
1683 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1685 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1689 hci_dev_unlock(hdev);
/* Inquiry Result event (no RSSI variant): update the inquiry cache for
 * each reported device and forward it to mgmt as a found device.
 * NOTE(review): extraction is missing the num_rsp sanity check and lock
 * call — verify upstream. */
1692 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1694 struct inquiry_data data;
/* Event payload: one count byte followed by num_rsp inquiry_info records. */
1695 struct inquiry_info *info = (void *) (skb->data + 1);
1696 int num_rsp = *((__u8 *) skb->data);
1698 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1705 for (; num_rsp; num_rsp--, info++) {
1706 bool name_known, ssp;
1708 bacpy(&data.bdaddr, &info->bdaddr);
1709 data.pscan_rep_mode = info->pscan_rep_mode;
1710 data.pscan_period_mode = info->pscan_period_mode;
1711 data.pscan_mode = info->pscan_mode;
1712 memcpy(data.dev_class, info->dev_class, 3);
1713 data.clock_offset = info->clock_offset;
/* This event variant carries no SSP info. */
1715 data.ssp_mode = 0x00;
1717 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1718 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1719 info->dev_class, 0, !name_known, ssp, NULL,
1723 hci_dev_unlock(hdev);
/* Connection Complete event.  On success: record the handle, move the
 * conn to BT_CONFIG (ACL) or BT_CONNECTED (SCO), inherit link-mode bits
 * from the adapter, and kick off remote-feature discovery.  On failure:
 * close the conn and notify mgmt.  NOTE(review): extraction is missing
 * many lines (status branching, sizeof args, hci_conn_put) — verify
 * upstream before relying on the visible control flow. */
1726 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1728 struct hci_ev_conn_complete *ev = (void *) skb->data;
1729 struct hci_conn *conn;
1731 BT_DBG("%s", hdev->name);
1735 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Some controllers report an SCO completion for a conn we tracked as
 * ESCO; retry the lookup and retag the link type. */
1737 if (ev->link_type != SCO_LINK)
1740 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1744 conn->type = SCO_LINK;
1748 conn->handle = __le16_to_cpu(ev->handle);
1750 if (conn->type == ACL_LINK) {
1751 conn->state = BT_CONFIG;
1752 hci_conn_hold(conn);
1753 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1755 conn->state = BT_CONNECTED;
1757 hci_conn_hold_device(conn);
1758 hci_conn_add_sysfs(conn);
/* Adapter-wide auth/encrypt settings apply to the new link. */
1760 if (test_bit(HCI_AUTH, &hdev->flags))
1761 conn->link_mode |= HCI_LM_AUTH;
1763 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1764 conn->link_mode |= HCI_LM_ENCRYPT;
1766 /* Get remote features */
1767 if (conn->type == ACL_LINK) {
1768 struct hci_cp_read_remote_features cp;
1769 cp.handle = ev->handle;
1770 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1774 /* Set packet type for incoming connection */
1775 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1776 struct hci_cp_change_conn_ptype cp;
1777 cp.handle = ev->handle;
1778 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1779 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Failure path: close and report to mgmt for ACL links. */
1783 conn->state = BT_CLOSED;
1784 if (conn->type == ACL_LINK)
1785 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1786 conn->dst_type, ev->status);
1789 if (conn->type == ACL_LINK)
1790 hci_sco_setup(conn, ev->status);
1793 hci_proto_connect_cfm(conn, ev->status);
1795 } else if (ev->link_type != ACL_LINK)
1796 hci_proto_connect_cfm(conn, ev->status);
1799 hci_dev_unlock(hdev);
/* Another pending outgoing connection may now proceed. */
1801 hci_conn_check_pending(hdev);
/* Connection Request event: decide whether to accept or reject an
 * incoming connection.  Accepts when the link policy allows it and the
 * peer is not blacklisted; ACL (or SCO on non-eSCO hardware) is accepted
 * with a role preference, eSCO with fixed bandwidth/latency parameters.
 * NOTE(review): extraction is missing lines (lock, returns, sizeof args)
 * — verify upstream. */
1806 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1808 struct hci_ev_conn_request *ev = (void *) skb->data;
1807 int mask = hdev->link_mode;
1809 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1810 batostr(&ev->bdaddr), ev->link_type);
/* Let protocol layers veto or extend the accept mask. */
1812 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1814 if ((mask & HCI_LM_ACCEPT) &&
1815 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1816 /* Connection accepted */
1817 struct inquiry_entry *ie;
1818 struct hci_conn *conn;
/* Refresh the cached device class if we have seen this peer. */
1822 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1824 memcpy(ie->data.dev_class, ev->dev_class, 3);
1826 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1828 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1830 BT_ERR("No memory for new connection");
1831 hci_dev_unlock(hdev);
1836 memcpy(conn->dev_class, ev->dev_class, 3);
1837 conn->state = BT_CONNECT;
1839 hci_dev_unlock(hdev);
1841 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1842 struct hci_cp_accept_conn_req cp;
1844 bacpy(&cp.bdaddr, &ev->bdaddr);
/* Try to become master only if role switch is supported and the
 * local policy asks for it. */
1846 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1847 cp.role = 0x00; /* Become master */
1849 cp.role = 0x01; /* Remain slave */
1851 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1854 struct hci_cp_accept_sync_conn_req cp;
1856 bacpy(&cp.bdaddr, &ev->bdaddr);
1857 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* 0x1f40 = 8000 (64 kbit/s voice); max_latency 0xffff and
 * retrans_effort 0xff mean "don't care". */
1859 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1860 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1861 cp.max_latency = cpu_to_le16(0xffff);
1862 cp.content_format = cpu_to_le16(hdev->voice_setting);
1863 cp.retrans_effort = 0xff;
1865 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1869 /* Connection rejected */
1870 struct hci_cp_reject_conn_req cp;
1872 bacpy(&cp.bdaddr, &ev->bdaddr);
1873 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1874 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Disconnection Complete event: mark the conn closed, notify mgmt (as a
 * disconnect or a failed disconnect depending on status), and complete
 * the disconnect with the protocol layers.  NOTE(review): extraction is
 * missing NULL checks and the hci_conn_del path — verify upstream. */
1878 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1880 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1881 struct hci_conn *conn;
1883 BT_DBG("%s status %d", hdev->name, ev->status);
1887 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1891 if (ev->status == 0)
1892 conn->state = BT_CLOSED;
/* Only conns previously announced to mgmt get disconnect events, and
 * only for ACL/LE links. */
1894 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1895 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1896 if (ev->status != 0)
1897 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1898 conn->dst_type, ev->status);
1900 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1904 if (ev->status == 0) {
1905 hci_proto_disconn_cfm(conn, ev->reason);
1910 hci_dev_unlock(hdev);
/* Authentication Complete event: update link-mode/security level, report
 * failures to mgmt, and either continue connection setup (possibly
 * starting encryption for SSP links) or run any pending encryption
 * request.  NOTE(review): extraction is missing status branching, sizeof
 * args and returns — verify upstream. */
1913 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1915 struct hci_ev_auth_complete *ev = (void *) skb->data;
1916 struct hci_conn *conn;
1918 BT_DBG("%s status %d", hdev->name, ev->status);
1922 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Legacy (non-SSP) devices cannot be re-authenticated; treat the
 * pending re-auth as satisfied. */
1927 if (!hci_conn_ssp_enabled(conn) &&
1928 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1929 BT_INFO("re-auth of legacy device is not possible.");
1931 conn->link_mode |= HCI_LM_AUTH;
1932 conn->sec_level = conn->pending_sec_level;
1935 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1939 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1940 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1942 if (conn->state == BT_CONFIG) {
/* SSP links must also be encrypted before setup completes. */
1943 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1944 struct hci_cp_set_conn_encrypt cp;
1945 cp.handle = ev->handle;
1947 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1950 conn->state = BT_CONNECTED;
1951 hci_proto_connect_cfm(conn, ev->status);
1955 hci_auth_cfm(conn, ev->status);
1957 hci_conn_hold(conn);
1958 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
/* A caller asked for encryption while auth was in flight; issue or
 * fail it now that auth has finished. */
1962 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1964 struct hci_cp_set_conn_encrypt cp;
1965 cp.handle = ev->handle;
1967 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1970 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1971 hci_encrypt_cfm(conn, ev->status, 0x00);
1976 hci_dev_unlock(hdev);
/* Remote Name Request Complete event: feed the (possibly failed) name
 * into the pending-name machinery, then continue with authentication for
 * an outgoing connection if needed.  NOTE(review): extraction is missing
 * lock/goto lines — verify upstream. */
1979 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1981 struct hci_ev_remote_name *ev = (void *) skb->data;
1982 struct hci_conn *conn;
1984 BT_DBG("%s", hdev->name);
1986 hci_conn_check_pending(hdev);
1990 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1992 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* On success pass the NUL-bounded name; on failure pass NULL so the
 * resolver marks the entry as not resolvable. */
1995 if (ev->status == 0)
1996 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1997 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1999 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2005 if (!hci_outgoing_auth_needed(hdev, conn))
2008 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2009 struct hci_cp_auth_requested cp;
2010 cp.handle = __cpu_to_le16(conn->handle);
2011 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2015 hci_dev_unlock(hdev);
/* Encryption Change event: update the link-mode encrypt/auth bits and
 * complete either connection setup or a pending encryption request.
 * NOTE(review): extraction is missing the status/ev->encrypt branching —
 * verify upstream. */
2018 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2020 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2021 struct hci_conn *conn;
2023 BT_DBG("%s status %d", hdev->name, ev->status);
2027 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2031 /* Encryption implies authentication */
2032 conn->link_mode |= HCI_LM_AUTH;
2033 conn->link_mode |= HCI_LM_ENCRYPT;
2034 conn->sec_level = conn->pending_sec_level;
/* Encryption turned off: drop the flag. */
2036 conn->link_mode &= ~HCI_LM_ENCRYPT;
2039 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2041 if (conn->state == BT_CONFIG) {
2043 conn->state = BT_CONNECTED;
2045 hci_proto_connect_cfm(conn, ev->status);
2048 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2051 hci_dev_unlock(hdev);
/* Change Connection Link Key Complete event: mark the link secure on
 * success and notify waiters of the key change.  NOTE(review):
 * extraction is missing NULL/status checks — verify upstream. */
2054 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2056 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2057 struct hci_conn *conn;
2059 BT_DBG("%s status %d", hdev->name, ev->status);
2063 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2066 conn->link_mode |= HCI_LM_SECURE;
2068 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
/* Wake anyone waiting on the key-change to finish. */
2070 hci_key_change_cfm(conn, ev->status);
2073 hci_dev_unlock(hdev);
/* Read Remote Features Complete event: cache the feature page and decide
 * the next setup step — read extended features for SSP-capable peers,
 * else request the remote name (or report connected to mgmt), then close
 * out setup if no authentication is needed.  NOTE(review): extraction is
 * missing the conditional around the name request and sizeof args —
 * verify upstream. */
2076 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2078 struct hci_ev_remote_features *ev = (void *) skb->data;
2079 struct hci_conn *conn;
2081 BT_DBG("%s status %d", hdev->name, ev->status);
2085 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* First (page 0) feature block for this connection. */
2090 memcpy(conn->features, ev->features, 8);
2092 if (conn->state != BT_CONFIG)
/* Both sides SSP-capable: fetch extended features (host SSP bit). */
2095 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2096 struct hci_cp_read_remote_ext_features cp;
2097 cp.handle = ev->handle;
2099 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2105 struct hci_cp_remote_name_req cp;
2106 memset(&cp, 0, sizeof(cp));
2107 bacpy(&cp.bdaddr, &conn->dst);
2108 cp.pscan_rep_mode = 0x02;
2109 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2110 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2111 mgmt_device_connected(hdev, &conn->dst, conn->type,
2112 conn->dst_type, 0, NULL, 0,
2115 if (!hci_outgoing_auth_needed(hdev, conn)) {
2116 conn->state = BT_CONNECTED;
2117 hci_proto_connect_cfm(conn, ev->status);
2122 hci_dev_unlock(hdev);
/* Read Remote Version Information Complete event: no action taken here
 * beyond the debug trace. */
2125 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2127 BT_DBG("%s", hdev->name);
/* QoS Setup Complete event: no action taken here beyond the debug
 * trace. */
2130 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2132 BT_DBG("%s", hdev->name);
/* Command Complete event: strip the event header, dispatch to the
 * per-opcode hci_cc_* handler, cancel the command timeout and restart the
 * command queue.  NOTE(review): extraction is missing the opcode variable
 * declaration, `break` statements and the ev->ncmd check around the
 * queue restart — verify upstream before editing. */
2135 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2137 struct hci_ev_cmd_complete *ev = (void *) skb->data;
/* Handlers see only the command's return parameters. */
2140 skb_pull(skb, sizeof(*ev));
2142 opcode = __le16_to_cpu(ev->opcode);
2145 case HCI_OP_INQUIRY_CANCEL:
2146 hci_cc_inquiry_cancel(hdev, skb);
2149 case HCI_OP_EXIT_PERIODIC_INQ:
2150 hci_cc_exit_periodic_inq(hdev, skb);
2153 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2154 hci_cc_remote_name_req_cancel(hdev, skb);
2157 case HCI_OP_ROLE_DISCOVERY:
2158 hci_cc_role_discovery(hdev, skb);
2161 case HCI_OP_READ_LINK_POLICY:
2162 hci_cc_read_link_policy(hdev, skb);
2165 case HCI_OP_WRITE_LINK_POLICY:
2166 hci_cc_write_link_policy(hdev, skb);
2169 case HCI_OP_READ_DEF_LINK_POLICY:
2170 hci_cc_read_def_link_policy(hdev, skb);
2173 case HCI_OP_WRITE_DEF_LINK_POLICY:
2174 hci_cc_write_def_link_policy(hdev, skb);
2178 hci_cc_reset(hdev, skb);
2181 case HCI_OP_WRITE_LOCAL_NAME:
2182 hci_cc_write_local_name(hdev, skb);
2185 case HCI_OP_READ_LOCAL_NAME:
2186 hci_cc_read_local_name(hdev, skb);
2189 case HCI_OP_WRITE_AUTH_ENABLE:
2190 hci_cc_write_auth_enable(hdev, skb);
2193 case HCI_OP_WRITE_ENCRYPT_MODE:
2194 hci_cc_write_encrypt_mode(hdev, skb);
2197 case HCI_OP_WRITE_SCAN_ENABLE:
2198 hci_cc_write_scan_enable(hdev, skb);
2201 case HCI_OP_READ_CLASS_OF_DEV:
2202 hci_cc_read_class_of_dev(hdev, skb);
2205 case HCI_OP_WRITE_CLASS_OF_DEV:
2206 hci_cc_write_class_of_dev(hdev, skb);
2209 case HCI_OP_READ_VOICE_SETTING:
2210 hci_cc_read_voice_setting(hdev, skb);
2213 case HCI_OP_WRITE_VOICE_SETTING:
2214 hci_cc_write_voice_setting(hdev, skb);
2217 case HCI_OP_HOST_BUFFER_SIZE:
2218 hci_cc_host_buffer_size(hdev, skb);
2221 case HCI_OP_WRITE_SSP_MODE:
2222 hci_cc_write_ssp_mode(hdev, skb);
2225 case HCI_OP_READ_LOCAL_VERSION:
2226 hci_cc_read_local_version(hdev, skb);
2229 case HCI_OP_READ_LOCAL_COMMANDS:
2230 hci_cc_read_local_commands(hdev, skb);
2233 case HCI_OP_READ_LOCAL_FEATURES:
2234 hci_cc_read_local_features(hdev, skb);
2237 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2238 hci_cc_read_local_ext_features(hdev, skb);
2241 case HCI_OP_READ_BUFFER_SIZE:
2242 hci_cc_read_buffer_size(hdev, skb);
2245 case HCI_OP_READ_BD_ADDR:
2246 hci_cc_read_bd_addr(hdev, skb);
2249 case HCI_OP_READ_DATA_BLOCK_SIZE:
2250 hci_cc_read_data_block_size(hdev, skb);
2253 case HCI_OP_WRITE_CA_TIMEOUT:
2254 hci_cc_write_ca_timeout(hdev, skb);
2257 case HCI_OP_READ_FLOW_CONTROL_MODE:
2258 hci_cc_read_flow_control_mode(hdev, skb);
2261 case HCI_OP_READ_LOCAL_AMP_INFO:
2262 hci_cc_read_local_amp_info(hdev, skb);
2265 case HCI_OP_DELETE_STORED_LINK_KEY:
2266 hci_cc_delete_stored_link_key(hdev, skb);
2269 case HCI_OP_SET_EVENT_MASK:
2270 hci_cc_set_event_mask(hdev, skb);
2273 case HCI_OP_WRITE_INQUIRY_MODE:
2274 hci_cc_write_inquiry_mode(hdev, skb);
2277 case HCI_OP_READ_INQ_RSP_TX_POWER:
2278 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2281 case HCI_OP_SET_EVENT_FLT:
2282 hci_cc_set_event_flt(hdev, skb);
2285 case HCI_OP_PIN_CODE_REPLY:
2286 hci_cc_pin_code_reply(hdev, skb);
2289 case HCI_OP_PIN_CODE_NEG_REPLY:
2290 hci_cc_pin_code_neg_reply(hdev, skb);
2293 case HCI_OP_READ_LOCAL_OOB_DATA:
2294 hci_cc_read_local_oob_data_reply(hdev, skb);
2297 case HCI_OP_LE_READ_BUFFER_SIZE:
2298 hci_cc_le_read_buffer_size(hdev, skb);
2301 case HCI_OP_USER_CONFIRM_REPLY:
2302 hci_cc_user_confirm_reply(hdev, skb);
2305 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2306 hci_cc_user_confirm_neg_reply(hdev, skb);
2309 case HCI_OP_USER_PASSKEY_REPLY:
2310 hci_cc_user_passkey_reply(hdev, skb);
2313 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2314 hci_cc_user_passkey_neg_reply(hdev, skb);
2316 case HCI_OP_LE_SET_SCAN_PARAM:
2317 hci_cc_le_set_scan_param(hdev, skb);
2320 case HCI_OP_LE_SET_SCAN_ENABLE:
2321 hci_cc_le_set_scan_enable(hdev, skb);
2324 case HCI_OP_LE_LTK_REPLY:
2325 hci_cc_le_ltk_reply(hdev, skb);
2328 case HCI_OP_LE_LTK_NEG_REPLY:
2329 hci_cc_le_ltk_neg_reply(hdev, skb);
2332 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2333 hci_cc_write_le_host_supported(hdev, skb);
/* Unhandled opcode: trace only. */
2337 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
/* HCI_OP_NOP completions do not correspond to a command we sent, so
 * leave the command timeout armed. */
2341 if (ev->opcode != HCI_OP_NOP)
2342 del_timer(&hdev->cmd_timer);
/* Allow the next queued command to go out. */
2345 atomic_set(&hdev->cmd_cnt, 1);
2346 if (!skb_queue_empty(&hdev->cmd_q))
2347 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Command Status event: dispatch to the per-opcode hci_cs_* handler,
 * cancel the command timeout and restart the command queue when the
 * controller grants more command slots.  NOTE(review): extraction is
 * missing the opcode declaration and `break` statements — verify
 * upstream. */
2351 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2353 struct hci_ev_cmd_status *ev = (void *) skb->data;
2356 skb_pull(skb, sizeof(*ev));
2358 opcode = __le16_to_cpu(ev->opcode);
2361 case HCI_OP_INQUIRY:
2362 hci_cs_inquiry(hdev, ev->status);
2365 case HCI_OP_CREATE_CONN:
2366 hci_cs_create_conn(hdev, ev->status);
2369 case HCI_OP_ADD_SCO:
2370 hci_cs_add_sco(hdev, ev->status);
2373 case HCI_OP_AUTH_REQUESTED:
2374 hci_cs_auth_requested(hdev, ev->status);
2377 case HCI_OP_SET_CONN_ENCRYPT:
2378 hci_cs_set_conn_encrypt(hdev, ev->status);
2381 case HCI_OP_REMOTE_NAME_REQ:
2382 hci_cs_remote_name_req(hdev, ev->status);
2385 case HCI_OP_READ_REMOTE_FEATURES:
2386 hci_cs_read_remote_features(hdev, ev->status);
2389 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2390 hci_cs_read_remote_ext_features(hdev, ev->status);
2393 case HCI_OP_SETUP_SYNC_CONN:
2394 hci_cs_setup_sync_conn(hdev, ev->status);
2397 case HCI_OP_SNIFF_MODE:
2398 hci_cs_sniff_mode(hdev, ev->status);
2401 case HCI_OP_EXIT_SNIFF_MODE:
2402 hci_cs_exit_sniff_mode(hdev, ev->status);
2405 case HCI_OP_DISCONNECT:
2406 hci_cs_disconnect(hdev, ev->status);
2409 case HCI_OP_LE_CREATE_CONN:
2410 hci_cs_le_create_conn(hdev, ev->status);
2413 case HCI_OP_LE_START_ENC:
2414 hci_cs_le_start_enc(hdev, ev->status);
/* Unhandled opcode: trace only. */
2418 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2422 if (ev->opcode != HCI_OP_NOP)
2423 del_timer(&hdev->cmd_timer);
/* ncmd > 0 means the controller can take another command — but hold
 * back while a reset is in flight. */
2425 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2426 atomic_set(&hdev->cmd_cnt, 1);
2427 if (!skb_queue_empty(&hdev->cmd_q))
2428 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Role Change event: update the master/slave bit in link_mode, clear the
 * pending role-switch flag and notify waiters.  NOTE(review): extraction
 * is missing the status/role branching — verify upstream. */
2432 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2434 struct hci_ev_role_change *ev = (void *) skb->data;
2435 struct hci_conn *conn;
2437 BT_DBG("%s status %d", hdev->name, ev->status);
2441 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2445 conn->link_mode &= ~HCI_LM_MASTER;
2447 conn->link_mode |= HCI_LM_MASTER;
2450 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2452 hci_role_switch_cfm(conn, ev->status, ev->role);
2455 hci_dev_unlock(hdev);
/* Number of Completed Packets event (packet-based flow control): return
 * per-connection credits to the adapter's ACL/SCO/LE counters, clamped to
 * the advertised maximums, and restart TX.  NOTE(review): extraction is
 * missing the loop-index declaration, case labels and breaks — verify
 * upstream. */
2458 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2460 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only valid in packet-based flow-control mode. */
2463 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2464 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Bounds-check the handle array against the actual skb length before
 * walking it. */
2468 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2469 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2470 BT_DBG("%s bad parameters", hdev->name);
2474 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2476 for (i = 0; i < ev->num_hndl; i++) {
2477 struct hci_comp_pkts_info *info = &ev->handles[i];
2478 struct hci_conn *conn;
2479 __u16 handle, count;
2481 handle = __le16_to_cpu(info->handle);
2482 count = __le16_to_cpu(info->count);
2484 conn = hci_conn_hash_lookup_handle(hdev, handle);
2488 conn->sent -= count;
2490 switch (conn->type) {
2492 hdev->acl_cnt += count;
/* Clamp so buggy controllers cannot inflate our credit pool. */
2493 if (hdev->acl_cnt > hdev->acl_pkts)
2494 hdev->acl_cnt = hdev->acl_pkts;
/* LE shares the ACL pool when the controller reports no dedicated
 * LE buffers (le_pkts == 0). */
2498 if (hdev->le_pkts) {
2499 hdev->le_cnt += count;
2500 if (hdev->le_cnt > hdev->le_pkts)
2501 hdev->le_cnt = hdev->le_pkts;
2503 hdev->acl_cnt += count;
2504 if (hdev->acl_cnt > hdev->acl_pkts)
2505 hdev->acl_cnt = hdev->acl_pkts;
2510 hdev->sco_cnt += count;
2511 if (hdev->sco_cnt > hdev->sco_pkts)
2512 hdev->sco_cnt = hdev->sco_pkts;
2516 BT_ERR("Unknown type %d conn %p", conn->type, conn);
/* Freed credits may unblock queued TX. */
2521 queue_work(hdev->workqueue, &hdev->tx_work);
/* Number of Completed Data Blocks event (block-based flow control):
 * return per-connection block credits to the adapter's pool, clamped to
 * num_blocks, and restart TX.  NOTE(review): extraction is missing the
 * loop-index declaration, case labels and breaks — verify upstream. */
2524 static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2525 struct sk_buff *skb)
2527 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* Only valid in block-based flow-control mode. */
2530 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2531 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Bounds-check the handle array against the skb length. */
2535 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2536 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2537 BT_DBG("%s bad parameters", hdev->name);
2541 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2544 for (i = 0; i < ev->num_hndl; i++) {
2545 struct hci_comp_blocks_info *info = &ev->handles[i];
2546 struct hci_conn *conn;
2547 __u16 handle, block_count;
2549 handle = __le16_to_cpu(info->handle);
2550 block_count = __le16_to_cpu(info->blocks);
2552 conn = hci_conn_hash_lookup_handle(hdev, handle);
2556 conn->sent -= block_count;
2558 switch (conn->type) {
2560 hdev->block_cnt += block_count;
/* Clamp to the controller-advertised maximum. */
2561 if (hdev->block_cnt > hdev->num_blocks)
2562 hdev->block_cnt = hdev->num_blocks;
2566 BT_ERR("Unknown type %d conn %p", conn->type, conn);
/* Freed blocks may unblock queued TX. */
2571 queue_work(hdev->workqueue, &hdev->tx_work);
/* Mode Change event (active/sniff): record the new mode and interval,
 * maintain the power-save flag for remote-initiated changes, and run any
 * deferred SCO setup.  NOTE(review): extraction is missing NULL checks —
 * verify upstream. */
2574 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2576 struct hci_ev_mode_change *ev = (void *) skb->data;
2577 struct hci_conn *conn;
2579 BT_DBG("%s status %d", hdev->name, ev->status);
2583 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2585 conn->mode = ev->mode;
2586 conn->interval = __le16_to_cpu(ev->interval);
/* If WE did not request this change, mirror it into the power-save
 * flag so our policy tracks the remote's decision. */
2588 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2589 if (conn->mode == HCI_CM_ACTIVE)
2590 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2592 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2595 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2596 hci_sco_setup(conn, ev->status);
2599 hci_dev_unlock(hdev);
/* PIN Code Request event: auto-reject when the adapter is not pairable,
 * otherwise forward the request to mgmt (flagging whether a 16-digit
 * "secure" PIN is required).  NOTE(review): extraction is missing the
 * `secure` variable declaration and assignments — verify upstream. */
2602 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2604 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2605 struct hci_conn *conn;
2607 BT_DBG("%s", hdev->name);
2611 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Hold the connection open for the duration of pairing. */
2615 if (conn->state == BT_CONNECTED) {
2616 hci_conn_hold(conn);
2617 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2621 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2622 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2623 sizeof(ev->bdaddr), &ev->bdaddr);
2624 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
/* High-security pairings need a full-length (secure) PIN. */
2627 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2632 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2636 hci_dev_unlock(hdev);
/* Link Key Request event: look up a stored key for the peer and reply
 * with it, unless policy forbids (debug keys disabled, unauthenticated
 * key for an authenticated pairing, or short combination key for
 * high-security).  Falls through to a negative reply when no acceptable
 * key exists.  NOTE(review): extraction is missing goto labels, returns
 * and the lock call — verify upstream. */
2639 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2641 struct hci_ev_link_key_req *ev = (void *) skb->data;
2642 struct hci_cp_link_key_reply cp;
2643 struct hci_conn *conn;
2644 struct link_key *key;
2646 BT_DBG("%s", hdev->name);
/* Kernel-side key storage disabled: let the remote side deal with it. */
2648 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2653 key = hci_find_link_key(hdev, &ev->bdaddr);
2655 BT_DBG("%s link key not found for %s", hdev->name,
2656 batostr(&ev->bdaddr));
2660 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2661 batostr(&ev->bdaddr));
/* Debug keys are only usable when explicitly enabled. */
2663 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2664 key->type == HCI_LK_DEBUG_COMBINATION) {
2665 BT_DBG("%s ignoring debug key", hdev->name);
2669 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* An unauthenticated key must not satisfy an auth_type that demands
 * MITM protection (low bit set). */
2671 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2672 conn->auth_type != 0xff &&
2673 (conn->auth_type & 0x01)) {
2674 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* Short PIN-derived combination keys are too weak for high security. */
2678 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2679 conn->pending_sec_level == BT_SECURITY_HIGH) {
2680 BT_DBG("%s ignoring key unauthenticated for high \
2681 security", hdev->name);
2685 conn->key_type = key->type;
2686 conn->pin_length = key->pin_len;
2689 bacpy(&cp.bdaddr, &ev->bdaddr);
2690 memcpy(cp.link_key, key->val, 16);
2692 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2694 hci_dev_unlock(hdev);
/* No usable key: send a negative reply (6 = sizeof(bdaddr_t)). */
2699 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2700 hci_dev_unlock(hdev);
/* Link Key Notification event: store the newly generated key (when key
 * storage is enabled) and remember its type on the connection.
 * NOTE(review): extraction is missing the pin_len declaration and the
 * hci_conn_put — verify upstream. */
2703 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2705 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2706 struct hci_conn *conn;
2709 BT_DBG("%s", hdev->name);
2713 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2715 hci_conn_hold(conn);
2716 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2717 pin_len = conn->pin_length;
/* A "changed combination key" keeps the original key's type. */
2719 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2720 conn->key_type = ev->key_type;
2725 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2726 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2727 ev->key_type, pin_len);
2729 hci_dev_unlock(hdev);
/* Read Clock Offset Complete event: refresh the cached clock offset for
 * the peer's inquiry-cache entry so future page attempts are faster. */
2732 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2734 struct hci_ev_clock_offset *ev = (void *) skb->data;
2735 struct hci_conn *conn;
2737 BT_DBG("%s status %d", hdev->name, ev->status);
2741 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2742 if (conn && !ev->status) {
2743 struct inquiry_entry *ie;
2745 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2747 ie->data.clock_offset = ev->clock_offset;
2748 ie->timestamp = jiffies;
2752 hci_dev_unlock(hdev);
/* Connection Packet Type Changed event: record the negotiated packet
 * types on the connection. */
2755 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2757 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2758 struct hci_conn *conn;
2760 BT_DBG("%s status %d", hdev->name, ev->status);
2764 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2765 if (conn && !ev->status)
2766 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2768 hci_dev_unlock(hdev);
/* Page Scan Repetition Mode Change event: refresh the cached page-scan
 * repetition mode for the peer's inquiry-cache entry. */
2771 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2773 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2774 struct inquiry_entry *ie;
2776 BT_DBG("%s", hdev->name);
2780 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2782 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2783 ie->timestamp = jiffies;
2786 hci_dev_unlock(hdev);
/* Inquiry Result with RSSI event.  The payload comes in two on-air
 * formats distinguished only by record size: with or without the
 * pscan_mode field.  Both branches update the inquiry cache and notify
 * mgmt.  NOTE(review): extraction is missing the num_rsp sanity check and
 * the trailing cache-update arguments — verify upstream. */
2789 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2791 struct inquiry_data data;
2792 int num_rsp = *((__u8 *) skb->data);
2793 bool name_known, ssp;
2795 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Distinguish the two wire formats by dividing the payload length by
 * the response count. */
2802 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2803 struct inquiry_info_with_rssi_and_pscan_mode *info;
2804 info = (void *) (skb->data + 1);
2806 for (; num_rsp; num_rsp--, info++) {
2807 bacpy(&data.bdaddr, &info->bdaddr);
2808 data.pscan_rep_mode = info->pscan_rep_mode;
2809 data.pscan_period_mode = info->pscan_period_mode;
2810 data.pscan_mode = info->pscan_mode;
2811 memcpy(data.dev_class, info->dev_class, 3);
2812 data.clock_offset = info->clock_offset;
2813 data.rssi = info->rssi;
2814 data.ssp_mode = 0x00;
2816 name_known = hci_inquiry_cache_update(hdev, &data,
2818 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2819 info->dev_class, info->rssi,
2820 !name_known, ssp, NULL, 0);
/* Standard format: no pscan_mode field on the wire. */
2823 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2825 for (; num_rsp; num_rsp--, info++) {
2826 bacpy(&data.bdaddr, &info->bdaddr);
2827 data.pscan_rep_mode = info->pscan_rep_mode;
2828 data.pscan_period_mode = info->pscan_period_mode;
2829 data.pscan_mode = 0x00;
2830 memcpy(data.dev_class, info->dev_class, 3);
2831 data.clock_offset = info->clock_offset;
2832 data.rssi = info->rssi;
2833 data.ssp_mode = 0x00;
2834 name_known = hci_inquiry_cache_update(hdev, &data,
2836 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2837 info->dev_class, info->rssi,
2838 !name_known, ssp, NULL, 0);
2842 hci_dev_unlock(hdev);
/* Read Remote Extended Features Complete event: for page 1, record the
 * remote host's SSP support, then continue setup exactly like
 * hci_remote_features_evt (name request or mgmt connected notification,
 * then finish if no auth needed).  NOTE(review): extraction is missing
 * the conditional around the name request — verify upstream. */
2845 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2847 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2848 struct hci_conn *conn;
2850 BT_DBG("%s", hdev->name);
2854 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Page 1 carries host-supported feature bits, including host SSP. */
2858 if (!ev->status && ev->page == 0x01) {
2859 struct inquiry_entry *ie;
2861 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2863 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2865 if (ev->features[0] & LMP_HOST_SSP)
2866 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2869 if (conn->state != BT_CONFIG)
2873 struct hci_cp_remote_name_req cp;
2874 memset(&cp, 0, sizeof(cp));
2875 bacpy(&cp.bdaddr, &conn->dst);
2876 cp.pscan_rep_mode = 0x02;
2877 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2878 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2879 mgmt_device_connected(hdev, &conn->dst, conn->type,
2880 conn->dst_type, 0, NULL, 0,
2883 if (!hci_outgoing_auth_needed(hdev, conn)) {
2884 conn->state = BT_CONNECTED;
2885 hci_proto_connect_cfm(conn, ev->status);
2890 hci_dev_unlock(hdev);
/* Synchronous Connection Complete event (SCO/eSCO).  On success the conn
 * goes BT_CONNECTED; on a set of known negotiation failures we retry once
 * with a downgraded packet-type mask; anything else closes the conn.
 * NOTE(review): extraction is missing breaks/returns and the hci_conn_del
 * path — verify upstream. */
2893 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2895 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2896 struct hci_conn *conn;
2898 BT_DBG("%s status %d", hdev->name, ev->status);
2902 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* The controller may complete an eSCO request as plain SCO; retry the
 * lookup and retag. */
2904 if (ev->link_type == ESCO_LINK)
2907 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2911 conn->type = SCO_LINK;
2914 switch (ev->status) {
2916 conn->handle = __le16_to_cpu(ev->handle);
2917 conn->state = BT_CONNECTED;
2919 hci_conn_hold_device(conn);
2920 hci_conn_add_sysfs(conn);
/* Known negotiation failures: retry once with a less demanding
 * packet-type mask before giving up. */
2923 case 0x11: /* Unsupported Feature or Parameter Value */
2924 case 0x1c: /* SCO interval rejected */
2925 case 0x1a: /* Unsupported Remote Feature */
2926 case 0x1f: /* Unspecified error */
2927 if (conn->out && conn->attempt < 2) {
2928 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2929 (hdev->esco_type & EDR_ESCO_MASK);
2930 hci_setup_sync(conn, conn->link->handle);
2936 conn->state = BT_CLOSED;
2940 hci_proto_connect_cfm(conn, ev->status);
2945 hci_dev_unlock(hdev);
/* Synchronous Connection Changed event: link parameters of an existing
 * SCO/eSCO connection changed.  Nothing to update; only logged. */
2948 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2950 BT_DBG("%s", hdev->name);
/* Sniff Subrating event: no state is changed here, the status is only
 * logged for debugging. */
2953 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2955 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2957 BT_DBG("%s status %d", hdev->name, ev->status);
/* Extended Inquiry Result event.
 * Like the RSSI inquiry result, but each response additionally carries an
 * EIR data blob; ssp_mode is forced to 0x01 since EIR implies SSP support.
 * Each response updates the inquiry cache and is forwarded to mgmt. */
2960 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2962 struct inquiry_data data;
/* First byte of the event is the response count; entries follow it. */
2963 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2964 int num_rsp = *((__u8 *) skb->data);
2966 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2973 for (; num_rsp; num_rsp--, info++) {
2974 bool name_known, ssp;
2976 bacpy(&data.bdaddr, &info->bdaddr);
2977 data.pscan_rep_mode = info->pscan_rep_mode;
2978 data.pscan_period_mode = info->pscan_period_mode;
2979 data.pscan_mode = 0x00;
2980 memcpy(data.dev_class, info->dev_class, 3);
2981 data.clock_offset = info->clock_offset;
2982 data.rssi = info->rssi;
2983 data.ssp_mode = 0x01;
/* With mgmt active, the EIR blob may already contain the device name,
 * in which case no separate name-request round trip is needed. */
2985 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2986 name_known = eir_has_data_type(info->data,
2992 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2994 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2995 info->dev_class, info->rssi, !name_known,
2996 ssp, info->data, sizeof(info->data));
2999 hci_dev_unlock(hdev);
/* Derive the authentication requirements to advertise in an IO Capability
 * Reply, biased toward what the remote side asked for.
 * remote_auth values: 0x00/0x01 = no bonding (without/with MITM),
 * 0x02/0x03 = dedicated bonding (without/with MITM); cap 0x03 =
 * NoInputNoOutput, which rules out MITM protection.
 * NOTE(review): the return statements inside the dedicated-bonding branch
 * appear to have been lost in extraction — confirm against upstream. */
3002 static inline u8 hci_get_auth_req(struct hci_conn *conn)
3004 /* If remote requests dedicated bonding follow that lead */
3005 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3006 /* If both remote and local IO capabilities allow MITM
3007 * protection then require it, otherwise don't */
3008 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3014 /* If remote requests no-bonding follow that lead */
3015 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
/* Keep our own MITM bit (bit 0) but adopt the remote bonding choice. */
3016 return conn->remote_auth | (conn->auth_type & 0x01);
3018 return conn->auth_type;
/* IO Capability Request event (Secure Simple Pairing).
 * Replies with our IO capability and auth requirements when pairing is
 * permitted (device pairable, or remote wants non-bonding); otherwise a
 * negative reply rejects the pairing attempt. */
3021 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3023 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3024 struct hci_conn *conn;
3026 BT_DBG("%s", hdev->name);
3030 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Hold a reference across the pairing exchange. */
3034 hci_conn_hold(conn);
3036 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* Pairing allowed if we are pairable, or if the remote only requests
 * no-bonding (auth with the bonding bits masked off). */
3039 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3040 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3041 struct hci_cp_io_capability_reply cp;
3043 bacpy(&cp.bdaddr, &ev->bdaddr);
3044 /* Change the IO capability from KeyboardDisplay
3045 * to DisplayYesNo as it is not supported by BT spec. */
3046 cp.capability = (conn->io_capability == 0x04) ?
3047 0x01 : conn->io_capability;
3048 conn->auth_type = hci_get_auth_req(conn);
3049 cp.authentication = conn->auth_type;
/* Advertise OOB data only if we actually have it stored for this peer. */
3051 if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
3052 hci_find_remote_oob_data(hdev, &conn->dst))
3057 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3060 struct hci_cp_io_capability_neg_reply cp;
3062 bacpy(&cp.bdaddr, &ev->bdaddr);
3063 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3065 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3070 hci_dev_unlock(hdev);
/* IO Capability Response event: record the remote side's IO capability
 * and authentication requirements on the connection for later use by the
 * pairing logic (hci_get_auth_req, user-confirm handling). */
3073 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3075 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3076 struct hci_conn *conn;
3078 BT_DBG("%s", hdev->name);
3082 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3086 conn->remote_cap = ev->capability;
3087 conn->remote_auth = ev->authentication;
/* Remote advertised OOB data present — remember it for the IO capa reply. */
3089 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3092 hci_dev_unlock(hdev);
/* User Confirmation Request event (SSP numeric comparison).
 * Rejects when we need MITM but the remote cannot provide it, auto-accepts
 * (optionally after a configurable delay) when neither side needs MITM,
 * and otherwise punts the decision to user space via mgmt. */
3095 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3096 struct sk_buff *skb)
3098 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3099 int loc_mitm, rem_mitm, confirm_hint = 0;
3100 struct hci_conn *conn;
3102 BT_DBG("%s", hdev->name);
3106 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3109 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM flag on each side. */
3113 loc_mitm = (conn->auth_type & 0x01);
3114 rem_mitm = (conn->remote_auth & 0x01);
3116 /* If we require MITM but the remote device can't provide that
3117 * (it has NoInputNoOutput) then reject the confirmation
3118 * request. The only exception is when we're dedicated bonding
3119 * initiators (connect_cfm_cb set) since then we always have the MITM
3121 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3122 BT_DBG("Rejecting request: remote device can't provide MITM");
3123 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3124 sizeof(ev->bdaddr), &ev->bdaddr);
3128 /* If no side requires MITM protection; auto-accept */
3129 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3130 (!rem_mitm || conn->io_capability == 0x03)) {
3132 /* If we're not the initiators request authorization to
3133 * proceed from user space (mgmt_user_confirm with
3134 * confirm_hint set to 1). */
3135 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3136 BT_DBG("Confirming auto-accept as acceptor");
3141 BT_DBG("Auto-accept of user confirmation with %ums delay",
3142 hdev->auto_accept_delay);
/* With a configured delay, defer the reply to the auto-accept timer so
 * user space gets a window to intervene. */
3144 if (hdev->auto_accept_delay > 0) {
3145 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3146 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3150 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3151 sizeof(ev->bdaddr), &ev->bdaddr);
3156 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3160 hci_dev_unlock(hdev);
/* User Passkey Request event: forwarded straight to user space via mgmt
 * when the management interface is active; otherwise ignored. */
3163 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3164 struct sk_buff *skb)
3166 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3168 BT_DBG("%s", hdev->name);
3172 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3173 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3175 hci_dev_unlock(hdev);
/* Simple Pairing Complete event: report pairing failures to mgmt, but
 * only when we did not initiate authentication (otherwise the Auth
 * Complete event already produces the mgmt_auth_failed). */
3178 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3180 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3181 struct hci_conn *conn;
3183 BT_DBG("%s", hdev->name);
3187 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3191 /* To avoid duplicate auth_failed events to user space we check
3192 * the HCI_CONN_AUTH_PEND flag which will be set if we
3193 * initiated the authentication. A traditional auth_complete
3194 * event gets always produced as initiator and is also mapped to
3195 * the mgmt_auth_failed event */
3196 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3197 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3203 hci_dev_unlock(hdev);
/* Remote Host Supported Features Notification event: cache the remote
 * host's SSP support bit in the matching inquiry cache entry.
 * NOTE(review): the !ie guard after the lookup appears to have been lost
 * in extraction — confirm against upstream. */
3206 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3208 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3209 struct inquiry_entry *ie;
3211 BT_DBG("%s", hdev->name);
3215 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3217 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3219 hci_dev_unlock(hdev);
/* Remote OOB Data Request event: if we have stored out-of-band pairing
 * data (hash + randomizer) for this peer, send it; otherwise send the
 * negative reply so pairing proceeds without OOB. */
3222 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3223 struct sk_buff *skb)
3225 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3226 struct oob_data *data;
3228 BT_DBG("%s", hdev->name);
3232 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3235 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3237 struct hci_cp_remote_oob_data_reply cp;
3239 bacpy(&cp.bdaddr, &ev->bdaddr);
3240 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3241 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3243 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3246 struct hci_cp_remote_oob_data_neg_reply cp;
3248 bacpy(&cp.bdaddr, &ev->bdaddr);
3249 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3254 hci_dev_unlock(hdev);
/* LE Connection Complete event.
 * Creates the hci_conn if the connection was remotely initiated, reports
 * failures to mgmt, and on success records handle/security level, adds
 * the sysfs entry and confirms the connection to the upper protocols. */
3257 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3259 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3260 struct hci_conn *conn;
3262 BT_DBG("%s status %d", hdev->name, ev->status);
3266 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
/* No pre-existing entry: this is an incoming LE connection. */
3268 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3270 BT_ERR("No memory for new connection");
3271 hci_dev_unlock(hdev);
3275 conn->dst_type = ev->bdaddr_type;
3279 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3280 conn->dst_type, ev->status);
3281 hci_proto_connect_cfm(conn, ev->status);
3282 conn->state = BT_CLOSED;
/* Avoid duplicate mgmt_device_connected notifications. */
3287 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3288 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3289 conn->dst_type, 0, NULL, 0, NULL);
3291 conn->sec_level = BT_SECURITY_LOW;
3292 conn->handle = __le16_to_cpu(ev->handle);
3293 conn->state = BT_CONNECTED;
3295 hci_conn_hold_device(conn);
3296 hci_conn_add_sysfs(conn);
3298 hci_proto_connect_cfm(conn, ev->status);
3301 hci_dev_unlock(hdev);
/* LE Advertising Report event: iterate the variable-length reports,
 * cache each advertiser and forward it to mgmt as a found device. */
3304 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3305 struct sk_buff *skb)
3307 u8 num_reports = skb->data[0];
3308 void *ptr = &skb->data[1];
3313 while (num_reports--) {
3314 struct hci_ev_le_advertising_info *ev = ptr;
3316 hci_add_adv_entry(hdev, ev);
/* The RSSI byte trails the variable-length advertising data. */
3318 rssi = ev->data[ev->length];
3319 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3320 NULL, rssi, 0, 1, ev->data, ev->length);
/* Advance past the fixed header, the adv data, and the RSSI byte. */
3322 ptr += sizeof(*ev) + ev->length + 1;
3325 hci_dev_unlock(hdev);
3328 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3329 struct sk_buff *skb)
3331 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3332 struct hci_cp_le_ltk_reply cp;
3333 struct hci_cp_le_ltk_neg_reply neg;
3334 struct hci_conn *conn;
3335 struct smp_ltk *ltk;
3337 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
3341 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3345 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3349 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3350 cp.handle = cpu_to_le16(conn->handle);
3352 if (ltk->authenticated)
3353 conn->sec_level = BT_SECURITY_HIGH;
3355 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3357 if (ltk->type & HCI_SMP_STK) {
3358 list_del(<k->list);
3362 hci_dev_unlock(hdev);
3367 neg.handle = ev->handle;
3368 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3369 hci_dev_unlock(hdev);
/* LE Meta event: strip the meta header and dispatch on the subevent code
 * to the specific LE event handler. */
3372 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3374 struct hci_ev_le_meta *le_ev = (void *) skb->data;
/* Drop the meta header so the sub-handlers see their own payload. */
3376 skb_pull(skb, sizeof(*le_ev));
3378 switch (le_ev->subevent) {
3379 case HCI_EV_LE_CONN_COMPLETE:
3380 hci_le_conn_complete_evt(hdev, skb);
3383 case HCI_EV_LE_ADVERTISING_REPORT:
3384 hci_le_adv_report_evt(hdev, skb);
3387 case HCI_EV_LE_LTK_REQ:
3388 hci_le_ltk_request_evt(hdev, skb);
/* Main HCI event dispatcher: strip the event header and route the packet
 * to the per-event handler based on the event code.  Called for every
 * event packet received from the controller; unknown events fall through
 * to the debug log in the default case.
 * NOTE(review): the per-case break statements, the closing brace of the
 * switch, and the trailing kfree_skb() appear to have been lost in
 * extraction — confirm against upstream. */
3396 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3398 struct hci_event_hdr *hdr = (void *) skb->data;
3399 __u8 event = hdr->evt;
3401 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3404 case HCI_EV_INQUIRY_COMPLETE:
3405 hci_inquiry_complete_evt(hdev, skb);
3408 case HCI_EV_INQUIRY_RESULT:
3409 hci_inquiry_result_evt(hdev, skb);
3412 case HCI_EV_CONN_COMPLETE:
3413 hci_conn_complete_evt(hdev, skb);
3416 case HCI_EV_CONN_REQUEST:
3417 hci_conn_request_evt(hdev, skb);
3420 case HCI_EV_DISCONN_COMPLETE:
3421 hci_disconn_complete_evt(hdev, skb);
3424 case HCI_EV_AUTH_COMPLETE:
3425 hci_auth_complete_evt(hdev, skb);
3428 case HCI_EV_REMOTE_NAME:
3429 hci_remote_name_evt(hdev, skb);
3432 case HCI_EV_ENCRYPT_CHANGE:
3433 hci_encrypt_change_evt(hdev, skb);
3436 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3437 hci_change_link_key_complete_evt(hdev, skb);
3440 case HCI_EV_REMOTE_FEATURES:
3441 hci_remote_features_evt(hdev, skb);
3444 case HCI_EV_REMOTE_VERSION:
3445 hci_remote_version_evt(hdev, skb);
3448 case HCI_EV_QOS_SETUP_COMPLETE:
3449 hci_qos_setup_complete_evt(hdev, skb);
3452 case HCI_EV_CMD_COMPLETE:
3453 hci_cmd_complete_evt(hdev, skb);
3456 case HCI_EV_CMD_STATUS:
3457 hci_cmd_status_evt(hdev, skb);
3460 case HCI_EV_ROLE_CHANGE:
3461 hci_role_change_evt(hdev, skb);
3464 case HCI_EV_NUM_COMP_PKTS:
3465 hci_num_comp_pkts_evt(hdev, skb);
3468 case HCI_EV_MODE_CHANGE:
3469 hci_mode_change_evt(hdev, skb);
3472 case HCI_EV_PIN_CODE_REQ:
3473 hci_pin_code_request_evt(hdev, skb);
3476 case HCI_EV_LINK_KEY_REQ:
3477 hci_link_key_request_evt(hdev, skb);
3480 case HCI_EV_LINK_KEY_NOTIFY:
3481 hci_link_key_notify_evt(hdev, skb);
3484 case HCI_EV_CLOCK_OFFSET:
3485 hci_clock_offset_evt(hdev, skb);
3488 case HCI_EV_PKT_TYPE_CHANGE:
3489 hci_pkt_type_change_evt(hdev, skb);
3492 case HCI_EV_PSCAN_REP_MODE:
3493 hci_pscan_rep_mode_evt(hdev, skb);
3496 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3497 hci_inquiry_result_with_rssi_evt(hdev, skb);
3500 case HCI_EV_REMOTE_EXT_FEATURES:
3501 hci_remote_ext_features_evt(hdev, skb);
3504 case HCI_EV_SYNC_CONN_COMPLETE:
3505 hci_sync_conn_complete_evt(hdev, skb);
3508 case HCI_EV_SYNC_CONN_CHANGED:
3509 hci_sync_conn_changed_evt(hdev, skb);
3512 case HCI_EV_SNIFF_SUBRATE:
3513 hci_sniff_subrate_evt(hdev, skb);
3516 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3517 hci_extended_inquiry_result_evt(hdev, skb);
3520 case HCI_EV_IO_CAPA_REQUEST:
3521 hci_io_capa_request_evt(hdev, skb);
3524 case HCI_EV_IO_CAPA_REPLY:
3525 hci_io_capa_reply_evt(hdev, skb);
3528 case HCI_EV_USER_CONFIRM_REQUEST:
3529 hci_user_confirm_request_evt(hdev, skb);
3532 case HCI_EV_USER_PASSKEY_REQUEST:
3533 hci_user_passkey_request_evt(hdev, skb);
3536 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3537 hci_simple_pair_complete_evt(hdev, skb);
3540 case HCI_EV_REMOTE_HOST_FEATURES:
3541 hci_remote_host_features_evt(hdev, skb);
3544 case HCI_EV_LE_META:
3545 hci_le_meta_evt(hdev, skb);
3548 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3549 hci_remote_oob_data_request_evt(hdev, skb);
3552 case HCI_EV_NUM_COMP_BLOCKS:
3553 hci_num_comp_blocks_evt(hdev, skb);
/* Unknown event code: log it and keep the stats counter accurate. */
3557 BT_DBG("%s event 0x%x", hdev->name, event);
3562 hdev->stat.evt_rx++;