/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI connection handling. */
27 #include <linux/export.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/l2cap.h>
41 static const struct sco_param sco_param_cvsd[] = {
42 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
43 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
44 { EDR_ESCO_MASK | ESCO_EV3, 0x0007 }, /* S1 */
45 { EDR_ESCO_MASK | ESCO_HV3, 0xffff }, /* D1 */
46 { EDR_ESCO_MASK | ESCO_HV1, 0xffff }, /* D0 */
49 static const struct sco_param sco_param_wideband[] = {
50 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
51 { EDR_ESCO_MASK | ESCO_EV3, 0x0008 }, /* T1 */
54 static void hci_le_create_connection_cancel(struct hci_conn *conn)
56 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
59 static void hci_acl_create_connection(struct hci_conn *conn)
61 struct hci_dev *hdev = conn->hdev;
62 struct inquiry_entry *ie;
63 struct hci_cp_create_conn cp;
65 BT_DBG("hcon %p", conn);
67 conn->state = BT_CONNECT;
70 conn->link_mode = HCI_LM_MASTER;
74 conn->link_policy = hdev->link_policy;
76 memset(&cp, 0, sizeof(cp));
77 bacpy(&cp.bdaddr, &conn->dst);
78 cp.pscan_rep_mode = 0x02;
80 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
82 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
83 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
84 cp.pscan_mode = ie->data.pscan_mode;
85 cp.clock_offset = ie->data.clock_offset |
89 memcpy(conn->dev_class, ie->data.dev_class, 3);
90 if (ie->data.ssp_mode > 0)
91 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
94 cp.pkt_type = cpu_to_le16(conn->pkt_type);
95 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
96 cp.role_switch = 0x01;
98 cp.role_switch = 0x00;
100 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
103 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
105 struct hci_cp_create_conn_cancel cp;
107 BT_DBG("hcon %p", conn);
109 if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
112 bacpy(&cp.bdaddr, &conn->dst);
113 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
116 static void hci_reject_sco(struct hci_conn *conn)
118 struct hci_cp_reject_sync_conn_req cp;
120 cp.reason = HCI_ERROR_REMOTE_USER_TERM;
121 bacpy(&cp.bdaddr, &conn->dst);
123 hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
126 void hci_disconnect(struct hci_conn *conn, __u8 reason)
128 struct hci_cp_disconnect cp;
130 BT_DBG("hcon %p", conn);
132 conn->state = BT_DISCONN;
134 cp.handle = cpu_to_le16(conn->handle);
136 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
139 static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
141 struct hci_cp_disconn_phy_link cp;
143 BT_DBG("hcon %p", conn);
145 conn->state = BT_DISCONN;
147 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
149 hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
153 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
155 struct hci_dev *hdev = conn->hdev;
156 struct hci_cp_add_sco cp;
158 BT_DBG("hcon %p", conn);
160 conn->state = BT_CONNECT;
165 cp.handle = cpu_to_le16(handle);
166 cp.pkt_type = cpu_to_le16(conn->pkt_type);
168 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
171 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
173 struct hci_dev *hdev = conn->hdev;
174 struct hci_cp_setup_sync_conn cp;
175 const struct sco_param *param;
177 BT_DBG("hcon %p", conn);
179 conn->state = BT_CONNECT;
184 cp.handle = cpu_to_le16(handle);
186 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
187 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
188 cp.voice_setting = cpu_to_le16(conn->setting);
190 switch (conn->setting & SCO_AIRMODE_MASK) {
191 case SCO_AIRMODE_TRANSP:
192 if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
194 cp.retrans_effort = 0x02;
195 param = &sco_param_wideband[conn->attempt - 1];
197 case SCO_AIRMODE_CVSD:
198 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
200 cp.retrans_effort = 0x01;
201 param = &sco_param_cvsd[conn->attempt - 1];
207 cp.pkt_type = __cpu_to_le16(param->pkt_type);
208 cp.max_latency = __cpu_to_le16(param->max_latency);
210 if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
216 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
217 u16 latency, u16 to_multiplier)
219 struct hci_cp_le_conn_update cp;
220 struct hci_dev *hdev = conn->hdev;
222 memset(&cp, 0, sizeof(cp));
224 cp.handle = cpu_to_le16(conn->handle);
225 cp.conn_interval_min = cpu_to_le16(min);
226 cp.conn_interval_max = cpu_to_le16(max);
227 cp.conn_latency = cpu_to_le16(latency);
228 cp.supervision_timeout = cpu_to_le16(to_multiplier);
229 cp.min_ce_len = cpu_to_le16(0x0000);
230 cp.max_ce_len = cpu_to_le16(0x0000);
232 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
235 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
238 struct hci_dev *hdev = conn->hdev;
239 struct hci_cp_le_start_enc cp;
241 BT_DBG("hcon %p", conn);
243 memset(&cp, 0, sizeof(cp));
245 cp.handle = cpu_to_le16(conn->handle);
248 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
250 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
253 /* Device _must_ be locked */
254 void hci_sco_setup(struct hci_conn *conn, __u8 status)
256 struct hci_conn *sco = conn->link;
261 BT_DBG("hcon %p", conn);
264 if (lmp_esco_capable(conn->hdev))
265 hci_setup_sync(sco, conn->handle);
267 hci_add_sco(sco, conn->handle);
269 hci_proto_connect_cfm(sco, status);
274 static void hci_conn_disconnect(struct hci_conn *conn)
276 __u8 reason = hci_proto_disconn_ind(conn);
278 switch (conn->type) {
280 hci_amp_disconn(conn, reason);
283 hci_disconnect(conn, reason);
288 static void hci_conn_timeout(struct work_struct *work)
290 struct hci_conn *conn = container_of(work, struct hci_conn,
293 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
295 if (atomic_read(&conn->refcnt))
298 switch (conn->state) {
302 if (conn->type == ACL_LINK)
303 hci_acl_create_connection_cancel(conn);
304 else if (conn->type == LE_LINK)
305 hci_le_create_connection_cancel(conn);
306 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
307 hci_reject_sco(conn);
312 hci_conn_disconnect(conn);
315 conn->state = BT_CLOSED;
320 /* Enter sniff mode */
321 static void hci_conn_idle(struct work_struct *work)
323 struct hci_conn *conn = container_of(work, struct hci_conn,
325 struct hci_dev *hdev = conn->hdev;
327 BT_DBG("hcon %p mode %d", conn, conn->mode);
329 if (test_bit(HCI_RAW, &hdev->flags))
332 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
335 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
338 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
339 struct hci_cp_sniff_subrate cp;
340 cp.handle = cpu_to_le16(conn->handle);
341 cp.max_latency = cpu_to_le16(0);
342 cp.min_remote_timeout = cpu_to_le16(0);
343 cp.min_local_timeout = cpu_to_le16(0);
344 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
347 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
348 struct hci_cp_sniff_mode cp;
349 cp.handle = cpu_to_le16(conn->handle);
350 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
351 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
352 cp.attempt = cpu_to_le16(4);
353 cp.timeout = cpu_to_le16(1);
354 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
358 static void hci_conn_auto_accept(struct work_struct *work)
360 struct hci_conn *conn = container_of(work, struct hci_conn,
361 auto_accept_work.work);
363 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
367 static void le_conn_timeout(struct work_struct *work)
369 struct hci_conn *conn = container_of(work, struct hci_conn,
370 le_conn_timeout.work);
371 struct hci_dev *hdev = conn->hdev;
375 /* We could end up here due to having done directed advertising,
376 * so clean up the state if necessary. This should however only
377 * happen with broken hardware or if low duty cycle was used
378 * (which doesn't have a timeout of its own).
380 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
382 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
384 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
388 hci_le_create_connection_cancel(conn);
391 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
393 struct hci_conn *conn;
395 BT_DBG("%s dst %pMR", hdev->name, dst);
397 conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
401 bacpy(&conn->dst, dst);
402 bacpy(&conn->src, &hdev->bdaddr);
405 conn->mode = HCI_CM_ACTIVE;
406 conn->state = BT_OPEN;
407 conn->auth_type = HCI_AT_GENERAL_BONDING;
408 conn->io_capability = hdev->io_capability;
409 conn->remote_auth = 0xff;
410 conn->key_type = 0xff;
411 conn->tx_power = HCI_TX_POWER_INVALID;
412 conn->max_tx_power = HCI_TX_POWER_INVALID;
414 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
415 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
419 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
422 /* conn->src should reflect the local identity address */
423 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
426 if (lmp_esco_capable(hdev))
427 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
428 (hdev->esco_type & EDR_ESCO_MASK);
430 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
433 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
437 skb_queue_head_init(&conn->data_q);
439 INIT_LIST_HEAD(&conn->chan_list);
441 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
442 INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
443 INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
444 INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
446 atomic_set(&conn->refcnt, 0);
450 hci_conn_hash_add(hdev, conn);
452 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
454 hci_conn_init_sysfs(conn);
459 int hci_conn_del(struct hci_conn *conn)
461 struct hci_dev *hdev = conn->hdev;
463 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
465 cancel_delayed_work_sync(&conn->disc_work);
466 cancel_delayed_work_sync(&conn->auto_accept_work);
467 cancel_delayed_work_sync(&conn->idle_work);
469 if (conn->type == ACL_LINK) {
470 struct hci_conn *sco = conn->link;
475 hdev->acl_cnt += conn->sent;
476 } else if (conn->type == LE_LINK) {
477 cancel_delayed_work_sync(&conn->le_conn_timeout);
480 hdev->le_cnt += conn->sent;
482 hdev->acl_cnt += conn->sent;
484 struct hci_conn *acl = conn->link;
491 hci_chan_list_flush(conn);
494 amp_mgr_put(conn->amp_mgr);
496 hci_conn_hash_del(hdev, conn);
498 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
500 skb_queue_purge(&conn->data_q);
502 hci_conn_del_sysfs(conn);
511 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
513 int use_src = bacmp(src, BDADDR_ANY);
514 struct hci_dev *hdev = NULL, *d;
516 BT_DBG("%pMR -> %pMR", src, dst);
518 read_lock(&hci_dev_list_lock);
520 list_for_each_entry(d, &hci_dev_list, list) {
521 if (!test_bit(HCI_UP, &d->flags) ||
522 test_bit(HCI_RAW, &d->flags) ||
523 test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
524 d->dev_type != HCI_BREDR)
528 * No source address - find interface with bdaddr != dst
529 * Source address - find interface with bdaddr == src
533 if (!bacmp(&d->bdaddr, src)) {
537 if (bacmp(&d->bdaddr, dst)) {
544 hdev = hci_dev_hold(hdev);
546 read_unlock(&hci_dev_list_lock);
549 EXPORT_SYMBOL(hci_get_route);
551 /* This function requires the caller holds hdev->lock */
552 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
554 struct hci_dev *hdev = conn->hdev;
556 conn->state = BT_CLOSED;
558 mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
561 hci_proto_connect_cfm(conn, status);
565 /* Since we may have temporarily stopped the background scanning in
566 * favor of connection establishment, we should restart it.
568 hci_update_background_scan(hdev);
570 /* Re-enable advertising in case this was a failed connection
571 * attempt as a peripheral.
573 mgmt_reenable_advertising(hdev);
576 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
578 struct hci_conn *conn;
583 BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
588 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
592 hci_le_conn_failed(conn, status);
595 hci_dev_unlock(hdev);
598 static void hci_req_add_le_create_conn(struct hci_request *req,
599 struct hci_conn *conn)
601 struct hci_cp_le_create_conn cp;
602 struct hci_dev *hdev = conn->hdev;
605 memset(&cp, 0, sizeof(cp));
607 /* Update random address, but set require_privacy to false so
608 * that we never connect with an unresolvable address.
610 if (hci_update_random_address(req, false, &own_addr_type))
613 /* Save the address type used for this connnection attempt so we able
614 * to retrieve this information if we need it.
616 conn->src_type = own_addr_type;
618 cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
619 cp.scan_window = cpu_to_le16(hdev->le_scan_window);
620 bacpy(&cp.peer_addr, &conn->dst);
621 cp.peer_addr_type = conn->dst_type;
622 cp.own_address_type = own_addr_type;
623 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
624 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
625 cp.supervision_timeout = cpu_to_le16(0x002a);
626 cp.min_ce_len = cpu_to_le16(0x0000);
627 cp.max_ce_len = cpu_to_le16(0x0000);
629 hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
631 conn->state = BT_CONNECT;
634 static void hci_req_directed_advertising(struct hci_request *req,
635 struct hci_conn *conn)
637 struct hci_dev *hdev = req->hdev;
638 struct hci_cp_le_set_adv_param cp;
643 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
645 /* Clear the HCI_ADVERTISING bit temporarily so that the
646 * hci_update_random_address knows that it's safe to go ahead
647 * and write a new random address. The flag will be set back on
648 * as soon as the SET_ADV_ENABLE HCI command completes.
650 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
652 /* Set require_privacy to false so that the remote device has a
653 * chance of identifying us.
655 if (hci_update_random_address(req, false, &own_addr_type) < 0)
658 memset(&cp, 0, sizeof(cp));
659 cp.type = LE_ADV_DIRECT_IND;
660 cp.own_address_type = own_addr_type;
661 cp.direct_addr_type = conn->dst_type;
662 bacpy(&cp.direct_addr, &conn->dst);
663 cp.channel_map = hdev->le_adv_channel_map;
665 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
668 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
670 conn->state = BT_CONNECT;
673 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
674 u8 dst_type, u8 sec_level, u8 auth_type)
676 struct hci_conn_params *params;
677 struct hci_conn *conn;
679 struct hci_request req;
682 /* Some devices send ATT messages as soon as the physical link is
683 * established. To be able to handle these ATT messages, the user-
684 * space first establishes the connection and then starts the pairing
687 * So if a hci_conn object already exists for the following connection
688 * attempt, we simply update pending_sec_level and auth_type fields
689 * and return the object found.
691 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
693 conn->pending_sec_level = sec_level;
694 conn->auth_type = auth_type;
698 /* Since the controller supports only one LE connection attempt at a
699 * time, we return -EBUSY if there is any connection attempt running.
701 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
703 return ERR_PTR(-EBUSY);
705 /* When given an identity address with existing identity
706 * resolving key, the connection needs to be established
707 * to a resolvable random address.
709 * This uses the cached random resolvable address from
710 * a previous scan. When no cached address is available,
711 * try connecting to the identity address instead.
713 * Storing the resolvable random address is required here
714 * to handle connection failures. The address will later
715 * be resolved back into the original identity address
716 * from the connect request.
718 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
719 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
721 dst_type = ADDR_LE_DEV_RANDOM;
724 conn = hci_conn_add(hdev, LE_LINK, dst);
726 return ERR_PTR(-ENOMEM);
728 conn->dst_type = dst_type;
729 conn->sec_level = BT_SECURITY_LOW;
730 conn->pending_sec_level = sec_level;
731 conn->auth_type = auth_type;
733 hci_req_init(&req, hdev);
735 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
736 hci_req_directed_advertising(&req, conn);
741 conn->link_mode |= HCI_LM_MASTER;
743 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
745 conn->le_conn_min_interval = params->conn_min_interval;
746 conn->le_conn_max_interval = params->conn_max_interval;
748 conn->le_conn_min_interval = hdev->le_conn_min_interval;
749 conn->le_conn_max_interval = hdev->le_conn_max_interval;
752 /* If controller is scanning, we stop it since some controllers are
753 * not able to scan and connect at the same time. Also set the
754 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
755 * handler for scan disabling knows to set the correct discovery
758 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
759 hci_req_add_le_scan_disable(&req);
760 set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
763 hci_req_add_le_create_conn(&req, conn);
766 err = hci_req_run(&req, create_le_conn_complete);
777 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
778 u8 sec_level, u8 auth_type)
780 struct hci_conn *acl;
782 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
783 return ERR_PTR(-ENOTSUPP);
785 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
787 acl = hci_conn_add(hdev, ACL_LINK, dst);
789 return ERR_PTR(-ENOMEM);
794 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
795 acl->sec_level = BT_SECURITY_LOW;
796 acl->pending_sec_level = sec_level;
797 acl->auth_type = auth_type;
798 hci_acl_create_connection(acl);
804 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
807 struct hci_conn *acl;
808 struct hci_conn *sco;
810 acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
814 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
816 sco = hci_conn_add(hdev, type, dst);
819 return ERR_PTR(-ENOMEM);
828 sco->setting = setting;
830 if (acl->state == BT_CONNECTED &&
831 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
832 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
833 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
835 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
836 /* defer SCO setup until mode change completed */
837 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
841 hci_sco_setup(acl, 0x00);
847 /* Check link security requirement */
848 int hci_conn_check_link_mode(struct hci_conn *conn)
850 BT_DBG("hcon %p", conn);
852 /* In Secure Connections Only mode, it is required that Secure
853 * Connections is used and the link is encrypted with AES-CCM
854 * using a P-256 authenticated combination key.
856 if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
857 if (!hci_conn_sc_enabled(conn) ||
858 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
859 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
863 if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
869 /* Authenticate remote device */
870 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
872 BT_DBG("hcon %p", conn);
874 if (conn->pending_sec_level > sec_level)
875 sec_level = conn->pending_sec_level;
877 if (sec_level > conn->sec_level)
878 conn->pending_sec_level = sec_level;
879 else if (conn->link_mode & HCI_LM_AUTH)
882 /* Make sure we preserve an existing MITM requirement*/
883 auth_type |= (conn->auth_type & 0x01);
885 conn->auth_type = auth_type;
887 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
888 struct hci_cp_auth_requested cp;
890 cp.handle = cpu_to_le16(conn->handle);
891 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
894 /* If we're already encrypted set the REAUTH_PEND flag,
895 * otherwise set the ENCRYPT_PEND.
897 if (conn->key_type != 0xff)
898 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
900 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
906 /* Encrypt the the link */
907 static void hci_conn_encrypt(struct hci_conn *conn)
909 BT_DBG("hcon %p", conn);
911 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
912 struct hci_cp_set_conn_encrypt cp;
913 cp.handle = cpu_to_le16(conn->handle);
915 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
920 /* Enable security */
921 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
923 BT_DBG("hcon %p", conn);
925 if (conn->type == LE_LINK)
926 return smp_conn_security(conn, sec_level);
928 /* For sdp we don't need the link key. */
929 if (sec_level == BT_SECURITY_SDP)
932 /* For non 2.1 devices and low security level we don't need the link
934 if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
937 /* For other security levels we need the link key. */
938 if (!(conn->link_mode & HCI_LM_AUTH))
941 /* An authenticated FIPS approved combination key has sufficient
942 * security for security level 4. */
943 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
944 sec_level == BT_SECURITY_FIPS)
947 /* An authenticated combination key has sufficient security for
949 if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
950 conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
951 sec_level == BT_SECURITY_HIGH)
954 /* An unauthenticated combination key has sufficient security for
955 security level 1 and 2. */
956 if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
957 conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
958 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
961 /* A combination key has always sufficient security for the security
962 levels 1 or 2. High security level requires the combination key
963 is generated using maximum PIN code length (16).
964 For pre 2.1 units. */
965 if (conn->key_type == HCI_LK_COMBINATION &&
966 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
967 conn->pin_length == 16))
971 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
974 if (!hci_conn_auth(conn, sec_level, auth_type))
978 if (conn->link_mode & HCI_LM_ENCRYPT)
981 hci_conn_encrypt(conn);
984 EXPORT_SYMBOL(hci_conn_security);
986 /* Check secure link requirement */
987 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
989 BT_DBG("hcon %p", conn);
991 /* Accept if non-secure or higher security level is required */
992 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
995 /* Accept if secure or higher security level is already present */
996 if (conn->sec_level == BT_SECURITY_HIGH ||
997 conn->sec_level == BT_SECURITY_FIPS)
1000 /* Reject not secure link */
1003 EXPORT_SYMBOL(hci_conn_check_secure);
1005 /* Change link key */
1006 int hci_conn_change_link_key(struct hci_conn *conn)
1008 BT_DBG("hcon %p", conn);
1010 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1011 struct hci_cp_change_conn_link_key cp;
1012 cp.handle = cpu_to_le16(conn->handle);
1013 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
1021 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1023 BT_DBG("hcon %p", conn);
1025 if (!role && conn->link_mode & HCI_LM_MASTER)
1028 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1029 struct hci_cp_switch_role cp;
1030 bacpy(&cp.bdaddr, &conn->dst);
1032 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1037 EXPORT_SYMBOL(hci_conn_switch_role);
1039 /* Enter active mode */
1040 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1042 struct hci_dev *hdev = conn->hdev;
1044 BT_DBG("hcon %p mode %d", conn, conn->mode);
1046 if (test_bit(HCI_RAW, &hdev->flags))
1049 if (conn->mode != HCI_CM_SNIFF)
1052 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1055 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1056 struct hci_cp_exit_sniff_mode cp;
1057 cp.handle = cpu_to_le16(conn->handle);
1058 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1062 if (hdev->idle_timeout > 0)
1063 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1064 msecs_to_jiffies(hdev->idle_timeout));
1067 /* Drop all connection on the device */
1068 void hci_conn_hash_flush(struct hci_dev *hdev)
1070 struct hci_conn_hash *h = &hdev->conn_hash;
1071 struct hci_conn *c, *n;
1073 BT_DBG("hdev %s", hdev->name);
1075 list_for_each_entry_safe(c, n, &h->list, list) {
1076 c->state = BT_CLOSED;
1078 hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1083 /* Check pending connect attempts */
1084 void hci_conn_check_pending(struct hci_dev *hdev)
1086 struct hci_conn *conn;
1088 BT_DBG("hdev %s", hdev->name);
1092 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1094 hci_acl_create_connection(conn);
1096 hci_dev_unlock(hdev);
1099 int hci_get_conn_list(void __user *arg)
1102 struct hci_conn_list_req req, *cl;
1103 struct hci_conn_info *ci;
1104 struct hci_dev *hdev;
1105 int n = 0, size, err;
1107 if (copy_from_user(&req, arg, sizeof(req)))
1110 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1113 size = sizeof(req) + req.conn_num * sizeof(*ci);
1115 cl = kmalloc(size, GFP_KERNEL);
1119 hdev = hci_dev_get(req.dev_id);
1128 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1129 bacpy(&(ci + n)->bdaddr, &c->dst);
1130 (ci + n)->handle = c->handle;
1131 (ci + n)->type = c->type;
1132 (ci + n)->out = c->out;
1133 (ci + n)->state = c->state;
1134 (ci + n)->link_mode = c->link_mode;
1135 if (++n >= req.conn_num)
1138 hci_dev_unlock(hdev);
1140 cl->dev_id = hdev->id;
1142 size = sizeof(req) + n * sizeof(*ci);
1146 err = copy_to_user(arg, cl, size);
1149 return err ? -EFAULT : 0;
1152 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1154 struct hci_conn_info_req req;
1155 struct hci_conn_info ci;
1156 struct hci_conn *conn;
1157 char __user *ptr = arg + sizeof(req);
1159 if (copy_from_user(&req, arg, sizeof(req)))
1163 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1165 bacpy(&ci.bdaddr, &conn->dst);
1166 ci.handle = conn->handle;
1167 ci.type = conn->type;
1169 ci.state = conn->state;
1170 ci.link_mode = conn->link_mode;
1172 hci_dev_unlock(hdev);
1177 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1180 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1182 struct hci_auth_info_req req;
1183 struct hci_conn *conn;
1185 if (copy_from_user(&req, arg, sizeof(req)))
1189 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1191 req.type = conn->auth_type;
1192 hci_dev_unlock(hdev);
1197 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1200 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1202 struct hci_dev *hdev = conn->hdev;
1203 struct hci_chan *chan;
1205 BT_DBG("%s hcon %p", hdev->name, conn);
1207 chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
1212 skb_queue_head_init(&chan->data_q);
1213 chan->state = BT_CONNECTED;
1215 list_add_rcu(&chan->list, &conn->chan_list);
1220 void hci_chan_del(struct hci_chan *chan)
1222 struct hci_conn *conn = chan->conn;
1223 struct hci_dev *hdev = conn->hdev;
1225 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1227 list_del_rcu(&chan->list);
1231 hci_conn_drop(conn);
1233 skb_queue_purge(&chan->data_q);
1237 void hci_chan_list_flush(struct hci_conn *conn)
1239 struct hci_chan *chan, *n;
1241 BT_DBG("hcon %p", conn);
1243 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1247 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1250 struct hci_chan *hchan;
1252 list_for_each_entry(hchan, &hcon->chan_list, list) {
1253 if (hchan->handle == handle)
1260 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1262 struct hci_conn_hash *h = &hdev->conn_hash;
1263 struct hci_conn *hcon;
1264 struct hci_chan *hchan = NULL;
1268 list_for_each_entry_rcu(hcon, &h->list, list) {
1269 hchan = __hci_chan_lookup_handle(hcon, handle);