2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI connection handling. */
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
37 #include "hci_request.h"
/* Bundles an hci_conn with a connection handle so both can be passed
 * through the single void *data argument of hci_cmd_sync_queue()
 * (consumed by hci_enhanced_setup_sync below).
 */
48 struct conn_handle_t {
49 struct hci_conn *conn;
/* eSCO parameter sets for CVSD air mode, tried in descending order of
 * quality by conn->attempt (see find_next_esco_param). Columns are
 * pkt_type mask, max_latency and retrans_effort as used when building
 * the (enhanced) setup-synchronous-connection command.
 */
53 static const struct sco_param esco_param_cvsd[] = {
54 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
55 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
56 { EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */
57 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */
58 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */
/* Plain SCO fallback parameters for CVSD when the peer is not eSCO
 * capable (retrans_effort 0xff = "don't care").
 */
61 static const struct sco_param sco_param_cvsd[] = {
62 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0xff }, /* D1 */
63 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0xff }, /* D0 */
/* eSCO parameter sets for transparent/mSBC air mode (wideband speech),
 * tried in order T2 then T1 by conn->attempt.
 */
66 static const struct sco_param esco_param_msbc[] = {
67 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
68 { EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */
71 /* This function requires the caller holds hdev->lock */
/* Clean up state after a connect-by-scanning LE attempt ends.
 * @conn:   the LE connection whose attempt finished
 * @status: HCI status of the attempt (0x00 on plain cancellation)
 *
 * Resolves the destination back to its identity address (via IRK),
 * drops the pending-action reference on the matching conn params, and
 * re-files the params on the appropriate pending list according to
 * their auto_connect policy before refreshing passive scanning.
 */
72 static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
74 struct hci_conn_params *params;
75 struct hci_dev *hdev = conn->hdev;
81 bdaddr_type = conn->dst_type;
83 /* Check if we need to convert to identity address */
84 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
86 bdaddr = &irk->bdaddr;
87 bdaddr_type = irk->addr_type;
90 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
/* Release the reference the pending action held on this connection. */
96 hci_conn_drop(params->conn);
97 hci_conn_put(params->conn);
101 if (!params->explicit_connect)
104 /* If the status indicates successful cancellation of
105 * the attempt (i.e. Unknown Connection Id) there's no point of
106 * notifying failure since we'll go back to keep trying to
107 * connect. The only exception is explicit connect requests
108 * where a timeout + cancel does indicate an actual failure.
109 */
110 if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
111 mgmt_connect_failed(hdev, &conn->dst, conn->type,
112 conn->dst_type, status);
114 /* The connection attempt was doing scan for new RPA, and is
115 * in scan phase. If params are not associated with any other
116 * autoconnect action, remove them completely. If they are, just unmark
117 * them as waiting for connection, by clearing explicit_connect field.
119 params->explicit_connect = false;
121 list_del_init(&params->action);
123 switch (params->auto_connect) {
124 case HCI_AUTO_CONN_EXPLICIT:
125 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
126 /* return instead of break to avoid duplicate scan update */
128 case HCI_AUTO_CONN_DIRECT:
129 case HCI_AUTO_CONN_ALWAYS:
130 list_add(&params->action, &hdev->pend_le_conns);
132 case HCI_AUTO_CONN_REPORT:
133 list_add(&params->action, &hdev->pend_le_reports);
/* Policy lists changed above; resync the passive scan state. */
139 hci_update_passive_scan(hdev);
/* Final teardown of a connection's remaining state: pending param
 * removal, link-key flush, channel flush, hash removal, driver
 * notification, sysfs and debugfs cleanup. Called from hci_conn_del()
 * and from le_scan_cleanup().
 */
142 static void hci_conn_cleanup(struct hci_conn *conn)
144 struct hci_dev *hdev = conn->hdev;
146 if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
147 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
/* FLUSH_KEY means the stored link key must not outlive the link. */
149 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
150 hci_remove_link_key(hdev, &conn->dst);
152 hci_chan_list_flush(conn);
154 hci_conn_hash_del(hdev, conn);
/* Tell the driver audio is going away for (e)SCO links. */
159 if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
160 switch (conn->setting & SCO_AIRMODE_MASK) {
161 case SCO_AIRMODE_CVSD:
162 case SCO_AIRMODE_TRANSP:
164 hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
169 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
172 hci_conn_del_sysfs(conn);
174 debugfs_remove_recursive(conn->debugfs);
/* Deferred-work half of hci_connect_le_scan_remove(): runs outside the
 * paths that could deadlock, re-checks that the hci_conn is still in
 * the connection hash, then performs the scan cleanup and full conn
 * cleanup under hdev->lock.
 */
181 static void le_scan_cleanup(struct work_struct *work)
183 struct hci_conn *conn = container_of(work, struct hci_conn,
185 struct hci_dev *hdev = conn->hdev;
186 struct hci_conn *c = NULL;
188 BT_DBG("%s hcon %p", hdev->name, conn);
192 /* Check that the hci_conn is still around */
194 list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
/* Status 0x00: this is a cancellation, not a failure. */
201 hci_connect_le_scan_cleanup(conn, 0x00);
202 hci_conn_cleanup(conn);
205 hci_dev_unlock(hdev);
210 static void hci_connect_le_scan_remove(struct hci_conn *conn)
212 BT_DBG("%s hcon %p", conn->hdev->name, conn);
214 /* We can't call hci_conn_del/hci_conn_cleanup here since that
215 * could deadlock with another hci_conn_del() call that's holding
216 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
217 * Instead, grab temporary extra references to the hci_dev and
218 * hci_conn and perform the necessary cleanup in a separate work
222 hci_dev_hold(conn->hdev);
225 /* Even though we hold a reference to the hdev, many other
226 * things might get cleaned up meanwhile, including the hdev's
227 * own workqueue, so we can't use that for scheduling.
229 schedule_work(&conn->le_scan_cleanup);
/* Issue HCI Create Connection for a BR/EDR ACL link, cancelling any
 * ongoing inquiry first and seeding page-scan parameters from the
 * inquiry cache when a fresh entry for the peer exists.
 */
232 static void hci_acl_create_connection(struct hci_conn *conn)
234 struct hci_dev *hdev = conn->hdev;
235 struct inquiry_entry *ie;
236 struct hci_cp_create_conn cp;
238 BT_DBG("hcon %p", conn);
240 /* Many controllers disallow HCI Create Connection while it is doing
241 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
242 * Connection. This may cause the MGMT discovering state to become false
243 * without user space's request but it is okay since the MGMT Discovery
244 * APIs do not promise that discovery should be done forever. Instead,
245 * the user space monitors the status of MGMT discovering and it may
246 * request for discovery again when this flag becomes false.
248 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
249 /* Put this connection to "pending" state so that it will be
250 * executed after the inquiry cancel command complete event.
252 conn->state = BT_CONNECT2;
253 hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
257 conn->state = BT_CONNECT;
/* The side issuing Create Connection starts out as central. */
259 conn->role = HCI_ROLE_MASTER;
263 conn->link_policy = hdev->link_policy;
265 memset(&cp, 0, sizeof(cp));
266 bacpy(&cp.bdaddr, &conn->dst);
267 cp.pscan_rep_mode = 0x02;
/* Reuse cached page-scan info if the inquiry entry is recent enough. */
269 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
271 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
272 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
273 cp.pscan_mode = ie->data.pscan_mode;
274 cp.clock_offset = ie->data.clock_offset |
278 memcpy(conn->dev_class, ie->data.dev_class, 3);
281 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* Allow the peer to request a role switch unless we insist on being
 * central (HCI_LM_MASTER) or cannot switch at all.
 */
282 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
283 cp.role_switch = 0x01;
285 cp.role_switch = 0x00;
287 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
/* Initiate disconnection of @conn with the given HCI @reason.
 * As a side effect, when we are central of an established ACL link a
 * Read Clock Offset is issued first so the fresh offset can be cached
 * for faster future reconnection. Returns the result of
 * hci_abort_conn().
 */
290 int hci_disconnect(struct hci_conn *conn, __u8 reason)
292 BT_DBG("hcon %p", conn);
294 /* When we are central of an established connection and it enters
295 * the disconnect timeout, then go ahead and try to read the
296 * current clock offset. Processing of the result is done
297 * within the event handling and hci_clock_offset_evt function.
299 if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
300 (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
301 struct hci_dev *hdev = conn->hdev;
302 struct hci_cp_read_clock_offset clkoff_cp;
304 clkoff_cp.handle = cpu_to_le16(conn->handle);
305 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
309 return hci_abort_conn(conn, reason);
/* Issue the legacy HCI Add SCO Connection command on top of the ACL
 * link identified by @handle (pre-eSCO controllers).
 */
312 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
314 struct hci_dev *hdev = conn->hdev;
315 struct hci_cp_add_sco cp;
317 BT_DBG("hcon %p", conn);
319 conn->state = BT_CONNECT;
324 cp.handle = cpu_to_le16(handle);
325 cp.pkt_type = cpu_to_le16(conn->pkt_type);
327 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
/* Advance conn->attempt to the next usable entry in @esco_param,
 * skipping 2-EV3 packet types when the peer (conn->parent) lacks
 * eSCO 2M support. Returns true if a usable entry remains
 * (conn->attempt <= @size), false when the table is exhausted.
 */
330 static bool find_next_esco_param(struct hci_conn *conn,
331 const struct sco_param *esco_param, int size)
336 for (; conn->attempt <= size; conn->attempt++) {
337 if (lmp_esco_2m_capable(conn->parent) ||
338 (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
340 BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
341 conn, conn->attempt);
344 return conn->attempt <= size;
/* Configure the controller's offloaded codec data path for an eSCO
 * link, using driver callbacks for vendor-specific config data and the
 * data path id. Sends HCI Configure Data Path once per direction
 * (0x00 then 0x01); returns the status of the second command or a
 * negative error from the driver callbacks / allocation.
 */
347 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
350 __u8 vnd_len, *vnd_data = NULL;
351 struct hci_op_configure_data_path *cmd = NULL;
/* Driver supplies the vendor-specific codec configuration blob. */
353 err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
358 cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
364 err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
368 cmd->vnd_len = vnd_len;
369 memcpy(cmd->vnd_data, vnd_data, vnd_len);
/* Direction 0x00 = input (host->controller), 0x01 = output. */
371 cmd->direction = 0x00;
372 __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
373 sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
375 cmd->direction = 0x01;
376 err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
377 sizeof(*cmd) + vnd_len, cmd,
/* hci_cmd_sync_queue() callback: issue HCI Enhanced Setup Synchronous
 * Connection for the (e)SCO link described by @data (a conn_handle_t),
 * filling per-codec coding format, bandwidth and data path fields.
 * Codec selection keys off conn->codec.id: mSBC, transparent, or the
 * CVSD/legacy default branch.
 */
386 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
388 struct conn_handle_t *conn_handle = data;
389 struct hci_conn *conn = conn_handle->conn;
390 __u16 handle = conn_handle->handle;
391 struct hci_cp_enhanced_setup_sync_conn cp;
392 const struct sco_param *param;
396 bt_dev_dbg(hdev, "hcon %p", conn);
398 /* for the offload use case, the codec needs to be configured before opening SCO */
399 if (conn->codec.data_path)
400 configure_datapath_sync(hdev, &conn->codec);
402 conn->state = BT_CONNECT;
407 memset(&cp, 0x00, sizeof(cp));
409 cp.handle = cpu_to_le16(handle);
/* 0x1f40 = 8000 (bytes/s) transmit/receive bandwidth. */
411 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
412 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
414 switch (conn->codec.id) {
/* mSBC: air coding format 0x05, host-side linear PCM (0x04). */
416 if (!find_next_esco_param(conn, esco_param_msbc,
417 ARRAY_SIZE(esco_param_msbc)))
420 param = &esco_param_msbc[conn->attempt - 1];
421 cp.tx_coding_format.id = 0x05;
422 cp.rx_coding_format.id = 0x05;
423 cp.tx_codec_frame_size = __cpu_to_le16(60);
424 cp.rx_codec_frame_size = __cpu_to_le16(60);
425 cp.in_bandwidth = __cpu_to_le32(32000);
426 cp.out_bandwidth = __cpu_to_le32(32000);
427 cp.in_coding_format.id = 0x04;
428 cp.out_coding_format.id = 0x04;
429 cp.in_coded_data_size = __cpu_to_le16(16);
430 cp.out_coded_data_size = __cpu_to_le16(16);
431 cp.in_pcm_data_format = 2;
432 cp.out_pcm_data_format = 2;
433 cp.in_pcm_sample_payload_msb_pos = 0;
434 cp.out_pcm_sample_payload_msb_pos = 0;
435 cp.in_data_path = conn->codec.data_path;
436 cp.out_data_path = conn->codec.data_path;
437 cp.in_transport_unit_size = 1;
438 cp.out_transport_unit_size = 1;
/* Transparent: host passes coded data through unchanged (0x03). */
441 case BT_CODEC_TRANSPARENT:
442 if (!find_next_esco_param(conn, esco_param_msbc,
443 ARRAY_SIZE(esco_param_msbc)))
445 param = &esco_param_msbc[conn->attempt - 1];
446 cp.tx_coding_format.id = 0x03;
447 cp.rx_coding_format.id = 0x03;
448 cp.tx_codec_frame_size = __cpu_to_le16(60);
449 cp.rx_codec_frame_size = __cpu_to_le16(60);
450 cp.in_bandwidth = __cpu_to_le32(0x1f40);
451 cp.out_bandwidth = __cpu_to_le32(0x1f40);
452 cp.in_coding_format.id = 0x03;
453 cp.out_coding_format.id = 0x03;
454 cp.in_coded_data_size = __cpu_to_le16(16);
455 cp.out_coded_data_size = __cpu_to_le16(16);
456 cp.in_pcm_data_format = 2;
457 cp.out_pcm_data_format = 2;
458 cp.in_pcm_sample_payload_msb_pos = 0;
459 cp.out_pcm_sample_payload_msb_pos = 0;
460 cp.in_data_path = conn->codec.data_path;
461 cp.out_data_path = conn->codec.data_path;
462 cp.in_transport_unit_size = 1;
463 cp.out_transport_unit_size = 1;
/* CVSD path: prefer eSCO parameter table when the peer supports
 * eSCO, otherwise fall back to the plain SCO table.
 */
467 if (conn->parent && lmp_esco_capable(conn->parent)) {
468 if (!find_next_esco_param(conn, esco_param_cvsd,
469 ARRAY_SIZE(esco_param_cvsd)))
471 param = &esco_param_cvsd[conn->attempt - 1];
473 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
475 param = &sco_param_cvsd[conn->attempt - 1];
477 cp.tx_coding_format.id = 2;
478 cp.rx_coding_format.id = 2;
479 cp.tx_codec_frame_size = __cpu_to_le16(60);
480 cp.rx_codec_frame_size = __cpu_to_le16(60);
481 cp.in_bandwidth = __cpu_to_le32(16000);
482 cp.out_bandwidth = __cpu_to_le32(16000);
483 cp.in_coding_format.id = 4;
484 cp.out_coding_format.id = 4;
485 cp.in_coded_data_size = __cpu_to_le16(16);
486 cp.out_coded_data_size = __cpu_to_le16(16);
487 cp.in_pcm_data_format = 2;
488 cp.out_pcm_data_format = 2;
489 cp.in_pcm_sample_payload_msb_pos = 0;
490 cp.out_pcm_sample_payload_msb_pos = 0;
491 cp.in_data_path = conn->codec.data_path;
492 cp.out_data_path = conn->codec.data_path;
493 cp.in_transport_unit_size = 16;
494 cp.out_transport_unit_size = 16;
/* Link parameters come from whichever table entry was selected. */
500 cp.retrans_effort = param->retrans_effort;
501 cp.pkt_type = __cpu_to_le16(param->pkt_type);
502 cp.max_latency = __cpu_to_le16(param->max_latency);
504 if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
/* Issue the classic (non-enhanced) HCI Setup Synchronous Connection
 * command for an (e)SCO link on ACL @handle, choosing link parameters
 * from the air-mode tables. Returns false when no usable parameter set
 * remains or the command cannot be sent.
 */
510 static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
512 struct hci_dev *hdev = conn->hdev;
513 struct hci_cp_setup_sync_conn cp;
514 const struct sco_param *param;
516 bt_dev_dbg(hdev, "hcon %p", conn);
518 conn->state = BT_CONNECT;
523 cp.handle = cpu_to_le16(handle);
/* 0x1f40 = 8000 (bytes/s) transmit/receive bandwidth. */
525 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
526 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
527 cp.voice_setting = cpu_to_le16(conn->setting);
529 switch (conn->setting & SCO_AIRMODE_MASK) {
530 case SCO_AIRMODE_TRANSP:
531 if (!find_next_esco_param(conn, esco_param_msbc,
532 ARRAY_SIZE(esco_param_msbc)))
534 param = &esco_param_msbc[conn->attempt - 1];
/* CVSD: eSCO table if the peer supports eSCO, else plain SCO. */
536 case SCO_AIRMODE_CVSD:
537 if (conn->parent && lmp_esco_capable(conn->parent)) {
538 if (!find_next_esco_param(conn, esco_param_cvsd,
539 ARRAY_SIZE(esco_param_cvsd)))
541 param = &esco_param_cvsd[conn->attempt - 1];
543 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
545 param = &sco_param_cvsd[conn->attempt - 1];
552 cp.retrans_effort = param->retrans_effort;
553 cp.pkt_type = __cpu_to_le16(param->pkt_type);
554 cp.max_latency = __cpu_to_le16(param->max_latency);
556 if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
/* Set up a synchronous (e)SCO connection on ACL @handle. Prefers the
 * enhanced command (queued via hci_cmd_sync_queue with a heap-allocated
 * conn_handle_t) when the controller supports it, otherwise falls back
 * to the classic hci_setup_sync_conn() path.
 */
562 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
565 struct conn_handle_t *conn_handle;
567 if (enhanced_sync_conn_capable(conn->hdev)) {
568 conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
573 conn_handle->conn = conn;
574 conn_handle->handle = handle;
575 result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
583 return hci_setup_sync_conn(conn, handle);
/* Request new LE connection parameters (interval @min..@max, @latency,
 * supervision @to_multiplier) via HCI LE Connection Update, and cache
 * them in the stored conn params for this peer (under hdev->lock) so
 * future connections reuse them.
 */
586 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
589 struct hci_dev *hdev = conn->hdev;
590 struct hci_conn_params *params;
591 struct hci_cp_le_conn_update cp;
595 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
597 params->conn_min_interval = min;
598 params->conn_max_interval = max;
599 params->conn_latency = latency;
600 params->supervision_timeout = to_multiplier;
603 hci_dev_unlock(hdev);
605 memset(&cp, 0, sizeof(cp));
606 cp.handle = cpu_to_le16(conn->handle);
607 cp.conn_interval_min = cpu_to_le16(min);
608 cp.conn_interval_max = cpu_to_le16(max);
609 cp.conn_latency = cpu_to_le16(latency);
610 cp.supervision_timeout = cpu_to_le16(to_multiplier);
/* No constraint on connection event length. */
611 cp.min_ce_len = cpu_to_le16(0x0000);
612 cp.max_ce_len = cpu_to_le16(0x0000);
614 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
/* Start LE link-layer encryption on @conn with the given EDIV/Rand and
 * long-term key; only the first @key_size bytes of @ltk are used.
 */
622 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
623 __u8 ltk[16], __u8 key_size)
625 struct hci_dev *hdev = conn->hdev;
626 struct hci_cp_le_start_enc cp;
628 BT_DBG("hcon %p", conn);
630 memset(&cp, 0, sizeof(cp));
632 cp.handle = cpu_to_le16(conn->handle);
635 memcpy(cp.ltk, ltk, key_size);
637 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
640 /* Device _must_ be locked */
/* Drive setup of the (e)SCO child link attached to ACL @conn once the
 * ACL result is known: on success start (e)SCO setup, on failure
 * propagate @status to the child and delete it.
 */
641 void hci_sco_setup(struct hci_conn *conn, __u8 status)
643 struct hci_link *link;
645 link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
646 if (!link || !link->conn)
649 BT_DBG("hcon %p", conn);
652 if (lmp_esco_capable(conn->hdev))
653 hci_setup_sync(link->conn, conn->handle);
655 hci_add_sco(link->conn, conn->handle);
657 hci_connect_cfm(link->conn, status);
658 hci_conn_del(link->conn);
/* disc_work handler: the connection's disconnect timer fired. LE
 * attempts still in the scanning phase get the special scan-removal
 * path; everything else is aborted with the protocol-supplied reason.
 */
662 static void hci_conn_timeout(struct work_struct *work)
664 struct hci_conn *conn = container_of(work, struct hci_conn,
666 int refcnt = atomic_read(&conn->refcnt);
668 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
672 /* FIXME: It was observed that in pairing failed scenario, refcnt
673 * drops below 0. Probably this is because l2cap_conn_del calls
674 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
675 * dropped. After that loop hci_chan_del is called which also drops
676 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
682 /* LE connections in scanning state need special handling */
683 if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
684 test_bit(HCI_CONN_SCANNING, &conn->flags)) {
685 hci_connect_le_scan_remove(conn);
689 hci_abort_conn(conn, hci_proto_disconn_ind(conn));
692 /* Enter sniff mode */
/* idle_work handler: put an idle ACL link into sniff mode (optionally
 * enabling sniff subrating first) when both sides support it and the
 * link policy allows it.
 */
693 static void hci_conn_idle(struct work_struct *work)
695 struct hci_conn *conn = container_of(work, struct hci_conn,
697 struct hci_dev *hdev = conn->hdev;
699 BT_DBG("hcon %p mode %d", conn, conn->mode);
701 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
704 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
/* Configure sniff subrating first if both ends support it. */
707 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
708 struct hci_cp_sniff_subrate cp;
709 cp.handle = cpu_to_le16(conn->handle);
710 cp.max_latency = cpu_to_le16(0);
711 cp.min_remote_timeout = cpu_to_le16(0);
712 cp.min_local_timeout = cpu_to_le16(0);
713 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
/* MODE_CHANGE_PEND guards against issuing overlapping mode changes. */
716 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
717 struct hci_cp_sniff_mode cp;
718 cp.handle = cpu_to_le16(conn->handle);
719 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
720 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
721 cp.attempt = cpu_to_le16(4);
722 cp.timeout = cpu_to_le16(1);
723 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
/* auto_accept_work handler: confirm a pending user-confirmation
 * request automatically by replying with the peer's address.
 */
727 static void hci_conn_auto_accept(struct work_struct *work)
729 struct hci_conn *conn = container_of(work, struct hci_conn,
730 auto_accept_work.work);
732 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
/* Turn off LE advertising, using the extended command (all sets) when
 * the controller supports extended advertising, otherwise the legacy
 * enable command.
 */
736 static void le_disable_advertising(struct hci_dev *hdev)
738 if (ext_adv_capable(hdev)) {
739 struct hci_cp_le_set_ext_adv_enable cp;
742 cp.num_of_sets = 0x00;
744 hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
748 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
/* le_conn_timeout work handler: an LE connection attempt exceeded its
 * timeout. Peripheral-role attempts (directed advertising) disable
 * advertising and fail the connection; central-role attempts are
 * aborted with Remote User Terminated.
 */
753 static void le_conn_timeout(struct work_struct *work)
755 struct hci_conn *conn = container_of(work, struct hci_conn,
756 le_conn_timeout.work);
757 struct hci_dev *hdev = conn->hdev;
761 /* We could end up here due to having done directed advertising,
762 * so clean up the state if necessary. This should however only
763 * happen with broken hardware or if low duty cycle was used
764 * (which doesn't have a timeout of its own).
766 if (conn->role == HCI_ROLE_SLAVE) {
767 /* Disable LE Advertising */
768 le_disable_advertising(hdev);
770 hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
771 hci_dev_unlock(hdev);
775 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
/* Scratch state shared by the ISO/BIG/CIG iteration callbacks below
 * (bis_list, find_bis, find_cis) and the terminate helpers; includes
 * the Set CIG Parameters command plus its CIS array (max 0x11 entries).
 */
778 struct iso_list_data {
790 struct hci_cp_le_set_cig_params cp;
791 struct hci_cis_params cis[0x11];
/* Connection-hash iterator: match broadcast (BDADDR_ANY destination)
 * ISO connections belonging to the BIG/BIS in @data.
 */
795 static void bis_list(struct hci_conn *conn, void *data)
797 struct iso_list_data *d = data;
799 /* Skip if not broadcast/ANY address */
800 if (bacmp(&conn->dst, BDADDR_ANY))
803 if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
804 d->bis != conn->iso_qos.bcast.bis)
/* Connection-hash iterator: count broadcast (BDADDR_ANY destination)
 * ISO connections, i.e. BIS links still in use.
 */
810 static void find_bis(struct hci_conn *conn, void *data)
812 struct iso_list_data *d = data;
815 if (bacmp(&conn->dst, BDADDR_ANY))
/* hci_cmd_sync callback: remove the advertising instance backing the
 * BIS, then terminate the BIG if no other connected BIS still uses it.
 */
821 static int terminate_big_sync(struct hci_dev *hdev, void *data)
823 struct iso_list_data *d = data;
825 bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
827 hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
829 /* Check if ISO connection is a BIS and terminate BIG if there are
830 * no other connections using it.
832 hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
836 return hci_le_terminate_big_sync(hdev, d->big,
837 HCI_ERROR_LOCAL_HOST_TERM);
/* hci_cmd_sync destroy callback: releases the iso_list_data allocated
 * by hci_le_terminate_big()/hci_le_big_terminate().
 */
840 static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
/* Queue asynchronous termination of BIG @big / BIS @bis (broadcaster
 * side). Allocates the iso_list_data context which is freed by
 * terminate_big_destroy().
 */
845 static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
847 struct iso_list_data *d;
850 bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);
852 d = kzalloc(sizeof(*d), GFP_KERNEL);
859 ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
860 terminate_big_destroy);
/* hci_cmd_sync callback (broadcast receiver side): terminate BIG sync
 * when no other connected BIS uses it, then terminate the PA sync.
 */
867 static int big_terminate_sync(struct hci_dev *hdev, void *data)
869 struct iso_list_data *d = data;
871 bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
874 /* Check if ISO connection is a BIS and terminate BIG if there are
875 * no other connections using it.
877 hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d);
881 hci_le_big_terminate_sync(hdev, d->big);
883 return hci_le_pa_terminate_sync(hdev, d->sync_handle);
/* Queue asynchronous BIG-sync/PA-sync termination for BIG @big with
 * periodic advertising @sync_handle (broadcast receiver side). The
 * allocated context is freed by terminate_big_destroy().
 */
886 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
888 struct iso_list_data *d;
891 bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);
893 d = kzalloc(sizeof(*d), GFP_KERNEL);
898 d->sync_handle = sync_handle;
900 ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
901 terminate_big_destroy);
908 /* Cleanup BIS connection
910 * Detects if there are any BIS left connected in a BIG
911 * broadcaster: Remove advertising instance and terminate BIG.
912 * broadcast receiver: Terminate BIG sync and terminate PA sync.
914 static void bis_cleanup(struct hci_conn *conn)
916 struct hci_dev *hdev = conn->hdev;
918 bt_dev_dbg(hdev, "conn %p", conn);
/* HCI_ROLE_MASTER here means we are the broadcaster; only clean up
 * if this connection actually owns the periodic advertising.
 */
920 if (conn->role == HCI_ROLE_MASTER) {
921 if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
924 hci_le_terminate_big(hdev, conn->iso_qos.bcast.big,
925 conn->iso_qos.bcast.bis);
927 hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
/* hci_cmd_sync callback: remove the CIG whose handle was smuggled in
 * @data via ERR_PTR (see hci_le_remove_cig), avoiding an allocation.
 */
932 static int remove_cig_sync(struct hci_dev *hdev, void *data)
934 u8 handle = PTR_ERR(data);
936 return hci_le_remove_cig_sync(hdev, handle);
/* Queue asynchronous removal of CIG @handle; the handle is passed as
 * an ERR_PTR-encoded integer so no context allocation is needed.
 */
939 static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
941 bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
943 return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL);
/* Connection-hash iterator: count unicast (non-broadcast destination)
 * ISO connections, i.e. CIS links still in use.
 */
946 static void find_cis(struct hci_conn *conn, void *data)
948 struct iso_list_data *d = data;
950 /* Ignore broadcast */
951 if (!bacmp(&conn->dst, BDADDR_ANY))
957 /* Cleanup CIS connection:
959 * Detects if there are any CIS left connected in a CIG and removes it.
961 static void cis_cleanup(struct hci_conn *conn)
963 struct hci_dev *hdev = conn->hdev;
964 struct iso_list_data d;
966 memset(&d, 0, sizeof(d));
967 d.cig = conn->iso_qos.ucast.cig;
969 /* Check if ISO connection is a CIS and remove CIG if there are
970 * no other connections using it.
972 hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
976 hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
/* Allocate and initialize a new hci_conn of @type to @dst on @hdev:
 * sets defaults (state BT_OPEN, unset handle, timeouts, packet types
 * per link type), wires up the work items, adds the connection to the
 * hash and registers it with sysfs. Returns the new connection or
 * NULL on allocation failure.
 */
979 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
982 struct hci_conn *conn;
984 BT_DBG("%s dst %pMR", hdev->name, dst);
986 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
990 bacpy(&conn->dst, dst);
991 bacpy(&conn->src, &hdev->bdaddr);
/* Handle is assigned later by the controller's connect event. */
992 conn->handle = HCI_CONN_HANDLE_UNSET;
996 conn->mode = HCI_CM_ACTIVE;
997 conn->state = BT_OPEN;
998 conn->auth_type = HCI_AT_GENERAL_BONDING;
999 conn->io_capability = hdev->io_capability;
/* 0xff sentinels: remote auth requirement / key type not yet known. */
1000 conn->remote_auth = 0xff;
1001 conn->key_type = 0xff;
1002 conn->rssi = HCI_RSSI_INVALID;
1003 conn->tx_power = HCI_TX_POWER_INVALID;
1004 conn->max_tx_power = HCI_TX_POWER_INVALID;
1006 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
1007 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1009 /* Set Default Authenticated payload timeout to 30s */
1010 conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
1012 if (conn->role == HCI_ROLE_MASTER)
/* Per-link-type packet type and (for ISO) cleanup selection. */
1017 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
1020 /* conn->src should reflect the local identity address */
1021 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
1024 /* conn->src should reflect the local identity address */
1025 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
1027 /* set proper cleanup function */
1028 if (!bacmp(dst, BDADDR_ANY))
1029 conn->cleanup = bis_cleanup;
1030 else if (conn->role == HCI_ROLE_MASTER)
1031 conn->cleanup = cis_cleanup;
1035 if (lmp_esco_capable(hdev))
1036 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
1037 (hdev->esco_type & EDR_ESCO_MASK);
1039 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
1042 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
1046 skb_queue_head_init(&conn->data_q);
1048 INIT_LIST_HEAD(&conn->chan_list);
1049 INIT_LIST_HEAD(&conn->link_list);
1051 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
1052 INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
1053 INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
1054 INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
1055 INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
1057 atomic_set(&conn->refcnt, 0);
1061 hci_conn_hash_add(hdev, conn);
1063 /* The SCO and eSCO connections will only be notified when their
1064 * setup has been completed. This is different to ACL links which
1065 * can be notified right away.
1067 if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
1069 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
1072 hci_conn_init_sysfs(conn);
/* Detach @conn from its parent/child link topology. For a parent
 * (no conn->parent) this recursively unlinks every child, deleting
 * never-established (e)SCO children while hdev is still up; for a
 * child it removes the link entry and drops the references held on
 * the parent.
 */
1077 static void hci_conn_unlink(struct hci_conn *conn)
1079 struct hci_dev *hdev = conn->hdev;
1081 bt_dev_dbg(hdev, "hcon %p", conn);
1083 if (!conn->parent) {
1084 struct hci_link *link, *t;
1086 list_for_each_entry_safe(link, t, &conn->link_list, list) {
1087 struct hci_conn *child = link->conn;
1089 hci_conn_unlink(child);
1091 /* If hdev is down it means
1092 * hci_dev_close_sync/hci_conn_hash_flush is in progress
1093 * and links don't need to be cleanup as all connections
1096 if (!test_bit(HCI_UP, &hdev->flags))
1099 /* Due to race, SCO connection might be not established
1100 * yet at this point. Delete it now, otherwise it is
1101 * possible for it to be stuck and can't be deleted.
1103 if ((child->type == SCO_LINK ||
1104 child->type == ESCO_LINK) &&
1105 child->handle == HCI_CONN_HANDLE_UNSET)
1106 hci_conn_del(child);
/* Child path: remove the link and release the parent references
 * taken when the link was created.
 */
1115 list_del_rcu(&conn->link->list);
1118 hci_conn_drop(conn->parent);
1119 hci_conn_put(conn->parent);
1120 conn->parent = NULL;
/* Delete @conn: unlink it from its parent/children, cancel all its
 * work items, return any unacked frame credits to the hdev counters
 * (per link type), drop the AMP manager reference, purge queued data
 * and finally run hci_conn_cleanup().
 */
1126 void hci_conn_del(struct hci_conn *conn)
1128 struct hci_dev *hdev = conn->hdev;
1130 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1132 hci_conn_unlink(conn);
/* _sync cancels: the handlers must not run after this point. */
1134 cancel_delayed_work_sync(&conn->disc_work);
1135 cancel_delayed_work_sync(&conn->auto_accept_work);
1136 cancel_delayed_work_sync(&conn->idle_work);
1138 if (conn->type == ACL_LINK) {
1139 /* Unacked frames */
1140 hdev->acl_cnt += conn->sent;
1141 } else if (conn->type == LE_LINK) {
1142 cancel_delayed_work(&conn->le_conn_timeout);
/* Credits go back to the LE pool if present, else the ACL pool. */
1145 hdev->le_cnt += conn->sent;
1147 hdev->acl_cnt += conn->sent;
1149 /* Unacked ISO frames */
1150 if (conn->type == ISO_LINK) {
1152 hdev->iso_cnt += conn->sent;
1153 else if (hdev->le_pkts)
1154 hdev->le_cnt += conn->sent;
1156 hdev->acl_cnt += conn->sent;
1161 amp_mgr_put(conn->amp_mgr);
1163 skb_queue_purge(&conn->data_q);
1165 /* Remove the connection from the list and cleanup its remaining
1166 * state. This is a separate function since for some cases like
1167 * BT_CONNECT_SCAN we *only* want the cleanup part without the
1168 * rest of hci_conn_del.
1170 hci_conn_cleanup(conn);
/* Pick the local primary controller to use for reaching @dst. With a
 * non-ANY @src the controller whose own/identity address matches
 * @src/@src_type is chosen; otherwise the first up controller whose
 * address differs from @dst. Returns a held hci_dev reference or NULL.
 */
1173 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1175 int use_src = bacmp(src, BDADDR_ANY);
1176 struct hci_dev *hdev = NULL, *d;
1178 BT_DBG("%pMR -> %pMR", src, dst);
1180 read_lock(&hci_dev_list_lock);
1182 list_for_each_entry(d, &hci_dev_list, list) {
/* Skip controllers that are down, user-channel owned or not primary. */
1183 if (!test_bit(HCI_UP, &d->flags) ||
1184 hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
1185 d->dev_type != HCI_PRIMARY)
1189 * No source address - find interface with bdaddr != dst
1190 * Source address - find interface with bdaddr == src
1197 if (src_type == BDADDR_BREDR) {
1198 if (!lmp_bredr_capable(d))
1200 bacpy(&id_addr, &d->bdaddr);
1201 id_addr_type = BDADDR_BREDR;
1203 if (!lmp_le_capable(d))
1206 hci_copy_identity_address(d, &id_addr,
1209 /* Convert from HCI to three-value type */
1210 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1211 id_addr_type = BDADDR_LE_PUBLIC;
1213 id_addr_type = BDADDR_LE_RANDOM;
1216 if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1220 if (bacmp(&d->bdaddr, dst)) {
1227 hdev = hci_dev_hold(hdev);
1229 read_unlock(&hci_dev_list_lock);
1232 EXPORT_SYMBOL(hci_get_route);
1234 /* This function requires the caller holds hdev->lock */
/* LE-specific failure handling: run the connect-by-scanning cleanup
 * with @status, then re-enable advertising in case this was a failed
 * peripheral-role (directed advertising) attempt.
 */
1235 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1237 struct hci_dev *hdev = conn->hdev;
1239 hci_connect_le_scan_cleanup(conn, status);
1241 /* Enable advertising in case this was a failed connection
1242 * attempt as a peripheral.
1244 hci_enable_advertising(hdev);
1247 /* This function requires the caller holds hdev->lock */
/* Common connection-failure path: per-type handling (LE cleanup, or
 * MGMT notification for the other types), then close the connection
 * and confirm the failure to the upper protocols.
 */
1248 void hci_conn_failed(struct hci_conn *conn, u8 status)
1250 struct hci_dev *hdev = conn->hdev;
1252 bt_dev_dbg(hdev, "status 0x%2.2x", status);
1254 switch (conn->type) {
1256 hci_le_conn_failed(conn, status);
1259 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1260 conn->dst_type, status);
1264 conn->state = BT_CLOSED;
1265 hci_connect_cfm(conn, status);
/* Completion callback for the queued LE create-connection request.
 * On error, fail the connection if it is still the pending LE connect
 * (flushing le_conn_timeout first so a cancel is sent when needed).
 */
1269 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
1271 struct hci_conn *conn = data;
1273 bt_dev_dbg(hdev, "err %d", err);
1278 hci_connect_le_scan_cleanup(conn, 0x00);
1282 /* Check if connection is still pending */
1283 if (conn != hci_lookup_le_connect(hdev))
1286 /* Flush to make sure we send create conn cancel command if needed */
1287 flush_delayed_work(&conn->le_conn_timeout);
1288 hci_conn_failed(conn, bt_status(err));
1291 hci_dev_unlock(hdev);
/* hci_cmd_sync callback: thin wrapper issuing the LE create-connection
 * sequence for the hci_conn passed in @data.
 */
1294 static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
1296 struct hci_conn *conn = data;
1298 bt_dev_dbg(hdev, "conn %p", conn);
1300 return hci_le_create_conn_sync(hdev, conn);
/* Initiate a direct LE connection to @dst.
 * Fails with -ECONNREFUSED/-EOPNOTSUPP when LE is disabled/unsupported
 * and -EBUSY when another LE connect attempt is already running or an
 * established connection to the peer exists. Resolves identity
 * addresses to the current RPA when an IRK is known, allocates and
 * holds the hci_conn, and queues hci_connect_le_sync() with
 * create_le_conn_complete() as completion. Returns the held conn or
 * an ERR_PTR.
 */
1303 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1304 u8 dst_type, bool dst_resolved, u8 sec_level,
1305 u16 conn_timeout, u8 role)
1307 struct hci_conn *conn;
1308 struct smp_irk *irk;
1311 /* Let's make sure that le is enabled.*/
1312 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1313 if (lmp_le_capable(hdev))
1314 return ERR_PTR(-ECONNREFUSED);
1316 return ERR_PTR(-EOPNOTSUPP);
1319 /* Since the controller supports only one LE connection attempt at a
1320 * time, we return -EBUSY if there is any connection attempt running.
1322 if (hci_lookup_le_connect(hdev))
1323 return ERR_PTR(-EBUSY);
1325 /* If there's already a connection object but it's not in
1326 * scanning state it means it must already be established, in
1327 * which case we can't do anything else except report a failure
1330 conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1331 if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1332 return ERR_PTR(-EBUSY);
1335 /* Check if the destination address has been resolved by the controller
1336 * since if it did then the identity address shall be used.
1338 if (!dst_resolved) {
1339 /* When given an identity address with existing identity
1340 * resolving key, the connection needs to be established
1341 * to a resolvable random address.
1343 * Storing the resolvable random address is required here
1344 * to handle connection failures. The address will later
1345 * be resolved back into the original identity address
1346 * from the connect request.
1348 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1349 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1351 dst_type = ADDR_LE_DEV_RANDOM;
1356 bacpy(&conn->dst, dst);
1358 conn = hci_conn_add(hdev, LE_LINK, dst, role);
1360 return ERR_PTR(-ENOMEM);
/* Hold matches the reference returned to the caller. */
1361 hci_conn_hold(conn);
1362 conn->pending_sec_level = sec_level;
1365 conn->dst_type = dst_type;
1366 conn->sec_level = BT_SECURITY_LOW;
1367 conn->conn_timeout = conn_timeout;
1369 conn->state = BT_CONNECT;
/* Direct connect: not using the connect-by-scanning path. */
1370 clear_bit(HCI_CONN_SCANNING, &conn->flags);
1372 err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
1373 create_le_conn_complete);
1376 return ERR_PTR(err);
/* Return true iff an LE connection to @addr/@type exists and has reached
 * BT_CONNECTED state (elided lines carry the actual returns).
 */
1382 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1384 struct hci_conn *conn;
1386 conn = hci_conn_hash_lookup_le(hdev, addr, type);
1390 if (conn->state != BT_CONNECTED)
1396 /* This function requires the caller holds hdev->lock */
1397 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1398 bdaddr_t *addr, u8 addr_type)
1400 struct hci_conn_params *params;
1402 if (is_connected(hdev, addr, addr_type))
1405 params = hci_conn_params_lookup(hdev, addr, addr_type);
1407 params = hci_conn_params_add(hdev, addr, addr_type);
1411 /* If we created new params, mark them to be deleted in
1412 * hci_connect_le_scan_cleanup. It's different case than
1413 * existing disabled params, those will stay after cleanup.
1415 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1418 /* We're trying to connect, so make sure params are at pend_le_conns */
1419 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1420 params->auto_connect == HCI_AUTO_CONN_REPORT ||
1421 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1422 list_del_init(¶ms->action);
1423 list_add(¶ms->action, &hdev->pend_le_conns);
1426 params->explicit_connect = true;
1428 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1429 params->auto_connect);
/* Allocate a free BIG handle for @qos if the caller left it unset.
 * Scans handles 0x00..0xee for one with no conn in BT_BOUND/BT_CONNECT
 * (elided lines carry the state checks). Returns 0 on success or
 * -EADDRNOTAVAIL when all handles are taken.
 */
1434 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1436 struct iso_list_data data;
1438 /* Allocate a BIG if not set */
1439 if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1440 for (data.big = 0x00; data.big < 0xef; data.big++) {
1444 hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1450 if (data.big == 0xef)
1451 return -EADDRNOTAVAIL;
1454 qos->bcast.big = data.big;
/* Allocate a free BIS (advertising set) for @qos if the caller left it
 * unset. Returns 0 on success or -EADDRNOTAVAIL when no adv set is free.
 */
1460 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1462 struct iso_list_data data;
1464 /* Allocate BIS if not set */
1465 if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1466 /* Find an unused adv set to advertise BIS, skip instance 0x00
1467 * since it is reserved as general purpose set.
1469 for (data.bis = 0x01; data.bis < hdev->le_num_of_adv_sets;
1473 hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1479 if (data.bis == hdev->le_num_of_adv_sets)
1480 return -EADDRNOTAVAIL;
1483 qos->bcast.bis = data.bis;
1489 /* This function requires the caller holds hdev->lock */
/* Allocate a BIS hci_conn for broadcasting to @dst with @qos.
 * Allocates BIG/BIS identifiers when unset and rejects duplicates.
 * Returns the held conn in BT_CONNECT state or an ERR_PTR
 * (-ECONNREFUSED/-EOPNOTSUPP/-EADDRNOTAVAIL/-EADDRINUSE/-ENOMEM).
 */
1490 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1491 struct bt_iso_qos *qos)
1493 struct hci_conn *conn;
1494 struct iso_list_data data;
1497 /* Let's make sure that le is enabled.*/
1498 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1499 if (lmp_le_capable(hdev))
1500 return ERR_PTR(-ECONNREFUSED);
1501 return ERR_PTR(-EOPNOTSUPP);
1504 err = qos_set_big(hdev, qos);
1506 return ERR_PTR(err);
1508 err = qos_set_bis(hdev, qos);
1510 return ERR_PTR(err);
1512 data.big = qos->bcast.big;
1513 data.bis = qos->bcast.bis;
1516 /* Check if there is already a matching BIG/BIS */
1517 hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, BT_BOUND, &data);
1519 return ERR_PTR(-EADDRINUSE);
1521 conn = hci_conn_hash_lookup_bis(hdev, dst, qos->bcast.big, qos->bcast.bis);
1523 return ERR_PTR(-EADDRINUSE);
1525 conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1527 return ERR_PTR(-ENOMEM);
/* Broadcast sources use periodic advertising */
1529 set_bit(HCI_CONN_PER_ADV, &conn->flags);
1530 conn->state = BT_CONNECT;
1532 hci_conn_hold(conn);
1536 /* This function requires the caller holds hdev->lock */
/* Connect to @dst via passive scanning (auto-connect when the device
 * advertises) instead of a direct Create Connection. Returns the held
 * conn or an ERR_PTR (-ECONNREFUSED/-EOPNOTSUPP/-ENOMEM/-EBUSY).
 */
1537 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1538 u8 dst_type, u8 sec_level,
1540 enum conn_reasons conn_reason)
1542 struct hci_conn *conn;
1544 /* Let's make sure that le is enabled.*/
1545 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1546 if (lmp_le_capable(hdev))
1547 return ERR_PTR(-ECONNREFUSED);
1549 return ERR_PTR(-EOPNOTSUPP);
1552 /* Some devices send ATT messages as soon as the physical link is
1553 * established. To be able to handle these ATT messages, the user-
1554 * space first establishes the connection and then starts the pairing
1557 * So if a hci_conn object already exists for the following connection
1558 * attempt, we simply update pending_sec_level and auth_type fields
1559 * and return the object found.
1561 conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1563 if (conn->pending_sec_level < sec_level)
1564 conn->pending_sec_level = sec_level;
1568 BT_DBG("requesting refresh of dst_addr");
1570 conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1572 return ERR_PTR(-ENOMEM);
1574 if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1576 return ERR_PTR(-EBUSY);
/* BT_CONNECT + SCANNING: connect will be triggered on adv report */
1579 conn->state = BT_CONNECT;
1580 set_bit(HCI_CONN_SCANNING, &conn->flags);
1581 conn->dst_type = dst_type;
1582 conn->sec_level = BT_SECURITY_LOW;
1583 conn->pending_sec_level = sec_level;
1584 conn->conn_timeout = conn_timeout;
1585 conn->conn_reason = conn_reason;
1587 hci_update_passive_scan(hdev);
1590 hci_conn_hold(conn);
/* Create (or reuse) a BR/EDR ACL connection to @dst and kick off the
 * HCI Create Connection when the link is not yet being set up.
 * Returns the conn or ERR_PTR (-ECONNREFUSED/-EOPNOTSUPP/-ENOMEM).
 */
1594 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1595 u8 sec_level, u8 auth_type,
1596 enum conn_reasons conn_reason)
1598 struct hci_conn *acl;
1600 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1601 if (lmp_bredr_capable(hdev))
1602 return ERR_PTR(-ECONNREFUSED);
1604 return ERR_PTR(-EOPNOTSUPP);
1607 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1609 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1611 return ERR_PTR(-ENOMEM);
1616 acl->conn_reason = conn_reason;
/* Only (re)issue Create Connection for idle/closed links */
1617 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1618 acl->sec_level = BT_SECURITY_LOW;
1619 acl->pending_sec_level = sec_level;
1620 acl->auth_type = auth_type;
1621 hci_acl_create_connection(acl);
/* Link a secondary connection (SCO/ISO) @conn under @parent (ACL/LE).
 * Takes a hold on @conn and a get on @parent; the link is appended to
 * parent->link_list so ordering matches request order.
 */
1627 static struct hci_link *hci_conn_link(struct hci_conn *parent,
1628 struct hci_conn *conn)
1630 struct hci_dev *hdev = parent->hdev;
1631 struct hci_link *link;
1633 bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);
1641 link = kzalloc(sizeof(*link), GFP_KERNEL);
1645 link->conn = hci_conn_hold(conn);
1647 conn->parent = hci_conn_get(parent);
1649 /* Use list_add_tail_rcu append to the list */
1650 list_add_tail_rcu(&link->list, &parent->link_list);
/* Establish a SCO/eSCO connection to @dst, creating the underlying ACL
 * first. SCO setup is deferred until the ACL is connected and any
 * pending mode change has completed.
 */
1655 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1656 __u16 setting, struct bt_codec *codec)
1658 struct hci_conn *acl;
1659 struct hci_conn *sco;
1660 struct hci_link *link;
1662 acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1663 CONN_REASON_SCO_CONNECT);
1667 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1669 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
1672 return ERR_PTR(-ENOMEM);
1676 link = hci_conn_link(acl, sco);
1683 sco->setting = setting;
1684 sco->codec = *codec;
1686 if (acl->state == BT_CONNECTED &&
1687 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
/* Force the ACL into active mode before adding a sync link */
1688 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1689 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1691 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1692 /* defer SCO setup until mode change completed */
1693 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1697 hci_sco_setup(acl, 0x00);
/* Append one CIS entry built from @qos to the Set CIG Parameters PDU
 * being assembled in @d, and bump the CIS count. If one direction's PHY
 * is unset, the other direction's PHY is used for it.
 */
1703 static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos)
1705 struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis];
1707 cis->cis_id = qos->ucast.cis;
1708 cis->c_sdu = cpu_to_le16(qos->ucast.out.sdu);
1709 cis->p_sdu = cpu_to_le16(qos->ucast.in.sdu);
1710 cis->c_phy = qos->ucast.out.phy ? qos->ucast.out.phy : qos->ucast.in.phy;
1711 cis->p_phy = qos->ucast.in.phy ? qos->ucast.in.phy : qos->ucast.out.phy;
1712 cis->c_rtn = qos->ucast.out.rtn;
1713 cis->p_rtn = qos->ucast.in.rtn;
1715 d->pdu.cp.num_cis++;
/* hci_conn_hash_list_state() iterator: collect CIS entries matching the
 * CIG/CIS ids in @data into the PDU, skipping broadcast connections and
 * respecting the PDU capacity (elided lines carry the early returns).
 */
1718 static void cis_list(struct hci_conn *conn, void *data)
1720 struct iso_list_data *d = data;
1722 /* Skip if broadcast/ANY address */
1723 if (!bacmp(&conn->dst, BDADDR_ANY))
1726 if (d->cig != conn->iso_qos.ucast.cig || d->cis == BT_ISO_QOS_CIS_UNSET ||
1727 d->cis != conn->iso_qos.ucast.cis)
1732 if (d->pdu.cp.cig_id == BT_ISO_QOS_CIG_UNSET ||
1733 d->count >= ARRAY_SIZE(d->pdu.cis))
1736 cis_add(d, &conn->iso_qos);
/* Send HCI LE Create BIG for @conn using the broadcast parameters in
 * @qos. Returns the hci_send_cmd() result.
 */
1739 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1741 struct hci_dev *hdev = conn->hdev;
1742 struct hci_cp_le_create_big cp;
1744 memset(&cp, 0, sizeof(cp));
1746 cp.handle = qos->bcast.big;
1747 cp.adv_handle = qos->bcast.bis;
/* SDU interval is a 24-bit little-endian field */
1749 hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
1750 cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
1751 cp.bis.latency = cpu_to_le16(qos->bcast.out.latency);
1752 cp.bis.rtn = qos->bcast.out.rtn;
1753 cp.bis.phy = qos->bcast.out.phy;
1754 cp.bis.packing = qos->bcast.packing;
1755 cp.bis.framing = qos->bcast.framing;
1756 cp.bis.encryption = qos->bcast.encryption;
1757 memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));
1759 return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
/* Build and send HCI LE Set CIG Parameters for @conn's CIG.
 * Allocates a free CIG (and CIS) id when unset, then reprograms every
 * bound CIS sharing the CIG. Returns true on success, false on failure
 * (elided lines carry the intermediate returns).
 */
1762 static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
1764 struct hci_dev *hdev = conn->hdev;
1765 struct iso_list_data data;
1767 memset(&data, 0, sizeof(data));
1769 /* Allocate a CIG if not set */
1770 if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
1771 for (data.cig = 0x00; data.cig < 0xff; data.cig++) {
/* A CIG id is free only if no conn uses it in BOUND/CONNECT/CONNECTED */
1775 hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
1780 hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
1781 BT_CONNECTED, &data);
1786 if (data.cig == 0xff)
1790 qos->ucast.cig = data.cig;
1793 data.pdu.cp.cig_id = qos->ucast.cig;
1794 hci_cpu_to_le24(qos->ucast.out.interval, data.pdu.cp.c_interval);
1795 hci_cpu_to_le24(qos->ucast.in.interval, data.pdu.cp.p_interval);
1796 data.pdu.cp.sca = qos->ucast.sca;
1797 data.pdu.cp.packing = qos->ucast.packing;
1798 data.pdu.cp.framing = qos->ucast.framing;
1799 data.pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
1800 data.pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
1802 if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
1804 data.cig = qos->ucast.cig;
1805 data.cis = qos->ucast.cis;
1807 hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
1812 cis_add(&data, qos);
1815 /* Reprogram all CIS(s) with the same CIG */
1816 for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0x11;
1820 hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND,
1825 /* Allocate a CIS if not set */
1826 if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET) {
1828 qos->ucast.cis = data.cis;
1829 cis_add(&data, qos);
1833 if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis)
/* Variable-length PDU: header plus num_cis CIS entries */
1836 if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1837 sizeof(data.pdu.cp) +
1838 (data.pdu.cp.num_cis * sizeof(*data.pdu.cis)),
/* Create (or reuse) a CIS hci_conn for @dst and program its CIG
 * parameters from @qos, filling in unset intervals/latencies from the
 * opposite direction. Returns the conn in BT_BOUND state or an ERR_PTR
 * (-ENOMEM/-EINVAL).
 */
1845 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1846 __u8 dst_type, struct bt_iso_qos *qos)
1848 struct hci_conn *cis;
1850 cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
1853 cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1855 return ERR_PTR(-ENOMEM);
1856 cis->cleanup = cis_cleanup;
1857 cis->dst_type = dst_type;
1860 if (cis->state == BT_CONNECTED)
1863 /* Check if CIS has been set and the settings matches */
1864 if (cis->state == BT_BOUND &&
1865 !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
1868 /* Update LINK PHYs according to QoS preference */
1869 cis->le_tx_phy = qos->ucast.out.phy;
1870 cis->le_rx_phy = qos->ucast.in.phy;
1872 /* If output interval is not set use the input interval as it cannot be
1875 if (!qos->ucast.out.interval)
1876 qos->ucast.out.interval = qos->ucast.in.interval;
1878 /* If input interval is not set use the output interval as it cannot be
1881 if (!qos->ucast.in.interval)
1882 qos->ucast.in.interval = qos->ucast.out.interval;
1884 /* If output latency is not set use the input latency as it cannot be
1887 if (!qos->ucast.out.latency)
1888 qos->ucast.out.latency = qos->ucast.in.latency;
1890 /* If input latency is not set use the output latency as it cannot be
1893 if (!qos->ucast.in.latency)
1894 qos->ucast.in.latency = qos->ucast.out.latency;
1896 if (!hci_le_set_cig_params(cis, qos)) {
1898 return ERR_PTR(-EINVAL);
1901 cis->iso_qos = *qos;
1902 cis->state = BT_BOUND;
/* Configure the ISO data paths (input and/or output, depending on which
 * SDU sizes are non-zero) for @conn via LE Setup ISO Data Path.
 * Returns false if sending either command fails (elided lines).
 */
1908 bool hci_iso_setup_path(struct hci_conn *conn)
1910 struct hci_dev *hdev = conn->hdev;
1911 struct hci_cp_le_setup_iso_path cmd;
1912 memset(&cmd, 0, sizeof(cmd));
1914 if (conn->iso_qos.ucast.out.sdu) {
1915 cmd.handle = cpu_to_le16(conn->handle);
1916 cmd.direction = 0x00; /* Input (Host to Controller) */
1917 cmd.path = 0x00; /* HCI path if enabled */
1918 cmd.codec = 0x03; /* Transparent Data */
1920 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1925 if (conn->iso_qos.ucast.in.sdu) {
1926 cmd.handle = cpu_to_le16(conn->handle);
1927 cmd.direction = 0x01; /* Output (Controller to Host) */
1928 cmd.path = 0x00; /* HCI path if enabled */
1929 cmd.codec = 0x03; /* Transparent Data */
1931 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
/* hci_cmd_sync work function: thin wrapper issuing LE Create CIS. */
1939 static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
1941 return hci_le_create_cis_sync(hdev, data);
/* Queue LE Create CIS for @conn. When called on the parent LE link,
 * recurses into the first bound CIS in request order; the CIS itself is
 * then queued on the cmd_sync workqueue and moved to BT_CONNECT.
 */
1944 int hci_le_create_cis(struct hci_conn *conn)
1946 struct hci_conn *cis;
1947 struct hci_link *link, *t;
1948 struct hci_dev *hdev = conn->hdev;
1951 bt_dev_dbg(hdev, "hcon %p", conn);
1953 switch (conn->type) {
1955 if (conn->state != BT_CONNECTED || list_empty(&conn->link_list))
1960 /* hci_conn_link uses list_add_tail_rcu so the list is in
1961 * the same order as the connections are requested.
1963 list_for_each_entry_safe(link, t, &conn->link_list, list) {
1964 if (link->conn->state == BT_BOUND) {
1965 err = hci_le_create_cis(link->conn);
1973 return cis ? 0 : -EINVAL;
/* Already being created - nothing to do */
1981 if (cis->state == BT_CONNECT)
1984 /* Queue Create CIS */
1985 err = hci_cmd_sync_queue(hdev, hci_create_cis_sync, cis, NULL);
1989 cis->state = BT_CONNECT;
/* Fill in unset fields of one ISO QoS direction from controller/ACL
 * defaults: SDU from the best available MTU, PHY from the ACL when set
 * to "any", interval/latency from the LE connection parameters.
 */
1994 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
1995 struct bt_iso_io_qos *qos, __u8 phy)
1997 /* Only set MTU if PHY is enabled */
1998 if (!qos->sdu && qos->phy) {
1999 if (hdev->iso_mtu > 0)
2000 qos->sdu = hdev->iso_mtu;
2001 else if (hdev->le_mtu > 0)
2002 qos->sdu = hdev->le_mtu;
2004 qos->sdu = hdev->acl_mtu;
2007 /* Use the same PHY as ACL if set to any */
2008 if (qos->phy == BT_ISO_PHY_ANY)
2011 /* Use LE ACL connection interval if not set */
2013 /* ACL interval unit in 1.25 ms to us */
2014 qos->interval = conn->le_conn_interval * 1250;
2016 /* Use LE ACL connection latency if not set */
2018 qos->latency = conn->le_conn_latency;
2021 static void hci_bind_bis(struct hci_conn *conn,
2022 struct bt_iso_qos *qos)
2024 /* Update LINK PHYs according to QoS preference */
2025 conn->le_tx_phy = qos->bcast.out.phy;
2026 conn->le_tx_phy = qos->bcast.out.phy;
2027 conn->iso_qos = *qos;
2028 conn->state = BT_BOUND;
/* hci_cmd_sync work: start periodic advertising for the BIS (carrying
 * any per-adv data) and then issue LE Create BIG for @data (hci_conn).
 */
2031 static int create_big_sync(struct hci_dev *hdev, void *data)
2033 struct hci_conn *conn = data;
2034 struct bt_iso_qos *qos = &conn->iso_qos;
2035 u16 interval, sync_interval = 0;
2039 if (qos->bcast.out.phy == 0x02)
2040 flags |= MGMT_ADV_FLAG_SEC_2M;
2041 /* Align intervals */
/* SDU interval (us) to adv interval units of 1.25 ms */
2043 interval = qos->bcast.out.interval / 1250;
/* sync_interval in units of 0.625 ms; *1600 converts from seconds */
2046 sync_interval = qos->bcast.sync_interval * 1600;
2048 err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
2049 conn->le_per_adv_data, flags, interval,
2050 interval, sync_interval);
2054 return hci_le_create_big(conn, &conn->iso_qos);
/* Completion callback for the queued create_pa_sync() request; only
 * logs a failure (elided lines may free @data and clear flags).
 */
2057 static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
2059 struct hci_cp_le_pa_create_sync *cp = data;
2061 bt_dev_dbg(hdev, "");
2064 bt_dev_err(hdev, "Unable to create PA: %d", err);
/* hci_cmd_sync work: issue LE Periodic Advertising Create Sync with the
 * parameters in @data; on failure the HCI_PA_SYNC flag is cleared.
 * Finishes by refreshing passive scanning.
 */
2069 static int create_pa_sync(struct hci_dev *hdev, void *data)
2071 struct hci_cp_le_pa_create_sync *cp = data;
2074 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2075 sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2077 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2081 return hci_update_passive_scan_sync(hdev);
/* Queue a Periodic Advertising Create Sync for @dst. The HCI_PA_SYNC
 * flag serializes attempts; @cp ownership passes to the cmd_sync queue.
 */
2084 int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
2085 __u8 sid, struct bt_iso_qos *qos)
2087 struct hci_cp_le_pa_create_sync *cp;
/* Only one PA create sync may be pending at a time */
2089 if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
2092 cp = kzalloc(sizeof(*cp), GFP_KERNEL)
2094 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2098 cp->options = qos->bcast.options;
2100 cp->addr_type = dst_type;
2101 bacpy(&cp->addr, dst);
2102 cp->skip = cpu_to_le16(qos->bcast.skip);
2103 cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
2104 cp->sync_cte_type = qos->bcast.sync_cte_type;
2106 /* Queue start pa_create_sync and scan */
2107 return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
/* Send LE BIG Create Sync to synchronize to @num_bis BISes of the BIG
 * reachable through periodic-adv @sync_handle. The PDU is the command
 * header plus a variable-length BIS index array (elided lines declare
 * the wrapper struct holding cp + bis[]).
 */
2110 int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos,
2111 __u16 sync_handle, __u8 num_bis, __u8 bis[])
2114 struct hci_cp_le_big_create_sync cp;
2119 if (num_bis > sizeof(pdu.bis))
2122 err = qos_set_big(hdev, qos);
2126 memset(&pdu, 0, sizeof(pdu));
2127 pdu.cp.handle = qos->bcast.big;
2128 pdu.cp.sync_handle = cpu_to_le16(sync_handle);
2129 pdu.cp.encryption = qos->bcast.encryption;
2130 memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
2131 pdu.cp.mse = qos->bcast.mse;
2132 pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
2133 pdu.cp.num_bis = num_bis;
2134 memcpy(pdu.bis, bis, num_bis);
2136 return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2137 sizeof(pdu.cp) + num_bis, &pdu);
/* Completion callback for the queued create_big_sync() request; on
 * failure reports the error to the upper layer via hci_connect_cfm().
 */
2140 static void create_big_complete(struct hci_dev *hdev, void *data, int err)
2142 struct hci_conn *conn = data;
2144 bt_dev_dbg(hdev, "conn %p", conn);
2147 bt_dev_err(hdev, "Unable to create BIG: %d", err);
2148 hci_connect_cfm(conn, err);
/* Start an ISO broadcast (BIS) to @dst: allocate the BIS conn, bind the
 * QoS, attach the BASE to the periodic advertising data, then queue
 * periodic advertising + Create BIG on the cmd_sync workqueue.
 */
2153 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
2154 __u8 dst_type, struct bt_iso_qos *qos,
2155 __u8 base_len, __u8 *base)
2157 struct hci_conn *conn;
2160 /* We need hci_conn object using the BDADDR_ANY as dst */
2161 conn = hci_add_bis(hdev, dst, qos);
2165 hci_bind_bis(conn, qos);
2167 /* Add Basic Announcement into Periodic Adv Data if BASE is set */
2168 if (base_len && base) {
/* 0x1851 = Broadcast Audio Announcement service UUID */
2169 base_len = eir_append_service_data(conn->le_per_adv_data, 0,
2170 0x1851, base, base_len);
2171 conn->le_per_adv_data_len = base_len;
2174 /* Queue start periodic advertising and create BIG */
2175 err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
2176 create_big_complete);
2178 hci_conn_drop(conn);
2179 return ERR_PTR(err);
2182 hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2183 conn->le_tx_phy ? conn->le_tx_phy :
2184 hdev->le_tx_def_phys);
/* Establish a CIS to @dst: bring up the underlying LE link (directly
 * when advertising, via passive scan otherwise), fill in QoS defaults,
 * bind the CIS and link it under the LE connection. Create CIS is sent
 * immediately if the LE link is already up with a valid CIS handle.
 */
2189 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
2190 __u8 dst_type, struct bt_iso_qos *qos)
2192 struct hci_conn *le;
2193 struct hci_conn *cis;
2194 struct hci_link *link;
2196 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2197 le = hci_connect_le(hdev, dst, dst_type, false,
2199 HCI_LE_CONN_TIMEOUT,
2202 le = hci_connect_le_scan(hdev, dst, dst_type,
2204 HCI_LE_CONN_TIMEOUT,
2205 CONN_REASON_ISO_CONNECT);
2209 hci_iso_qos_setup(hdev, le, &qos->ucast.out,
2210 le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
2211 hci_iso_qos_setup(hdev, le, &qos->ucast.in,
2212 le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
2214 cis = hci_bind_cis(hdev, dst, dst_type, qos);
2220 link = hci_conn_link(le, cis);
2227 /* If LE is already connected and CIS handle is already set proceed to
2228 * Create CIS immediately.
2230 if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET)
2231 hci_le_create_cis(cis);
2236 /* Check link security requirement */
/* Verify the link satisfies the applicable security policy (SC-only
 * mode, FIPS level AES-CCM, SSP encryption). Non-zero means acceptable
 * (elided lines carry the return statements).
 */
2237 int hci_conn_check_link_mode(struct hci_conn *conn)
2239 BT_DBG("hcon %p", conn);
2241 /* In Secure Connections Only mode, it is required that Secure
2242 * Connections is used and the link is encrypted with AES-CCM
2243 * using a P-256 authenticated combination key.
2245 if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2246 if (!hci_conn_sc_enabled(conn) ||
2247 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2248 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2252 /* AES encryption is required for Level 4:
2254 * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2257 * 128-bit equivalent strength for link and encryption keys
2258 * required using FIPS approved algorithms (E0 not allowed,
2259 * SAFER+ not allowed, and P-192 not allowed; encryption key
2262 if (conn->sec_level == BT_SECURITY_FIPS &&
2263 !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2264 bt_dev_err(conn->hdev,
2265 "Invalid security: Missing AES-CCM usage");
2269 if (hci_conn_ssp_enabled(conn) &&
2270 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2276 /* Authenticate remote device */
/* Raise pending security level if needed and, unless authentication is
 * already pending, send HCI Authentication Requested; marks whether a
 * re-auth or fresh encryption should follow.
 */
2277 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
2279 BT_DBG("hcon %p", conn);
2281 if (conn->pending_sec_level > sec_level)
2282 sec_level = conn->pending_sec_level;
2284 if (sec_level > conn->sec_level)
2285 conn->pending_sec_level = sec_level;
2286 else if (test_bit(HCI_CONN_AUTH, &conn->flags))
2289 /* Make sure we preserve an existing MITM requirement*/
2290 auth_type |= (conn->auth_type & 0x01);
2292 conn->auth_type = auth_type;
2294 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2295 struct hci_cp_auth_requested cp;
2297 cp.handle = cpu_to_le16(conn->handle);
2298 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
2301 /* If we're already encrypted set the REAUTH_PEND flag,
2302 * otherwise set the ENCRYPT_PEND.
2304 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2305 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2307 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2313 /* Encrypt the link */
/* Request link encryption unless an encryption change is already
 * pending (ENCRYPT_PEND acts as the in-flight guard).
 */
2314 static void hci_conn_encrypt(struct hci_conn *conn)
2316 BT_DBG("hcon %p", conn);
2318 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2319 struct hci_cp_set_conn_encrypt cp;
2320 cp.handle = cpu_to_le16(conn->handle);
2322 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2327 /* Enable security */
/* Elevate the connection's security to @sec_level: delegates to SMP for
 * LE links; for BR/EDR it checks whether the stored link key already
 * satisfies the level, otherwise triggers authentication and/or
 * encryption (elided lines carry the returns and goto labels).
 */
2328 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
2331 BT_DBG("hcon %p", conn);
2333 if (conn->type == LE_LINK)
2334 return smp_conn_security(conn, sec_level);
2336 /* For sdp we don't need the link key. */
2337 if (sec_level == BT_SECURITY_SDP)
2340 /* For non 2.1 devices and low security level we don't need the link
2342 if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
2345 /* For other security levels we need the link key. */
2346 if (!test_bit(HCI_CONN_AUTH, &conn->flags))
2349 /* An authenticated FIPS approved combination key has sufficient
2350 * security for security level 4. */
2351 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
2352 sec_level == BT_SECURITY_FIPS)
2355 /* An authenticated combination key has sufficient security for
2356 security level 3. */
2357 if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
2358 conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
2359 sec_level == BT_SECURITY_HIGH)
2362 /* An unauthenticated combination key has sufficient security for
2363 security level 1 and 2. */
2364 if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
2365 conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
2366 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
2369 /* A combination key has always sufficient security for the security
2370 levels 1 or 2. High security level requires the combination key
2371 is generated using maximum PIN code length (16).
2372 For pre 2.1 units. */
2373 if (conn->key_type == HCI_LK_COMBINATION &&
2374 (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
2375 conn->pin_length == 16))
2379 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
2383 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2385 if (!hci_conn_auth(conn, sec_level, auth_type))
2389 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2390 /* Ensure that the encryption key size has been read,
2391 * otherwise stall the upper layer responses.
2393 if (!conn->enc_key_size)
2396 /* Nothing else needed, all requirements are met */
2400 hci_conn_encrypt(conn);
2403 EXPORT_SYMBOL(hci_conn_security);
2405 /* Check secure link requirement */
/* Accept unless a HIGH/FIPS level is requested but the link only has a
 * lower security level (elided lines carry the return statements).
 */
2406 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2408 BT_DBG("hcon %p", conn);
2410 /* Accept if non-secure or higher security level is required */
2411 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2414 /* Accept if secure or higher security level is already present */
2415 if (conn->sec_level == BT_SECURITY_HIGH ||
2416 conn->sec_level == BT_SECURITY_FIPS)
2419 /* Reject not secure link */
2422 EXPORT_SYMBOL(hci_conn_check_secure);
/* Request a master/slave role switch for @conn to @role; no-op if the
 * role already matches or a switch is already pending.
 */
2425 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2427 BT_DBG("hcon %p", conn);
2429 if (role == conn->role)
2432 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2433 struct hci_cp_switch_role cp;
2434 bacpy(&cp.bdaddr, &conn->dst);
2436 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2441 EXPORT_SYMBOL(hci_conn_switch_role);
2443 /* Enter active mode */
/* Exit sniff mode on @conn when power-save permits (or @force_active),
 * and (re)arm the idle-timeout work so the link can re-enter sniff.
 */
2444 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2446 struct hci_dev *hdev = conn->hdev;
2448 BT_DBG("hcon %p mode %d", conn, conn->mode);
2450 if (conn->mode != HCI_CM_SNIFF)
2453 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2456 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2457 struct hci_cp_exit_sniff_mode cp;
2458 cp.handle = cpu_to_le16(conn->handle);
2459 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2463 if (hdev->idle_timeout > 0)
2464 queue_delayed_work(hdev->workqueue, &conn->idle_work,
2465 msecs_to_jiffies(hdev->idle_timeout));
2468 /* Drop all connection on the device */
2469 void hci_conn_hash_flush(struct hci_dev *hdev)
2471 struct list_head *head = &hdev->conn_hash.list;
2472 struct hci_conn *conn;
2474 BT_DBG("hdev %s", hdev->name);
2476 /* We should not traverse the list here, because hci_conn_del
2477 * can remove extra links, which may cause the list traversal
2478 * to hit items that have already been released.
2480 while ((conn = list_first_entry_or_null(head,
/* Close each conn and notify upper layers with local-host-term reason */
2483 conn->state = BT_CLOSED;
2484 hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
2489 /* Check pending connect attempts */
/* If an ACL conn was deferred in BT_CONNECT2 (controller busy), retry
 * its Create Connection now. Takes hdev->lock (lock call elided).
 */
2490 void hci_conn_check_pending(struct hci_dev *hdev)
2492 struct hci_conn *conn;
2494 BT_DBG("hdev %s", hdev->name);
2498 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
2500 hci_acl_create_connection(conn);
2502 hci_dev_unlock(hdev);
/* Translate the conn's role and flag bits into the HCI_LM_* bitmask
 * reported through the ioctl interface.
 */
2505 static u32 get_link_mode(struct hci_conn *conn)
2509 if (conn->role == HCI_ROLE_MASTER)
2510 link_mode |= HCI_LM_MASTER;
2512 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2513 link_mode |= HCI_LM_ENCRYPT;
2515 if (test_bit(HCI_CONN_AUTH, &conn->flags))
2516 link_mode |= HCI_LM_AUTH;
2518 if (test_bit(HCI_CONN_SECURE, &conn->flags))
2519 link_mode |= HCI_LM_SECURE;
2521 if (test_bit(HCI_CONN_FIPS, &conn->flags))
2522 link_mode |= HCI_LM_FIPS;
/* HCIGETCONNLIST ioctl backend: copy up to req.conn_num connection
 * summaries for the requested hdev to userspace. Returns 0 or -EFAULT
 * (other error paths are in elided lines).
 */
2527 int hci_get_conn_list(void __user *arg)
2530 struct hci_conn_list_req req, *cl;
2531 struct hci_conn_info *ci;
2532 struct hci_dev *hdev;
2533 int n = 0, size, err;
2535 if (copy_from_user(&req, arg, sizeof(req)))
/* Bound the requested count to keep the kmalloc below sane */
2538 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2541 size = sizeof(req) + req.conn_num * sizeof(*ci);
2543 cl = kmalloc(size, GFP_KERNEL);
2547 hdev = hci_dev_get(req.dev_id);
2556 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2557 bacpy(&(ci + n)->bdaddr, &c->dst);
2558 (ci + n)->handle = c->handle;
2559 (ci + n)->type = c->type;
2560 (ci + n)->out = c->out;
2561 (ci + n)->state = c->state;
2562 (ci + n)->link_mode = get_link_mode(c);
2563 if (++n >= req.conn_num)
2566 hci_dev_unlock(hdev);
2568 cl->dev_id = hdev->id;
/* Shrink the copy to the number of entries actually filled */
2570 size = sizeof(req) + n * sizeof(*ci);
2574 err = copy_to_user(arg, cl, size);
2577 return err ? -EFAULT : 0;
/* HCIGETCONNINFO ioctl backend: look up one connection by type/bdaddr
 * and copy its hci_conn_info to userspace (just past the request).
 */
2580 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2582 struct hci_conn_info_req req;
2583 struct hci_conn_info ci;
2584 struct hci_conn *conn;
2585 char __user *ptr = arg + sizeof(req);
2587 if (copy_from_user(&req, arg, sizeof(req)))
2591 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2593 bacpy(&ci.bdaddr, &conn->dst);
2594 ci.handle = conn->handle;
2595 ci.type = conn->type;
2597 ci.state = conn->state;
2598 ci.link_mode = get_link_mode(conn);
2600 hci_dev_unlock(hdev);
2605 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
/* HCIGETAUTHINFO ioctl backend: report the auth_type of the ACL
 * connection to the requested bdaddr back through @arg.
 */
2608 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2610 struct hci_auth_info_req req;
2611 struct hci_conn *conn;
2613 if (copy_from_user(&req, arg, sizeof(req)))
2617 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2619 req.type = conn->auth_type;
2620 hci_dev_unlock(hdev);
2625 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
/* Allocate an hci_chan on @conn (refusing if the conn is being
 * dropped), take a conn reference and add it to conn->chan_list.
 */
2628 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2630 struct hci_dev *hdev = conn->hdev;
2631 struct hci_chan *chan;
2633 BT_DBG("%s hcon %p", hdev->name, conn);
2635 if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2636 BT_DBG("Refusing to create new hci_chan");
2640 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2644 chan->conn = hci_conn_get(conn);
2645 skb_queue_head_init(&chan->data_q);
2646 chan->state = BT_CONNECTED;
2648 list_add_rcu(&chan->list, &conn->chan_list);
/* Remove @chan from its conn, block further hci_chan creation on that
 * conn, and purge the channel's pending data queue.
 */
2653 void hci_chan_del(struct hci_chan *chan)
2655 struct hci_conn *conn = chan->conn;
2656 struct hci_dev *hdev = conn->hdev;
2658 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
2660 list_del_rcu(&chan->list);
2664 /* Prevent new hci_chan's to be created for this hci_conn */
2665 set_bit(HCI_CONN_DROP, &conn->flags);
2669 skb_queue_purge(&chan->data_q);
/* Delete every hci_chan attached to @conn (safe iteration since
 * hci_chan_del unlinks entries).
 */
2673 void hci_chan_list_flush(struct hci_conn *conn)
2675 struct hci_chan *chan, *n;
2677 BT_DBG("hcon %p", conn);
2679 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
/* Find the hci_chan with @handle on a single connection, or NULL
 * (elided lines carry the returns).
 */
2683 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2686 struct hci_chan *hchan;
2688 list_for_each_entry(hchan, &hcon->chan_list, list) {
2689 if (hchan->handle == handle)
/* Search every connection on @hdev for a channel with @handle; RCU
 * iteration over the conn hash (lock/unlock lines elided).
 */
2696 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2698 struct hci_conn_hash *h = &hdev->conn_hash;
2699 struct hci_conn *hcon;
2700 struct hci_chan *hchan = NULL;
2704 list_for_each_entry_rcu(hcon, &h->list, list) {
2705 hchan = __hci_chan_lookup_handle(hcon, handle);
/* Derive the BT_PHY_* bitmask supported by @conn from its link type,
 * packet-type mask (BR/EDR, where a SET bit in pkt_type DISABLES an EDR
 * packet) and negotiated LE PHYs.
 */
2715 u32 hci_conn_get_phy(struct hci_conn *conn)
2719 /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2720 * Table 6.2: Packets defined for synchronous, asynchronous, and
2721 * CPB logical transport types.
2723 switch (conn->type) {
2725 /* SCO logical transport (1 Mb/s):
2726 * HV1, HV2, HV3 and DV.
2728 phys |= BT_PHY_BR_1M_1SLOT;
2733 /* ACL logical transport (1 Mb/s) ptt=0:
2734 * DH1, DM3, DH3, DM5 and DH5.
2736 phys |= BT_PHY_BR_1M_1SLOT;
2738 if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2739 phys |= BT_PHY_BR_1M_3SLOT;
2741 if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2742 phys |= BT_PHY_BR_1M_5SLOT;
2744 /* ACL logical transport (2 Mb/s) ptt=1:
2745 * 2-DH1, 2-DH3 and 2-DH5.
2747 if (!(conn->pkt_type & HCI_2DH1))
2748 phys |= BT_PHY_EDR_2M_1SLOT;
2750 if (!(conn->pkt_type & HCI_2DH3))
2751 phys |= BT_PHY_EDR_2M_3SLOT;
2753 if (!(conn->pkt_type & HCI_2DH5))
2754 phys |= BT_PHY_EDR_2M_5SLOT;
2756 /* ACL logical transport (3 Mb/s) ptt=1:
2757 * 3-DH1, 3-DH3 and 3-DH5.
2759 if (!(conn->pkt_type & HCI_3DH1))
2760 phys |= BT_PHY_EDR_3M_1SLOT;
2762 if (!(conn->pkt_type & HCI_3DH3))
2763 phys |= BT_PHY_EDR_3M_3SLOT;
2765 if (!(conn->pkt_type & HCI_3DH5))
2766 phys |= BT_PHY_EDR_3M_5SLOT;
2771 /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2772 phys |= BT_PHY_BR_1M_1SLOT;
2774 if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2775 phys |= BT_PHY_BR_1M_3SLOT;
2777 /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2778 if (!(conn->pkt_type & ESCO_2EV3))
2779 phys |= BT_PHY_EDR_2M_1SLOT;
2781 if (!(conn->pkt_type & ESCO_2EV5))
2782 phys |= BT_PHY_EDR_2M_3SLOT;
2784 /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2785 if (!(conn->pkt_type & ESCO_3EV3))
2786 phys |= BT_PHY_EDR_3M_1SLOT;
2788 if (!(conn->pkt_type & ESCO_3EV5))
2789 phys |= BT_PHY_EDR_3M_3SLOT;
2794 if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2795 phys |= BT_PHY_LE_1M_TX;
2797 if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2798 phys |= BT_PHY_LE_1M_RX;
2800 if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2801 phys |= BT_PHY_LE_2M_TX;
2803 if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
2804 phys |= BT_PHY_LE_2M_RX;
2806 if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
2807 phys |= BT_PHY_LE_CODED_TX;
2809 if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
2810 phys |= BT_PHY_LE_CODED_RX;
2818 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2822 if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
2825 switch (conn->state) {
2828 if (conn->type == AMP_LINK) {
2829 struct hci_cp_disconn_phy_link cp;
2831 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2833 r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
2836 struct hci_cp_disconnect dc;
2838 dc.handle = cpu_to_le16(conn->handle);
2840 r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT,
2844 conn->state = BT_DISCONN;
2848 if (conn->type == LE_LINK) {
2849 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2851 r = hci_send_cmd(conn->hdev,
2852 HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
2853 } else if (conn->type == ACL_LINK) {
2854 if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
2856 r = hci_send_cmd(conn->hdev,
2857 HCI_OP_CREATE_CONN_CANCEL,
2862 if (conn->type == ACL_LINK) {
2863 struct hci_cp_reject_conn_req rej;
2865 bacpy(&rej.bdaddr, &conn->dst);
2866 rej.reason = reason;
2868 r = hci_send_cmd(conn->hdev,
2869 HCI_OP_REJECT_CONN_REQ,
2871 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2872 struct hci_cp_reject_sync_conn_req rej;
2874 bacpy(&rej.bdaddr, &conn->dst);
2876 /* SCO rejection has its own limited set of
2877 * allowed error values (0x0D-0x0F) which isn't
2878 * compatible with most values passed to this
2879 * function. To be safe hard-code one of the
2880 * values that's suitable for SCO.
2882 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2884 r = hci_send_cmd(conn->hdev,
2885 HCI_OP_REJECT_SYNC_CONN_REQ,
2890 conn->state = BT_CLOSED;