net/bluetooth/hci_conn.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023-2024 NXP
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI connection handling. */
27
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
36
37 #include "hci_request.h"
38 #include "smp.h"
39 #include "eir.h"
40
41 struct sco_param {
42         u16 pkt_type;
43         u16 max_latency;
44         u8  retrans_effort;
45 };
46
47 struct conn_handle_t {
48         struct hci_conn *conn;
49         __u16 handle;
50 };
51
52 static const struct sco_param esco_param_cvsd[] = {
53         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,   0x01 }, /* S3 */
54         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,   0x01 }, /* S2 */
55         { EDR_ESCO_MASK | ESCO_EV3,   0x0007,   0x01 }, /* S1 */
56         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0x01 }, /* D1 */
57         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0x01 }, /* D0 */
58 };
59
60 static const struct sco_param sco_param_cvsd[] = {
61         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0xff }, /* D1 */
62         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0xff }, /* D0 */
63 };
64
65 static const struct sco_param esco_param_msbc[] = {
66         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,   0x02 }, /* T2 */
67         { EDR_ESCO_MASK | ESCO_EV3,   0x0008,   0x02 }, /* T1 */
68 };
69
70 /* This function requires the caller holds hdev->lock */
71 void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
72 {
73         struct hci_conn_params *params;
74         struct hci_dev *hdev = conn->hdev;
75         struct smp_irk *irk;
76         bdaddr_t *bdaddr;
77         u8 bdaddr_type;
78
79         bdaddr = &conn->dst;
80         bdaddr_type = conn->dst_type;
81
82         /* Check if we need to convert to identity address */
83         irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
84         if (irk) {
85                 bdaddr = &irk->bdaddr;
86                 bdaddr_type = irk->addr_type;
87         }
88
89         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
90                                            bdaddr_type);
91         if (!params)
92                 return;
93
94         if (params->conn) {
95                 hci_conn_drop(params->conn);
96                 hci_conn_put(params->conn);
97                 params->conn = NULL;
98         }
99
100         if (!params->explicit_connect)
101                 return;
102
103         /* If the status indicates successful cancellation of
104          * the attempt (i.e. Unknown Connection Id) there's no point in
105          * notifying failure, since we'll go back and keep trying to
106          * connect. The only exception is explicit connect requests
107          * where a timeout + cancel does indicate an actual failure.
108          */
109         if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
110                 mgmt_connect_failed(hdev, &conn->dst, conn->type,
111                                     conn->dst_type, status);
112
113         /* The connection attempt was doing a scan for a new RPA and is still
114          * in the scanning phase. If the params are not associated with any other
115          * autoconnect action, remove them completely. If they are, just unmark
116          * them as waiting for connection by clearing the explicit_connect field.
117          */
118         params->explicit_connect = false;
119
120         hci_pend_le_list_del_init(params);
121
122         switch (params->auto_connect) {
123         case HCI_AUTO_CONN_EXPLICIT:
124                 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
125                 /* return instead of break to avoid duplicate scan update */
126                 return;
127         case HCI_AUTO_CONN_DIRECT:
128         case HCI_AUTO_CONN_ALWAYS:
129                 hci_pend_le_list_add(params, &hdev->pend_le_conns);
130                 break;
131         case HCI_AUTO_CONN_REPORT:
132                 hci_pend_le_list_add(params, &hdev->pend_le_reports);
133                 break;
134         default:
135                 break;
136         }
137
138         hci_update_passive_scan(hdev);
139 }
140
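/* Release the resources still attached to a connection object: flush its
 * HCI channels, remove it from the connection hash, free a still-unset
 * handle back to the IDA, run the type specific cleanup callback, notify
 * the driver, and drop the debugfs/sysfs entries and the hdev reference.
 */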
141 static void hci_conn_cleanup(struct hci_conn *conn)
142 {
143         struct hci_dev *hdev = conn->hdev;
144
145         if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
146                 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
147
148         if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
149                 hci_remove_link_key(hdev, &conn->dst);
150
151         hci_chan_list_flush(conn);
152
153         hci_conn_hash_del(hdev, conn);
154
155         if (HCI_CONN_HANDLE_UNSET(conn->handle))
156                 ida_free(&hdev->unset_handle_ida, conn->handle);
157
158         if (conn->cleanup)
159                 conn->cleanup(conn);
160
161         if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
162                 switch (conn->setting & SCO_AIRMODE_MASK) {
163                 case SCO_AIRMODE_CVSD:
164                 case SCO_AIRMODE_TRANSP:
165                         if (hdev->notify)
166                                 hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
167                         break;
168                 }
169         } else {
170                 if (hdev->notify)
171                         hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
172         }
173
174         debugfs_remove_recursive(conn->debugfs);
175
176         hci_conn_del_sysfs(conn);
177
178         hci_dev_put(hdev);
179 }
180
181 int hci_disconnect(struct hci_conn *conn, __u8 reason)
182 {
183         BT_DBG("hcon %p", conn);
184
185         /* When we are central of an established connection and it enters
186          * the disconnect timeout, then go ahead and try to read the
187          * current clock offset.  Processing of the result is done
188          * within the event handling and hci_clock_offset_evt function.
189          */
190         if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
191             (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
192                 struct hci_dev *hdev = conn->hdev;
193                 struct hci_cp_read_clock_offset clkoff_cp;
194
195                 clkoff_cp.handle = cpu_to_le16(conn->handle);
196                 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
197                              &clkoff_cp);
198         }
199
200         return hci_abort_conn(conn, reason);
201 }
202
203 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
204 {
205         struct hci_dev *hdev = conn->hdev;
206         struct hci_cp_add_sco cp;
207
208         BT_DBG("hcon %p", conn);
209
210         conn->state = BT_CONNECT;
211         conn->out = true;
212
213         conn->attempt++;
214
215         cp.handle   = cpu_to_le16(handle);
216         cp.pkt_type = cpu_to_le16(conn->pkt_type);
217
218         hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
219 }
220
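/* Advance conn->attempt past eSCO parameter entries that depend on eSCO 2M
 * packets when the remote device on the parent ACL link does not support
 * them. Returns true if a usable entry is left in the table.
 */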
221 static bool find_next_esco_param(struct hci_conn *conn,
222                                  const struct sco_param *esco_param, int size)
223 {
224         if (!conn->parent)
225                 return false;
226
227         for (; conn->attempt <= size; conn->attempt++) {
228                 if (lmp_esco_2m_capable(conn->parent) ||
229                     (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
230                         break;
231                 BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
232                        conn, conn->attempt);
233         }
234
235         return conn->attempt <= size;
236 }
237
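/* Configure the codec data path for an offloaded codec: fetch the vendor
 * specific configuration from the driver and send the Configure Data Path
 * command once for each direction.
 */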
238 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
239 {
240         int err;
241         __u8 vnd_len, *vnd_data = NULL;
242         struct hci_op_configure_data_path *cmd = NULL;
243
244         if (!codec->data_path || !hdev->get_codec_config_data)
245                 return 0;
246
247         /* A missing get_codec_config_data callback is not an error */
248         if (!hdev->get_codec_config_data)
249                 return 0;
250
251         err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
252                                           &vnd_data);
253         if (err < 0)
254                 goto error;
255
256         cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
257         if (!cmd) {
258                 err = -ENOMEM;
259                 goto error;
260         }
261
262         err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
263         if (err < 0)
264                 goto error;
265
266         cmd->vnd_len = vnd_len;
267         memcpy(cmd->vnd_data, vnd_data, vnd_len);
268
269         cmd->direction = 0x00;
270         __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
271                               sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
272
273         cmd->direction = 0x01;
274         err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
275                                     sizeof(*cmd) + vnd_len, cmd,
276                                     HCI_CMD_TIMEOUT);
277 error:
278
279         kfree(cmd);
280         kfree(vnd_data);
281         return err;
282 }
283
284 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
285 {
286         struct conn_handle_t *conn_handle = data;
287         struct hci_conn *conn = conn_handle->conn;
288         __u16 handle = conn_handle->handle;
289         struct hci_cp_enhanced_setup_sync_conn cp;
290         const struct sco_param *param;
291
292         kfree(conn_handle);
293
294         bt_dev_dbg(hdev, "hcon %p", conn);
295
296         configure_datapath_sync(hdev, &conn->codec);
297
298         conn->state = BT_CONNECT;
299         conn->out = true;
300
301         conn->attempt++;
302
303         memset(&cp, 0x00, sizeof(cp));
304
305         cp.handle   = cpu_to_le16(handle);
306
307         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
308         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
309
310         switch (conn->codec.id) {
311         case BT_CODEC_MSBC:
312                 if (!find_next_esco_param(conn, esco_param_msbc,
313                                           ARRAY_SIZE(esco_param_msbc)))
314                         return -EINVAL;
315
316                 param = &esco_param_msbc[conn->attempt - 1];
317                 cp.tx_coding_format.id = 0x05;
318                 cp.rx_coding_format.id = 0x05;
319                 cp.tx_codec_frame_size = __cpu_to_le16(60);
320                 cp.rx_codec_frame_size = __cpu_to_le16(60);
321                 cp.in_bandwidth = __cpu_to_le32(32000);
322                 cp.out_bandwidth = __cpu_to_le32(32000);
323                 cp.in_coding_format.id = 0x04;
324                 cp.out_coding_format.id = 0x04;
325                 cp.in_coded_data_size = __cpu_to_le16(16);
326                 cp.out_coded_data_size = __cpu_to_le16(16);
327                 cp.in_pcm_data_format = 2;
328                 cp.out_pcm_data_format = 2;
329                 cp.in_pcm_sample_payload_msb_pos = 0;
330                 cp.out_pcm_sample_payload_msb_pos = 0;
331                 cp.in_data_path = conn->codec.data_path;
332                 cp.out_data_path = conn->codec.data_path;
333                 cp.in_transport_unit_size = 1;
334                 cp.out_transport_unit_size = 1;
335                 break;
336
337         case BT_CODEC_TRANSPARENT:
338                 if (!find_next_esco_param(conn, esco_param_msbc,
339                                           ARRAY_SIZE(esco_param_msbc)))
340                         return -EINVAL;
341                 param = &esco_param_msbc[conn->attempt - 1];
342                 cp.tx_coding_format.id = 0x03;
343                 cp.rx_coding_format.id = 0x03;
344                 cp.tx_codec_frame_size = __cpu_to_le16(60);
345                 cp.rx_codec_frame_size = __cpu_to_le16(60);
346                 cp.in_bandwidth = __cpu_to_le32(0x1f40);
347                 cp.out_bandwidth = __cpu_to_le32(0x1f40);
348                 cp.in_coding_format.id = 0x03;
349                 cp.out_coding_format.id = 0x03;
350                 cp.in_coded_data_size = __cpu_to_le16(16);
351                 cp.out_coded_data_size = __cpu_to_le16(16);
352                 cp.in_pcm_data_format = 2;
353                 cp.out_pcm_data_format = 2;
354                 cp.in_pcm_sample_payload_msb_pos = 0;
355                 cp.out_pcm_sample_payload_msb_pos = 0;
356                 cp.in_data_path = conn->codec.data_path;
357                 cp.out_data_path = conn->codec.data_path;
358                 cp.in_transport_unit_size = 1;
359                 cp.out_transport_unit_size = 1;
360                 break;
361
362         case BT_CODEC_CVSD:
363                 if (conn->parent && lmp_esco_capable(conn->parent)) {
364                         if (!find_next_esco_param(conn, esco_param_cvsd,
365                                                   ARRAY_SIZE(esco_param_cvsd)))
366                                 return -EINVAL;
367                         param = &esco_param_cvsd[conn->attempt - 1];
368                 } else {
369                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
370                                 return -EINVAL;
371                         param = &sco_param_cvsd[conn->attempt - 1];
372                 }
373                 cp.tx_coding_format.id = 2;
374                 cp.rx_coding_format.id = 2;
375                 cp.tx_codec_frame_size = __cpu_to_le16(60);
376                 cp.rx_codec_frame_size = __cpu_to_le16(60);
377                 cp.in_bandwidth = __cpu_to_le32(16000);
378                 cp.out_bandwidth = __cpu_to_le32(16000);
379                 cp.in_coding_format.id = 4;
380                 cp.out_coding_format.id = 4;
381                 cp.in_coded_data_size = __cpu_to_le16(16);
382                 cp.out_coded_data_size = __cpu_to_le16(16);
383                 cp.in_pcm_data_format = 2;
384                 cp.out_pcm_data_format = 2;
385                 cp.in_pcm_sample_payload_msb_pos = 0;
386                 cp.out_pcm_sample_payload_msb_pos = 0;
387                 cp.in_data_path = conn->codec.data_path;
388                 cp.out_data_path = conn->codec.data_path;
389                 cp.in_transport_unit_size = 16;
390                 cp.out_transport_unit_size = 16;
391                 break;
392         default:
393                 return -EINVAL;
394         }
395
396         cp.retrans_effort = param->retrans_effort;
397         cp.pkt_type = __cpu_to_le16(param->pkt_type);
398         cp.max_latency = __cpu_to_le16(param->max_latency);
399
400         if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
401                 return -EIO;
402
403         return 0;
404 }
405
406 static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
407 {
408         struct hci_dev *hdev = conn->hdev;
409         struct hci_cp_setup_sync_conn cp;
410         const struct sco_param *param;
411
412         bt_dev_dbg(hdev, "hcon %p", conn);
413
414         conn->state = BT_CONNECT;
415         conn->out = true;
416
417         conn->attempt++;
418
419         cp.handle   = cpu_to_le16(handle);
420
421         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
422         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
423         cp.voice_setting  = cpu_to_le16(conn->setting);
424
425         switch (conn->setting & SCO_AIRMODE_MASK) {
426         case SCO_AIRMODE_TRANSP:
427                 if (!find_next_esco_param(conn, esco_param_msbc,
428                                           ARRAY_SIZE(esco_param_msbc)))
429                         return false;
430                 param = &esco_param_msbc[conn->attempt - 1];
431                 break;
432         case SCO_AIRMODE_CVSD:
433                 if (conn->parent && lmp_esco_capable(conn->parent)) {
434                         if (!find_next_esco_param(conn, esco_param_cvsd,
435                                                   ARRAY_SIZE(esco_param_cvsd)))
436                                 return false;
437                         param = &esco_param_cvsd[conn->attempt - 1];
438                 } else {
439                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
440                                 return false;
441                         param = &sco_param_cvsd[conn->attempt - 1];
442                 }
443                 break;
444         default:
445                 return false;
446         }
447
448         cp.retrans_effort = param->retrans_effort;
449         cp.pkt_type = __cpu_to_le16(param->pkt_type);
450         cp.max_latency = __cpu_to_le16(param->max_latency);
451
452         if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
453                 return false;
454
455         return true;
456 }
457
458 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
459 {
460         int result;
461         struct conn_handle_t *conn_handle;
462
463         if (enhanced_sync_conn_capable(conn->hdev)) {
464                 conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
465
466                 if (!conn_handle)
467                         return false;
468
469                 conn_handle->conn = conn;
470                 conn_handle->handle = handle;
471                 result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
472                                             conn_handle, NULL);
473                 if (result < 0)
474                         kfree(conn_handle);
475
476                 return result == 0;
477         }
478
479         return hci_setup_sync_conn(conn, handle);
480 }
481
482 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
483                       u16 to_multiplier)
484 {
485         struct hci_dev *hdev = conn->hdev;
486         struct hci_conn_params *params;
487         struct hci_cp_le_conn_update cp;
488
489         hci_dev_lock(hdev);
490
491         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
492         if (params) {
493                 params->conn_min_interval = min;
494                 params->conn_max_interval = max;
495                 params->conn_latency = latency;
496                 params->supervision_timeout = to_multiplier;
497         }
498
499         hci_dev_unlock(hdev);
500
501         memset(&cp, 0, sizeof(cp));
502         cp.handle               = cpu_to_le16(conn->handle);
503         cp.conn_interval_min    = cpu_to_le16(min);
504         cp.conn_interval_max    = cpu_to_le16(max);
505         cp.conn_latency         = cpu_to_le16(latency);
506         cp.supervision_timeout  = cpu_to_le16(to_multiplier);
507         cp.min_ce_len           = cpu_to_le16(0x0000);
508         cp.max_ce_len           = cpu_to_le16(0x0000);
509
510         hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
511
512         if (params)
513                 return 0x01;
514
515         return 0x00;
516 }
517
518 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
519                       __u8 ltk[16], __u8 key_size)
520 {
521         struct hci_dev *hdev = conn->hdev;
522         struct hci_cp_le_start_enc cp;
523
524         BT_DBG("hcon %p", conn);
525
526         memset(&cp, 0, sizeof(cp));
527
528         cp.handle = cpu_to_le16(conn->handle);
529         cp.rand = rand;
530         cp.ediv = ediv;
531         memcpy(cp.ltk, ltk, key_size);
532
533         hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
534 }
535
536 /* Device _must_ be locked */
537 void hci_sco_setup(struct hci_conn *conn, __u8 status)
538 {
539         struct hci_link *link;
540
541         link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
542         if (!link || !link->conn)
543                 return;
544
545         BT_DBG("hcon %p", conn);
546
547         if (!status) {
548                 if (lmp_esco_capable(conn->hdev))
549                         hci_setup_sync(link->conn, conn->handle);
550                 else
551                         hci_add_sco(link->conn, conn->handle);
552         } else {
553                 hci_connect_cfm(link->conn, status);
554                 hci_conn_del(link->conn);
555         }
556 }
557
558 static void hci_conn_timeout(struct work_struct *work)
559 {
560         struct hci_conn *conn = container_of(work, struct hci_conn,
561                                              disc_work.work);
562         int refcnt = atomic_read(&conn->refcnt);
563
564         BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
565
566         WARN_ON(refcnt < 0);
567
568         /* FIXME: It was observed that in pairing failed scenario, refcnt
569          * drops below 0. Probably this is because l2cap_conn_del calls
570          * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
571          * dropped. After that loop hci_chan_del is called which also drops
572          * conn. For now make sure that ACL is alive if refcnt is higher than 0,
573          * otherwise drop it.
574          */
575         if (refcnt > 0)
576                 return;
577
578         hci_abort_conn(conn, hci_proto_disconn_ind(conn));
579 }
580
581 /* Enter sniff mode */
582 static void hci_conn_idle(struct work_struct *work)
583 {
584         struct hci_conn *conn = container_of(work, struct hci_conn,
585                                              idle_work.work);
586         struct hci_dev *hdev = conn->hdev;
587
588         BT_DBG("hcon %p mode %d", conn, conn->mode);
589
590         if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
591                 return;
592
593         if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
594                 return;
595
596         if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
597                 struct hci_cp_sniff_subrate cp;
598                 cp.handle             = cpu_to_le16(conn->handle);
599                 cp.max_latency        = cpu_to_le16(0);
600                 cp.min_remote_timeout = cpu_to_le16(0);
601                 cp.min_local_timeout  = cpu_to_le16(0);
602                 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
603         }
604
605         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
606                 struct hci_cp_sniff_mode cp;
607                 cp.handle       = cpu_to_le16(conn->handle);
608                 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
609                 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
610                 cp.attempt      = cpu_to_le16(4);
611                 cp.timeout      = cpu_to_le16(1);
612                 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
613         }
614 }
615
616 static void hci_conn_auto_accept(struct work_struct *work)
617 {
618         struct hci_conn *conn = container_of(work, struct hci_conn,
619                                              auto_accept_work.work);
620
621         hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
622                      &conn->dst);
623 }
624
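/* Disable LE advertising, using the extended advertising command when the
 * controller supports it.
 */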
625 static void le_disable_advertising(struct hci_dev *hdev)
626 {
627         if (ext_adv_capable(hdev)) {
628                 struct hci_cp_le_set_ext_adv_enable cp;
629
630                 cp.enable = 0x00;
631                 cp.num_of_sets = 0x00;
632
633                 hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
634                              &cp);
635         } else {
636                 u8 enable = 0x00;
637                 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
638                              &enable);
639         }
640 }
641
642 static void le_conn_timeout(struct work_struct *work)
643 {
644         struct hci_conn *conn = container_of(work, struct hci_conn,
645                                              le_conn_timeout.work);
646         struct hci_dev *hdev = conn->hdev;
647
648         BT_DBG("");
649
650         /* We could end up here due to having done directed advertising,
651          * so clean up the state if necessary. This should however only
652          * happen with broken hardware or if low duty cycle was used
653          * (which doesn't have a timeout of its own).
654          */
655         if (conn->role == HCI_ROLE_SLAVE) {
656                 /* Disable LE Advertising */
657                 le_disable_advertising(hdev);
658                 hci_dev_lock(hdev);
659                 hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
660                 hci_dev_unlock(hdev);
661                 return;
662         }
663
664         hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
665 }
666
667 struct iso_cig_params {
668         struct hci_cp_le_set_cig_params cp;
669         struct hci_cis_params cis[0x1f];
670 };
671
672 struct iso_list_data {
673         union {
674                 u8  cig;
675                 u8  big;
676         };
677         union {
678                 u8  cis;
679                 u8  bis;
680                 u16 sync_handle;
681         };
682         int count;
683         bool big_term;
684         bool pa_sync_term;
685         bool big_sync_term;
686 };
687
688 static void bis_list(struct hci_conn *conn, void *data)
689 {
690         struct iso_list_data *d = data;
691
692         /* Skip if not broadcast/ANY address */
693         if (bacmp(&conn->dst, BDADDR_ANY))
694                 return;
695
696         if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
697             d->bis != conn->iso_qos.bcast.bis)
698                 return;
699
700         d->count++;
701 }
702
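/* Remove the periodic advertising and the advertising instance used by the
 * BIS, then terminate the BIG if it was actually created.
 */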
703 static int terminate_big_sync(struct hci_dev *hdev, void *data)
704 {
705         struct iso_list_data *d = data;
706
707         bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
708
709         hci_disable_per_advertising_sync(hdev, d->bis);
710         hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
711
712         /* Only terminate BIG if it has been created */
713         if (!d->big_term)
714                 return 0;
715
716         return hci_le_terminate_big_sync(hdev, d->big,
717                                          HCI_ERROR_LOCAL_HOST_TERM);
718 }
719
720 static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
721 {
722         kfree(data);
723 }
724
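/* Queue terminate_big_sync for the BIG/BIS of a broadcaster connection. */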
725 static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
726 {
727         struct iso_list_data *d;
728         int ret;
729
730         bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
731                    conn->iso_qos.bcast.bis);
732
733         d = kzalloc(sizeof(*d), GFP_KERNEL);
734         if (!d)
735                 return -ENOMEM;
736
737         d->big = conn->iso_qos.bcast.big;
738         d->bis = conn->iso_qos.bcast.bis;
739         d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);
740
741         ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
742                                  terminate_big_destroy);
743         if (ret)
744                 kfree(d);
745
746         return ret;
747 }
748
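/* Terminate the BIG sync and/or the PA sync of a broadcast receiver,
 * depending on which flags were set when the termination was queued.
 */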
749 static int big_terminate_sync(struct hci_dev *hdev, void *data)
750 {
751         struct iso_list_data *d = data;
752
753         bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
754                    d->sync_handle);
755
756         if (d->big_sync_term)
757                 hci_le_big_terminate_sync(hdev, d->big);
758
759         if (d->pa_sync_term)
760                 return hci_le_pa_terminate_sync(hdev, d->sync_handle);
761
762         return 0;
763 }
764
765 static void find_bis(struct hci_conn *conn, void *data)
766 {
767         struct iso_list_data *d = data;
768
769         /* Ignore if BIG doesn't match */
770         if (d->big != conn->iso_qos.bcast.big)
771                 return;
772
773         d->count++;
774 }
775
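/* Queue termination of the BIG sync and PA sync of a broadcast receiver,
 * but only for the syncs that no other connection in the BIG still uses.
 */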
776 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
777 {
778         struct iso_list_data *d;
779         int ret;
780
781         bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);
782
783         d = kzalloc(sizeof(*d), GFP_KERNEL);
784         if (!d)
785                 return -ENOMEM;
786
787         memset(d, 0, sizeof(*d));
788         d->big = big;
789         d->sync_handle = conn->sync_handle;
790
791         if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) {
792                 hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
793                                         HCI_CONN_PA_SYNC, d);
794
795                 if (!d->count)
796                         d->pa_sync_term = true;
797
798                 d->count = 0;
799         }
800
801         if (test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
802                 hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
803                                         HCI_CONN_BIG_SYNC, d);
804
805                 if (!d->count)
806                         d->big_sync_term = true;
807         }
808
809         ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
810                                  terminate_big_destroy);
811         if (ret)
812                 kfree(d);
813
814         return ret;
815 }
816
817 /* Cleanup BIS connection
818  *
819  * Detects if there are any BIS left connected in a BIG.
820  * broadcaster: Remove advertising instance and terminate BIG.
821  * broadcast receiver: Terminate BIG sync and terminate PA sync.
822  */
823 static void bis_cleanup(struct hci_conn *conn)
824 {
825         struct hci_dev *hdev = conn->hdev;
826         struct hci_conn *bis;
827
828         bt_dev_dbg(hdev, "conn %p", conn);
829
830         if (conn->role == HCI_ROLE_MASTER) {
831                 if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
832                         return;
833
834                 /* Check if ISO connection is a BIS and terminate advertising
835                  * set and BIG if there are no other connections using it.
836                  */
837                 bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
838                 if (bis)
839                         return;
840
841                 hci_le_terminate_big(hdev, conn);
842         } else {
843                 hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
844                                      conn);
845         }
846 }
847
848 static int remove_cig_sync(struct hci_dev *hdev, void *data)
849 {
850         u8 handle = PTR_UINT(data);
851
852         return hci_le_remove_cig_sync(hdev, handle);
853 }
854
855 static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
856 {
857         bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
858
859         return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
860                                   NULL);
861 }
862
863 static void find_cis(struct hci_conn *conn, void *data)
864 {
865         struct iso_list_data *d = data;
866
867         /* Ignore broadcast or if the CIG doesn't match */
868         if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
869                 return;
870
871         d->count++;
872 }
873
874 /* Cleanup CIS connection:
875  *
876  * Detects whether any CIS are left connected in a CIG and removes the CIG if none remain.
877  */
878 static void cis_cleanup(struct hci_conn *conn)
879 {
880         struct hci_dev *hdev = conn->hdev;
881         struct iso_list_data d;
882
883         if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
884                 return;
885
886         memset(&d, 0, sizeof(d));
887         d.cig = conn->iso_qos.ucast.cig;
888
889         /* Check if ISO connection is a CIS and remove CIG if there are
890          * no other connections using it.
891          */
892         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
893         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
894         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
895         if (d.count)
896                 return;
897
898         hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
899 }
900
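/* Allocate a temporary handle above HCI_CONN_HANDLE_MAX for a connection
 * whose real handle is not known yet.
 */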
901 static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
902 {
903         return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
904                                U16_MAX, GFP_ATOMIC);
905 }
906
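/* Allocate and initialise a new connection object with the given handle,
 * set up its type specific defaults and delayed work, add it to the
 * connection hash and register it with sysfs.
 */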
907 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
908                               u8 role, u16 handle)
909 {
910         struct hci_conn *conn;
911
912         bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
913
914         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
915         if (!conn)
916                 return NULL;
917
918         bacpy(&conn->dst, dst);
919         bacpy(&conn->src, &hdev->bdaddr);
920         conn->handle = handle;
921         conn->hdev  = hdev;
922         conn->type  = type;
923         conn->role  = role;
924         conn->mode  = HCI_CM_ACTIVE;
925         conn->state = BT_OPEN;
926         conn->auth_type = HCI_AT_GENERAL_BONDING;
927         conn->io_capability = hdev->io_capability;
928         conn->remote_auth = 0xff;
929         conn->key_type = 0xff;
930         conn->rssi = HCI_RSSI_INVALID;
931         conn->tx_power = HCI_TX_POWER_INVALID;
932         conn->max_tx_power = HCI_TX_POWER_INVALID;
933         conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
934
935         set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
936         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
937
938         /* Set Default Authenticated payload timeout to 30s */
939         conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
940
941         if (conn->role == HCI_ROLE_MASTER)
942                 conn->out = true;
943
944         switch (type) {
945         case ACL_LINK:
946                 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
947                 break;
948         case LE_LINK:
949                 /* conn->src should reflect the local identity address */
950                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
951                 break;
952         case ISO_LINK:
953                 /* conn->src should reflect the local identity address */
954                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
955
956                 /* set proper cleanup function */
957                 if (!bacmp(dst, BDADDR_ANY))
958                         conn->cleanup = bis_cleanup;
959                 else if (conn->role == HCI_ROLE_MASTER)
960                         conn->cleanup = cis_cleanup;
961
962                 break;
963         case SCO_LINK:
964                 if (lmp_esco_capable(hdev))
965                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
966                                         (hdev->esco_type & EDR_ESCO_MASK);
967                 else
968                         conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
969                 break;
970         case ESCO_LINK:
971                 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
972                 break;
973         }
974
975         skb_queue_head_init(&conn->data_q);
976
977         INIT_LIST_HEAD(&conn->chan_list);
978         INIT_LIST_HEAD(&conn->link_list);
979
980         INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
981         INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
982         INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
983         INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
984
985         atomic_set(&conn->refcnt, 0);
986
987         hci_dev_hold(hdev);
988
989         hci_conn_hash_add(hdev, conn);
990
991         /* The SCO and eSCO connections will only be notified when their
992          * setup has been completed. This is different from ACL links, which
993          * can be notified right away.
994          */
995         if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
996                 if (hdev->notify)
997                         hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
998         }
999
1000         hci_conn_init_sysfs(conn);
1001
1002         return conn;
1003 }
1004
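/* Add a connection whose handle is not known yet, using a temporary handle
 * from the unset handle IDA.
 */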
1005 struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
1006                                     bdaddr_t *dst, u8 role)
1007 {
1008         int handle;
1009
1010         bt_dev_dbg(hdev, "dst %pMR", dst);
1011
1012         handle = hci_conn_hash_alloc_unset(hdev);
1013         if (unlikely(handle < 0))
1014                 return NULL;
1015
1016         return hci_conn_add(hdev, type, dst, role, handle);
1017 }
1018
1019 static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
1020 {
1021         if (!reason)
1022                 reason = HCI_ERROR_REMOTE_USER_TERM;
1023
1024         /* Due to a race, the SCO/ISO conn might not be established yet at this point,
1025          * and nothing else will clean it up. In other cases it is done via HCI
1026          * events.
1027          */
1028         switch (conn->type) {
1029         case SCO_LINK:
1030         case ESCO_LINK:
1031                 if (HCI_CONN_HANDLE_UNSET(conn->handle))
1032                         hci_conn_failed(conn, reason);
1033                 break;
1034         case ISO_LINK:
1035                 if ((conn->state != BT_CONNECTED &&
1036                     !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) ||
1037                     test_bit(HCI_CONN_BIG_CREATED, &conn->flags))
1038                         hci_conn_failed(conn, reason);
1039                 break;
1040         }
1041 }
1042
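/* Unlink a connection from its parent: for a parent ACL, recursively unlink
 * and clean up all child SCO/ISO connections; for a child, remove the link
 * entry and drop the reference on the parent.
 */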
1043 static void hci_conn_unlink(struct hci_conn *conn)
1044 {
1045         struct hci_dev *hdev = conn->hdev;
1046
1047         bt_dev_dbg(hdev, "hcon %p", conn);
1048
1049         if (!conn->parent) {
1050                 struct hci_link *link, *t;
1051
1052                 list_for_each_entry_safe(link, t, &conn->link_list, list) {
1053                         struct hci_conn *child = link->conn;
1054
1055                         hci_conn_unlink(child);
1056
1057                         /* If hdev is down it means
1058                          * hci_dev_close_sync/hci_conn_hash_flush is in progress
1059                          * and links don't need to be cleaned up as all connections
1060                          * will be cleaned up.
1061                          */
1062                         if (!test_bit(HCI_UP, &hdev->flags))
1063                                 continue;
1064
1065                         hci_conn_cleanup_child(child, conn->abort_reason);
1066                 }
1067
1068                 return;
1069         }
1070
1071         if (!conn->link)
1072                 return;
1073
1074         list_del_rcu(&conn->link->list);
1075         synchronize_rcu();
1076
1077         hci_conn_drop(conn->parent);
1078         hci_conn_put(conn->parent);
1079         conn->parent = NULL;
1080
1081         kfree(conn->link);
1082         conn->link = NULL;
1083 }
1084
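/* Delete a connection: unlink it from any parent or children, cancel its
 * delayed work, return unacked frame credits to the controller counters,
 * purge queued data, run the common cleanup and dequeue any pending
 * hci_cmd_sync callbacks that reference it.
 */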
1085 void hci_conn_del(struct hci_conn *conn)
1086 {
1087         struct hci_dev *hdev = conn->hdev;
1088
1089         BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1090
1091         hci_conn_unlink(conn);
1092
1093         cancel_delayed_work_sync(&conn->disc_work);
1094         cancel_delayed_work_sync(&conn->auto_accept_work);
1095         cancel_delayed_work_sync(&conn->idle_work);
1096
1097         if (conn->type == ACL_LINK) {
1098                 /* Unacked frames */
1099                 hdev->acl_cnt += conn->sent;
1100         } else if (conn->type == LE_LINK) {
1101                 cancel_delayed_work(&conn->le_conn_timeout);
1102
1103                 if (hdev->le_pkts)
1104                         hdev->le_cnt += conn->sent;
1105                 else
1106                         hdev->acl_cnt += conn->sent;
1107         } else {
1108                 /* Unacked ISO frames */
1109                 if (conn->type == ISO_LINK) {
1110                         if (hdev->iso_pkts)
1111                                 hdev->iso_cnt += conn->sent;
1112                         else if (hdev->le_pkts)
1113                                 hdev->le_cnt += conn->sent;
1114                         else
1115                                 hdev->acl_cnt += conn->sent;
1116                 }
1117         }
1118
1119         skb_queue_purge(&conn->data_q);
1120
1121         /* Remove the connection from the list and cleanup its remaining
1122          * state. This is a separate function since for some cases like
1123          * BT_CONNECT_SCAN we *only* want the cleanup part without the
1124          * rest of hci_conn_del.
1125          */
1126         hci_conn_cleanup(conn);
1127
1128         /* Dequeue callbacks using connection pointer as data */
1129         hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
1130 }
1131
1132 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1133 {
1134         int use_src = bacmp(src, BDADDR_ANY);
1135         struct hci_dev *hdev = NULL, *d;
1136
1137         BT_DBG("%pMR -> %pMR", src, dst);
1138
1139         read_lock(&hci_dev_list_lock);
1140
1141         list_for_each_entry(d, &hci_dev_list, list) {
1142                 if (!test_bit(HCI_UP, &d->flags) ||
1143                     hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
1144                     d->dev_type != HCI_PRIMARY)
1145                         continue;
1146
1147                 /* Simple routing:
1148                  *   No source address - find interface with bdaddr != dst
1149                  *   Source address    - find interface with bdaddr == src
1150                  */
1151
1152                 if (use_src) {
1153                         bdaddr_t id_addr;
1154                         u8 id_addr_type;
1155
1156                         if (src_type == BDADDR_BREDR) {
1157                                 if (!lmp_bredr_capable(d))
1158                                         continue;
1159                                 bacpy(&id_addr, &d->bdaddr);
1160                                 id_addr_type = BDADDR_BREDR;
1161                         } else {
1162                                 if (!lmp_le_capable(d))
1163                                         continue;
1164
1165                                 hci_copy_identity_address(d, &id_addr,
1166                                                           &id_addr_type);
1167
1168                                 /* Convert from HCI to three-value type */
1169                                 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1170                                         id_addr_type = BDADDR_LE_PUBLIC;
1171                                 else
1172                                         id_addr_type = BDADDR_LE_RANDOM;
1173                         }
1174
1175                         if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1176                                 hdev = d; break;
1177                         }
1178                 } else {
1179                         if (bacmp(&d->bdaddr, dst)) {
1180                                 hdev = d; break;
1181                         }
1182                 }
1183         }
1184
1185         if (hdev)
1186                 hdev = hci_dev_hold(hdev);
1187
1188         read_unlock(&hci_dev_list_lock);
1189         return hdev;
1190 }
1191 EXPORT_SYMBOL(hci_get_route);
1192
1193 /* This function requires the caller holds hdev->lock */
1194 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1195 {
1196         struct hci_dev *hdev = conn->hdev;
1197
1198         hci_connect_le_scan_cleanup(conn, status);
1199
1200         /* Enable advertising in case this was a failed connection
1201          * attempt as a peripheral.
1202          */
1203         hci_enable_advertising(hdev);
1204 }
1205
1206 /* This function requires the caller holds hdev->lock */
1207 void hci_conn_failed(struct hci_conn *conn, u8 status)
1208 {
1209         struct hci_dev *hdev = conn->hdev;
1210
1211         bt_dev_dbg(hdev, "status 0x%2.2x", status);
1212
1213         switch (conn->type) {
1214         case LE_LINK:
1215                 hci_le_conn_failed(conn, status);
1216                 break;
1217         case ACL_LINK:
1218                 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1219                                     conn->dst_type, status);
1220                 break;
1221         }
1222
1223         /* In case of BIG/PA sync failed, clear conn flags so that
1224          * the conns will be correctly cleaned up by ISO layer
1225          */
1226         test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
1227         test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);
1228
1229         conn->state = BT_CLOSED;
1230         hci_connect_cfm(conn, status);
1231         hci_conn_del(conn);
1232 }
1233
1234 /* This function requires the caller holds hdev->lock */
1235 u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
1236 {
1237         struct hci_dev *hdev = conn->hdev;
1238
1239         bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);
1240
1241         if (conn->handle == handle)
1242                 return 0;
1243
1244         if (handle > HCI_CONN_HANDLE_MAX) {
1245                 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
1246                            handle, HCI_CONN_HANDLE_MAX);
1247                 return HCI_ERROR_INVALID_PARAMETERS;
1248         }
1249
1250         /* If abort_reason has been set it means the connection is being
1251          * aborted and the handle shall not be changed.
1252          */
1253         if (conn->abort_reason)
1254                 return conn->abort_reason;
1255
1256         if (HCI_CONN_HANDLE_UNSET(conn->handle))
1257                 ida_free(&hdev->unset_handle_ida, conn->handle);
1258
1259         conn->handle = handle;
1260
1261         return 0;
1262 }
1263
1264 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1265                                 u8 dst_type, bool dst_resolved, u8 sec_level,
1266                                 u16 conn_timeout, u8 role)
1267 {
1268         struct hci_conn *conn;
1269         struct smp_irk *irk;
1270         int err;
1271
1272         /* Let's make sure that LE is enabled. */
1273         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1274                 if (lmp_le_capable(hdev))
1275                         return ERR_PTR(-ECONNREFUSED);
1276
1277                 return ERR_PTR(-EOPNOTSUPP);
1278         }
1279
1280         /* Since the controller supports only one LE connection attempt at a
1281          * time, we return -EBUSY if there is any connection attempt running.
1282          */
1283         if (hci_lookup_le_connect(hdev))
1284                 return ERR_PTR(-EBUSY);
1285
1286         /* If there's already a connection object but it's not in
1287          * scanning state it means it must already be established, in
1288          * which case we can't do anything else except report a failure
1289          * to connect.
1290          */
1291         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1292         if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1293                 return ERR_PTR(-EBUSY);
1294         }
1295
1296         /* Check if the destination address has been resolved by the controller
1297          * since if it did then the identity address shall be used.
1298          */
1299         if (!dst_resolved) {
1300                 /* When given an identity address with existing identity
1301                  * resolving key, the connection needs to be established
1302                  * to a resolvable random address.
1303                  *
1304                  * Storing the resolvable random address is required here
1305                  * to handle connection failures. The address will later
1306                  * be resolved back into the original identity address
1307                  * from the connect request.
1308                  */
1309                 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1310                 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1311                         dst = &irk->rpa;
1312                         dst_type = ADDR_LE_DEV_RANDOM;
1313                 }
1314         }
1315
1316         if (conn) {
1317                 bacpy(&conn->dst, dst);
1318         } else {
1319                 conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
1320                 if (!conn)
1321                         return ERR_PTR(-ENOMEM);
1322                 hci_conn_hold(conn);
1323                 conn->pending_sec_level = sec_level;
1324         }
1325
1326         conn->dst_type = dst_type;
1327         conn->sec_level = BT_SECURITY_LOW;
1328         conn->conn_timeout = conn_timeout;
1329
1330         err = hci_connect_le_sync(hdev, conn);
1331         if (err) {
1332                 hci_conn_del(conn);
1333                 return ERR_PTR(err);
1334         }
1335
1336         return conn;
1337 }
1338
1339 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1340 {
1341         struct hci_conn *conn;
1342
1343         conn = hci_conn_hash_lookup_le(hdev, addr, type);
1344         if (!conn)
1345                 return false;
1346
1347         if (conn->state != BT_CONNECTED)
1348                 return false;
1349
1350         return true;
1351 }
1352
1353 /* This function requires the caller holds hdev->lock */
1354 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1355                                         bdaddr_t *addr, u8 addr_type)
1356 {
1357         struct hci_conn_params *params;
1358
1359         if (is_connected(hdev, addr, addr_type))
1360                 return -EISCONN;
1361
1362         params = hci_conn_params_lookup(hdev, addr, addr_type);
1363         if (!params) {
1364                 params = hci_conn_params_add(hdev, addr, addr_type);
1365                 if (!params)
1366                         return -ENOMEM;
1367
1368                 /* If we created new params, mark them to be deleted in
1369                  * hci_connect_le_scan_cleanup. It's a different case than
1370                  * existing disabled params; those will stay after cleanup.
1371                  */
1372                 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1373         }
1374
1375         /* We're trying to connect, so make sure params are at pend_le_conns */
1376         if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1377             params->auto_connect == HCI_AUTO_CONN_REPORT ||
1378             params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1379                 hci_pend_le_list_del_init(params);
1380                 hci_pend_le_list_add(params, &hdev->pend_le_conns);
1381         }
1382
1383         params->explicit_connect = true;
1384
1385         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1386                params->auto_connect);
1387
1388         return 0;
1389 }
1390
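/* Allocate a free BIG handle if the QoS parameters do not specify one. */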
1391 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1392 {
1393         struct hci_conn *conn;
1394         u8  big;
1395
1396         /* Allocate a BIG if not set */
1397         if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1398                 for (big = 0x00; big < 0xef; big++) {
1399
1400                         conn = hci_conn_hash_lookup_big(hdev, big);
1401                         if (!conn)
1402                                 break;
1403                 }
1404
1405                 if (big == 0xef)
1406                         return -EADDRNOTAVAIL;
1407
1408                 /* Update BIG */
1409                 qos->bcast.big = big;
1410         }
1411
1412         return 0;
1413 }
1414
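/* Allocate an advertising set to be used as the BIS handle if the QoS
 * parameters do not specify one, reusing the set already bound to the BIG
 * when possible.
 */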
1415 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1416 {
1417         struct hci_conn *conn;
1418         u8  bis;
1419
1420         /* Allocate BIS if not set */
1421         if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1422                 if (qos->bcast.big != BT_ISO_QOS_BIG_UNSET) {
1423                         conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1424
1425                         if (conn) {
1426                                 /* If the BIG handle is already matched to an advertising
1427                                  * handle, do not allocate a new one.
1428                                  */
1429                                 qos->bcast.bis = conn->iso_qos.bcast.bis;
1430                                 return 0;
1431                         }
1432                 }
1433
1434                 /* Find an unused adv set to advertise BIS, skip instance 0x00
1435                  * since it is reserved as general purpose set.
1436                  */
1437                 for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1438                      bis++) {
1439
1440                         conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1441                         if (!conn)
1442                                 break;
1443                 }
1444
1445                 if (bis == hdev->le_num_of_adv_sets)
1446                         return -EADDRNOTAVAIL;
1447
1448                 /* Update BIS */
1449                 qos->bcast.bis = bis;
1450         }
1451
1452         return 0;
1453 }
1454
1455 /* This function requires the caller holds hdev->lock */
1456 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1457                                     struct bt_iso_qos *qos, __u8 base_len,
1458                                     __u8 *base)
1459 {
1460         struct hci_conn *conn;
1461         int err;
1462
1463         /* Let's make sure that LE is enabled. */
1464         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1465                 if (lmp_le_capable(hdev))
1466                         return ERR_PTR(-ECONNREFUSED);
1467                 return ERR_PTR(-EOPNOTSUPP);
1468         }
1469
1470         err = qos_set_big(hdev, qos);
1471         if (err)
1472                 return ERR_PTR(err);
1473
1474         err = qos_set_bis(hdev, qos);
1475         if (err)
1476                 return ERR_PTR(err);
1477
1478         /* Check if the LE Create BIG command has already been sent */
1479         conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
1480                                                 qos->bcast.big);
1481         if (conn)
1482                 return ERR_PTR(-EADDRINUSE);
1483
1484         /* Check BIS settings against other bound BISes, since all
1485          * BISes in a BIG must have the same value for all parameters
1486          */
1487         conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1488
1489         if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
1490                      base_len != conn->le_per_adv_data_len ||
1491                      memcmp(conn->le_per_adv_data, base, base_len)))
1492                 return ERR_PTR(-EADDRINUSE);
1493
1494         conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1495         if (!conn)
1496                 return ERR_PTR(-ENOMEM);
1497
1498         conn->state = BT_CONNECT;
1499
1500         hci_conn_hold(conn);
1501         return conn;
1502 }
1503
1504 /* This function requires the caller holds hdev->lock */
1505 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1506                                      u8 dst_type, u8 sec_level,
1507                                      u16 conn_timeout,
1508                                      enum conn_reasons conn_reason)
1509 {
1510         struct hci_conn *conn;
1511
1512         /* Let's make sure that LE is enabled. */
1513         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1514                 if (lmp_le_capable(hdev))
1515                         return ERR_PTR(-ECONNREFUSED);
1516
1517                 return ERR_PTR(-EOPNOTSUPP);
1518         }
1519
1520         /* Some devices send ATT messages as soon as the physical link is
1521          * established. To be able to handle these ATT messages, the user-
1522          * space first establishes the connection and then starts the pairing
1523          * process.
1524          *
1525          * So if a hci_conn object already exists for the following connection
1526          * attempt, we simply update the pending_sec_level field and return
1527          * the object found.
1528          */
1529         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1530         if (conn) {
1531                 if (conn->pending_sec_level < sec_level)
1532                         conn->pending_sec_level = sec_level;
1533                 goto done;
1534         }
1535
1536         BT_DBG("requesting refresh of dst_addr");
1537
1538         conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1539         if (!conn)
1540                 return ERR_PTR(-ENOMEM);
1541
1542         if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1543                 hci_conn_del(conn);
1544                 return ERR_PTR(-EBUSY);
1545         }
1546
1547         conn->state = BT_CONNECT;
1548         set_bit(HCI_CONN_SCANNING, &conn->flags);
1549         conn->dst_type = dst_type;
1550         conn->sec_level = BT_SECURITY_LOW;
1551         conn->pending_sec_level = sec_level;
1552         conn->conn_timeout = conn_timeout;
1553         conn->conn_reason = conn_reason;
1554
1555         hci_update_passive_scan(hdev);
1556
1557 done:
1558         hci_conn_hold(conn);
1559         return conn;
1560 }
1561
1562 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1563                                  u8 sec_level, u8 auth_type,
1564                                  enum conn_reasons conn_reason, u16 timeout)
1565 {
1566         struct hci_conn *acl;
1567
1568         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1569                 if (lmp_bredr_capable(hdev))
1570                         return ERR_PTR(-ECONNREFUSED);
1571
1572                 return ERR_PTR(-EOPNOTSUPP);
1573         }
1574
1575         /* Reject outgoing connection to a device with the same BD_ADDR to
1576          * mitigate CVE-2020-26555
1577          */
1578         if (!bacmp(&hdev->bdaddr, dst)) {
1579                 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
1580                            dst);
1581                 return ERR_PTR(-ECONNREFUSED);
1582         }
1583
1584         acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1585         if (!acl) {
1586                 acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1587                 if (!acl)
1588                         return ERR_PTR(-ENOMEM);
1589         }
1590
1591         hci_conn_hold(acl);
1592
1593         acl->conn_reason = conn_reason;
1594         if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1595                 int err;
1596
1597                 acl->sec_level = BT_SECURITY_LOW;
1598                 acl->pending_sec_level = sec_level;
1599                 acl->auth_type = auth_type;
1600                 acl->conn_timeout = timeout;
1601
1602                 err = hci_connect_acl_sync(hdev, acl);
1603                 if (err) {
1604                         hci_conn_del(acl);
1605                         return ERR_PTR(err);
1606                 }
1607         }
1608
1609         return acl;
1610 }
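
/* Usage sketch (illustrative only, not an in-tree caller): an ACL user such
 * as the L2CAP core typically calls this with hdev->lock held; the security
 * level, bonding requirement and "timeout" below are example values.
 *
 *        struct hci_conn *acl;
 *
 *        acl = hci_connect_acl(hdev, dst, BT_SECURITY_MEDIUM,
 *                              HCI_AT_GENERAL_BONDING,
 *                              CONN_REASON_L2CAP_CHAN, timeout);
 *        if (IS_ERR(acl))
 *                return PTR_ERR(acl);
 */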
1611
1612 static struct hci_link *hci_conn_link(struct hci_conn *parent,
1613                                       struct hci_conn *conn)
1614 {
1615         struct hci_dev *hdev = parent->hdev;
1616         struct hci_link *link;
1617
1618         bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);
1619
1620         if (conn->link)
1621                 return conn->link;
1622
1623         if (conn->parent)
1624                 return NULL;
1625
1626         link = kzalloc(sizeof(*link), GFP_KERNEL);
1627         if (!link)
1628                 return NULL;
1629
1630         link->conn = hci_conn_hold(conn);
1631         conn->link = link;
1632         conn->parent = hci_conn_get(parent);
1633
1634         /* Use list_add_tail_rcu to append to the list */
1635         list_add_tail_rcu(&link->list, &parent->link_list);
1636
1637         return link;
1638 }
1639
1640 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1641                                  __u16 setting, struct bt_codec *codec,
1642                                  u16 timeout)
1643 {
1644         struct hci_conn *acl;
1645         struct hci_conn *sco;
1646         struct hci_link *link;
1647
1648         acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1649                               CONN_REASON_SCO_CONNECT, timeout);
1650         if (IS_ERR(acl))
1651                 return acl;
1652
1653         sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1654         if (!sco) {
1655                 sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
1656                 if (!sco) {
1657                         hci_conn_drop(acl);
1658                         return ERR_PTR(-ENOMEM);
1659                 }
1660         }
1661
1662         link = hci_conn_link(acl, sco);
1663         if (!link) {
1664                 hci_conn_drop(acl);
1665                 hci_conn_drop(sco);
1666                 return ERR_PTR(-ENOLINK);
1667         }
1668
1669         sco->setting = setting;
1670         sco->codec = *codec;
1671
1672         if (acl->state == BT_CONNECTED &&
1673             (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1674                 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1675                 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1676
1677                 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1678                         /* defer SCO setup until mode change completed */
1679                         set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1680                         return sco;
1681                 }
1682
1683                 hci_sco_setup(acl, 0x00);
1684         }
1685
1686         return sco;
1687 }
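
/* Usage sketch (illustrative): roughly how the SCO socket layer could
 * request an eSCO link; the codec id and voice setting are example values
 * and "timeout" is a placeholder.
 *
 *        struct bt_codec codec = { .id = BT_CODEC_CVSD };
 *        struct hci_conn *hcon;
 *
 *        hcon = hci_connect_sco(hdev, ESCO_LINK, dst, BT_VOICE_CVSD_16BIT,
 *                               &codec, timeout);
 *        if (IS_ERR(hcon))
 *                return PTR_ERR(hcon);
 */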
1688
1689 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1690 {
1691         struct hci_dev *hdev = conn->hdev;
1692         struct hci_cp_le_create_big cp;
1693         struct iso_list_data data;
1694
1695         memset(&cp, 0, sizeof(cp));
1696
1697         data.big = qos->bcast.big;
1698         data.bis = qos->bcast.bis;
1699         data.count = 0;
1700
1701         /* Create a BIS for each bound connection */
1702         hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1703                                  BT_BOUND, &data);
1704
1705         cp.handle = qos->bcast.big;
1706         cp.adv_handle = qos->bcast.bis;
1707         cp.num_bis  = data.count;
1708         hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
1709         cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
1710         cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
1711         cp.bis.rtn  = qos->bcast.out.rtn;
1712         cp.bis.phy  = qos->bcast.out.phy;
1713         cp.bis.packing = qos->bcast.packing;
1714         cp.bis.framing = qos->bcast.framing;
1715         cp.bis.encryption = qos->bcast.encryption;
1716         memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));
1717
1718         return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
1719 }
1720
1721 static int set_cig_params_sync(struct hci_dev *hdev, void *data)
1722 {
1723         u8 cig_id = PTR_UINT(data);
1724         struct hci_conn *conn;
1725         struct bt_iso_qos *qos;
1726         struct iso_cig_params pdu;
1727         u8 cis_id;
1728
1729         conn = hci_conn_hash_lookup_cig(hdev, cig_id);
1730         if (!conn)
1731                 return 0;
1732
1733         memset(&pdu, 0, sizeof(pdu));
1734
1735         qos = &conn->iso_qos;
1736         pdu.cp.cig_id = cig_id;
1737         hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
1738         hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
1739         pdu.cp.sca = qos->ucast.sca;
1740         pdu.cp.packing = qos->ucast.packing;
1741         pdu.cp.framing = qos->ucast.framing;
1742         pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
1743         pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
1744
1745         /* Reprogram all CIS(s) with the same CIG, valid ranges are:
1746          * num_cis: 0x00 to 0x1F
1747          * cis_id: 0x00 to 0xEF
1748          */
1749         for (cis_id = 0x00; cis_id < 0xf0 &&
1750              pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
1751                 struct hci_cis_params *cis;
1752
1753                 conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
1754                 if (!conn)
1755                         continue;
1756
1757                 qos = &conn->iso_qos;
1758
1759                 cis = &pdu.cis[pdu.cp.num_cis++];
1760                 cis->cis_id = cis_id;
1761                 cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
1762                 cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
1763                 cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
1764                               qos->ucast.in.phy;
1765                 cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
1766                               qos->ucast.out.phy;
1767                 cis->c_rtn  = qos->ucast.out.rtn;
1768                 cis->p_rtn  = qos->ucast.in.rtn;
1769         }
1770
1771         if (!pdu.cp.num_cis)
1772                 return 0;
1773
1774         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1775                                      sizeof(pdu.cp) +
1776                                      pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
1777                                      HCI_CMD_TIMEOUT);
1778 }
1779
1780 static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
1781 {
1782         struct hci_dev *hdev = conn->hdev;
1783         struct iso_list_data data;
1784
1785         memset(&data, 0, sizeof(data));
1786
1787         /* Allocate first still reconfigurable CIG if not set */
1788         if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
1789                 for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
1790                         data.count = 0;
1791
1792                         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1793                                                  BT_CONNECT, &data);
1794                         if (data.count)
1795                                 continue;
1796
1797                         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1798                                                  BT_CONNECTED, &data);
1799                         if (!data.count)
1800                                 break;
1801                 }
1802
1803                 if (data.cig == 0xf0)
1804                         return false;
1805
1806                 /* Update CIG */
1807                 qos->ucast.cig = data.cig;
1808         }
1809
1810         if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
1811                 if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
1812                                              qos->ucast.cis))
1813                         return false;
1814                 goto done;
1815         }
1816
1817         /* Allocate first available CIS if not set */
1818         for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
1819              data.cis++) {
1820                 if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
1821                                               data.cis)) {
1822                         /* Update CIS */
1823                         qos->ucast.cis = data.cis;
1824                         break;
1825                 }
1826         }
1827
1828         if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
1829                 return false;
1830
1831 done:
1832         if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
1833                                UINT_PTR(qos->ucast.cig), NULL) < 0)
1834                 return false;
1835
1836         return true;
1837 }
1838
1839 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1840                               __u8 dst_type, struct bt_iso_qos *qos)
1841 {
1842         struct hci_conn *cis;
1843
1844         cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
1845                                        qos->ucast.cis);
1846         if (!cis) {
1847                 cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1848                 if (!cis)
1849                         return ERR_PTR(-ENOMEM);
1850                 cis->cleanup = cis_cleanup;
1851                 cis->dst_type = dst_type;
1852                 cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
1853                 cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
1854         }
1855
1856         if (cis->state == BT_CONNECTED)
1857                 return cis;
1858
1859         /* Check if CIS has been set and the settings match */
1860         if (cis->state == BT_BOUND &&
1861             !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
1862                 return cis;
1863
1864         /* Update LINK PHYs according to QoS preference */
1865         cis->le_tx_phy = qos->ucast.out.phy;
1866         cis->le_rx_phy = qos->ucast.in.phy;
1867
1868         /* If output interval is not set use the input interval as it cannot be
1869          * 0x000000.
1870          */
1871         if (!qos->ucast.out.interval)
1872                 qos->ucast.out.interval = qos->ucast.in.interval;
1873
1874         /* If input interval is not set use the output interval as it cannot be
1875          * 0x000000.
1876          */
1877         if (!qos->ucast.in.interval)
1878                 qos->ucast.in.interval = qos->ucast.out.interval;
1879
1880         /* If output latency is not set use the input latency as it cannot be
1881          * 0x0000.
1882          */
1883         if (!qos->ucast.out.latency)
1884                 qos->ucast.out.latency = qos->ucast.in.latency;
1885
1886         /* If input latency is not set use the output latency as it cannot be
1887          * 0x0000.
1888          */
1889         if (!qos->ucast.in.latency)
1890                 qos->ucast.in.latency = qos->ucast.out.latency;
1891
1892         if (!hci_le_set_cig_params(cis, qos)) {
1893                 hci_conn_drop(cis);
1894                 return ERR_PTR(-EINVAL);
1895         }
1896
1897         hci_conn_hold(cis);
1898
1899         cis->iso_qos = *qos;
1900         cis->state = BT_BOUND;
1901
1902         return cis;
1903 }
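
/* Bind-only usage sketch (illustrative): leaving the CIG/CIS identifiers
 * unset lets the helpers above allocate free ones; the actual Create CIS
 * procedure is queued separately (see hci_le_create_cis_pending below).
 *
 *        struct hci_conn *cis;
 *
 *        qos->ucast.cig = BT_ISO_QOS_CIG_UNSET;
 *        qos->ucast.cis = BT_ISO_QOS_CIS_UNSET;
 *        cis = hci_bind_cis(hdev, dst, dst_type, qos);
 *        if (IS_ERR(cis))
 *                return PTR_ERR(cis);
 */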
1904
1905 bool hci_iso_setup_path(struct hci_conn *conn)
1906 {
1907         struct hci_dev *hdev = conn->hdev;
1908         struct hci_cp_le_setup_iso_path cmd;
1909
1910         memset(&cmd, 0, sizeof(cmd));
1911
1912         if (conn->iso_qos.ucast.out.sdu) {
1913                 cmd.handle = cpu_to_le16(conn->handle);
1914                 cmd.direction = 0x00; /* Input (Host to Controller) */
1915                 cmd.path = 0x00; /* HCI path if enabled */
1916                 cmd.codec = 0x03; /* Transparent Data */
1917
1918                 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1919                                  &cmd) < 0)
1920                         return false;
1921         }
1922
1923         if (conn->iso_qos.ucast.in.sdu) {
1924                 cmd.handle = cpu_to_le16(conn->handle);
1925                 cmd.direction = 0x01; /* Output (Controller to Host) */
1926                 cmd.path = 0x00; /* HCI path if enabled */
1927                 cmd.codec = 0x03; /* Transparent Data */
1928
1929                 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1930                                  &cmd) < 0)
1931                         return false;
1932         }
1933
1934         return true;
1935 }
1936
1937 int hci_conn_check_create_cis(struct hci_conn *conn)
1938 {
1939         if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
1940                 return -EINVAL;
1941
1942         if (!conn->parent || conn->parent->state != BT_CONNECTED ||
1943             conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
1944                 return 1;
1945
1946         return 0;
1947 }
1948
1949 static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
1950 {
1951         return hci_le_create_cis_sync(hdev);
1952 }
1953
1954 int hci_le_create_cis_pending(struct hci_dev *hdev)
1955 {
1956         struct hci_conn *conn;
1957         bool pending = false;
1958
1959         rcu_read_lock();
1960
1961         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
1962                 if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
1963                         rcu_read_unlock();
1964                         return -EBUSY;
1965                 }
1966
1967                 if (!hci_conn_check_create_cis(conn))
1968                         pending = true;
1969         }
1970
1971         rcu_read_unlock();
1972
1973         if (!pending)
1974                 return 0;
1975
1976         /* Queue Create CIS */
1977         return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
1978 }
1979
1980 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
1981                               struct bt_iso_io_qos *qos, __u8 phy)
1982 {
1983         /* Only set MTU if PHY is enabled */
1984         if (!qos->sdu && qos->phy) {
1985                 if (hdev->iso_mtu > 0)
1986                         qos->sdu = hdev->iso_mtu;
1987                 else if (hdev->le_mtu > 0)
1988                         qos->sdu = hdev->le_mtu;
1989                 else
1990                         qos->sdu = hdev->acl_mtu;
1991         }
1992
1993         /* Use the same PHY as ACL if set to any */
1994         if (qos->phy == BT_ISO_PHY_ANY)
1995                 qos->phy = phy;
1996
1997         /* Use LE ACL connection interval if not set */
1998         if (!qos->interval)
1999                 /* ACL interval unit is 1.25 ms; convert to us */
2000                 qos->interval = conn->le_conn_interval * 1250;
2001
2002         /* Use LE ACL connection latency if not set */
2003         if (!qos->latency)
2004                 qos->latency = conn->le_conn_latency;
2005 }
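
/* For example, with an LE ACL connection interval of 0x0028 (40 * 1.25 ms =
 * 50 ms), an unset qos->interval above ends up as 40 * 1250 = 50000 us.
 */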
2006
2007 static int create_big_sync(struct hci_dev *hdev, void *data)
2008 {
2009         struct hci_conn *conn = data;
2010         struct bt_iso_qos *qos = &conn->iso_qos;
2011         u16 interval, sync_interval = 0;
2012         u32 flags = 0;
2013         int err;
2014
2015         if (qos->bcast.out.phy == 0x02)
2016                 flags |= MGMT_ADV_FLAG_SEC_2M;
2017
2018         /* Align intervals */
2019         interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;
2020
2021         if (qos->bcast.bis)
2022                 sync_interval = interval * 4;
2023
2024         err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
2025                                      conn->le_per_adv_data, flags, interval,
2026                                      interval, sync_interval);
2027         if (err)
2028                 return err;
2029
2030         return hci_le_create_big(conn, &conn->iso_qos);
2031 }
2032
2033 static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
2034 {
2035         struct hci_cp_le_pa_create_sync *cp = data;
2036
2037         bt_dev_dbg(hdev, "");
2038
2039         if (err)
2040                 bt_dev_err(hdev, "Unable to create PA: %d", err);
2041
2042         kfree(cp);
2043 }
2044
2045 static int create_pa_sync(struct hci_dev *hdev, void *data)
2046 {
2047         struct hci_cp_le_pa_create_sync *cp = data;
2048         int err;
2049
2050         err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2051                                     sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2052         if (err) {
2053                 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2054                 return err;
2055         }
2056
2057         return hci_update_passive_scan_sync(hdev);
2058 }
2059
2060 struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
2061                                     __u8 dst_type, __u8 sid,
2062                                     struct bt_iso_qos *qos)
2063 {
2064         struct hci_cp_le_pa_create_sync *cp;
2065         struct hci_conn *conn;
2066         int err;
2067
2068         if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
2069                 return ERR_PTR(-EBUSY);
2070
2071         conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_SLAVE);
2072         if (!conn)
2073                 return ERR_PTR(-ENOMEM);
2074
2075         conn->iso_qos = *qos;
2076         conn->state = BT_LISTEN;
2077
2078         hci_conn_hold(conn);
2079
2080         cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2081         if (!cp) {
2082                 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2083                 hci_conn_drop(conn);
2084                 return ERR_PTR(-ENOMEM);
2085         }
2086
2087         cp->options = qos->bcast.options;
2088         cp->sid = sid;
2089         cp->addr_type = dst_type;
2090         bacpy(&cp->addr, dst);
2091         cp->skip = cpu_to_le16(qos->bcast.skip);
2092         cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
2093         cp->sync_cte_type = qos->bcast.sync_cte_type;
2094
2095         /* Queue the PA Create Sync command and a passive scan update */
2096         err = hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
2097         if (err < 0) {
2098                 hci_conn_drop(conn);
2099                 kfree(cp);
2100                 return ERR_PTR(err);
2101         }
2102
2103         return conn;
2104 }
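
/* Broadcast-sink usage sketch (illustrative): only one PA create sync may
 * be pending at a time, otherwise -EBUSY is returned; dst, dst_type, sid
 * and qos come from the caller (e.g. the ISO socket layer).
 *
 *        struct hci_conn *pa;
 *
 *        pa = hci_pa_create_sync(hdev, dst, dst_type, sid, qos);
 *        if (IS_ERR(pa))
 *                return PTR_ERR(pa);
 */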
2105
2106 int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
2107                            struct bt_iso_qos *qos,
2108                            __u16 sync_handle, __u8 num_bis, __u8 bis[])
2109 {
2110         struct {
2111                 struct hci_cp_le_big_create_sync cp;
2112                 __u8  bis[0x11];
2113         } __packed pdu;
2114         int err;
2115
2116         if (num_bis < 0x01 || num_bis > sizeof(pdu.bis))
2117                 return -EINVAL;
2118
2119         err = qos_set_big(hdev, qos);
2120         if (err)
2121                 return err;
2122
2123         if (hcon)
2124                 hcon->iso_qos.bcast.big = qos->bcast.big;
2125
2126         memset(&pdu, 0, sizeof(pdu));
2127         pdu.cp.handle = qos->bcast.big;
2128         pdu.cp.sync_handle = cpu_to_le16(sync_handle);
2129         pdu.cp.encryption = qos->bcast.encryption;
2130         memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
2131         pdu.cp.mse = qos->bcast.mse;
2132         pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
2133         pdu.cp.num_bis = num_bis;
2134         memcpy(pdu.bis, bis, num_bis);
2135
2136         return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2137                             sizeof(pdu.cp) + num_bis, &pdu);
2138 }
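
/* Usage sketch (illustrative): a broadcast sink synchronizing to the first
 * two BISes of a BIG whose periodic advertising train is already synced;
 * sync_handle is the handle of that PA sync.
 *
 *        __u8 bis[] = { 0x01, 0x02 };
 *
 *        err = hci_le_big_create_sync(hdev, hcon, qos, sync_handle,
 *                                     ARRAY_SIZE(bis), bis);
 */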
2139
2140 static void create_big_complete(struct hci_dev *hdev, void *data, int err)
2141 {
2142         struct hci_conn *conn = data;
2143
2144         bt_dev_dbg(hdev, "conn %p", conn);
2145
2146         if (err) {
2147                 bt_dev_err(hdev, "Unable to create BIG: %d", err);
2148                 hci_connect_cfm(conn, err);
2149                 hci_conn_del(conn);
2150         }
2151 }
2152
2153 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
2154                               struct bt_iso_qos *qos,
2155                               __u8 base_len, __u8 *base)
2156 {
2157         struct hci_conn *conn;
2158         struct hci_conn *parent;
2159         __u8 eir[HCI_MAX_PER_AD_LENGTH];
2160         struct hci_link *link;
2161
2162         /* Look for any BIS that is open for rebinding */
2163         conn = hci_conn_hash_lookup_big_state(hdev, qos->bcast.big, BT_OPEN);
2164         if (conn) {
2165                 memcpy(qos, &conn->iso_qos, sizeof(*qos));
2166                 conn->state = BT_CONNECTED;
2167                 return conn;
2168         }
2169
2170         if (base_len && base)
2171                 base_len = eir_append_service_data(eir, 0,  0x1851,
2172                                                    base, base_len);
2173
2174         /* We need a hci_conn object using BDADDR_ANY as dst */
2175         conn = hci_add_bis(hdev, dst, qos, base_len, eir);
2176         if (IS_ERR(conn))
2177                 return conn;
2178
2179         /* Update LINK PHYs according to QoS preference */
2180         conn->le_tx_phy = qos->bcast.out.phy;
2182
2183         /* Add Basic Announcement into Periodic Adv Data if BASE is set */
2184         if (base_len && base) {
2185                 memcpy(conn->le_per_adv_data, eir, sizeof(eir));
2186                 conn->le_per_adv_data_len = base_len;
2187         }
2188
2189         hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2190                           conn->le_tx_phy ? conn->le_tx_phy :
2191                           hdev->le_tx_def_phys);
2192
2193         conn->iso_qos = *qos;
2194         conn->state = BT_BOUND;
2195
2196         /* Link BISes together */
2197         parent = hci_conn_hash_lookup_big(hdev,
2198                                           conn->iso_qos.bcast.big);
2199         if (parent && parent != conn) {
2200                 link = hci_conn_link(parent, conn);
2201                 if (!link) {
2202                         hci_conn_drop(conn);
2203                         return ERR_PTR(-ENOLINK);
2204                 }
2205
2206                 /* Link takes the refcount */
2207                 hci_conn_drop(conn);
2208         }
2209
2210         return conn;
2211 }
2212
2213 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2214 {
2215         struct iso_list_data *d = data;
2216
2217         /* Skip if not broadcast/ANY address */
2218         if (bacmp(&conn->dst, BDADDR_ANY))
2219                 return;
2220
2221         if (d->big != conn->iso_qos.bcast.big ||
2222             d->bis == BT_ISO_QOS_BIS_UNSET ||
2223             d->bis != conn->iso_qos.bcast.bis)
2224                 return;
2225
2226         set_bit(HCI_CONN_PER_ADV, &conn->flags);
2227 }
2228
2229 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
2230                                  __u8 dst_type, struct bt_iso_qos *qos,
2231                                  __u8 base_len, __u8 *base)
2232 {
2233         struct hci_conn *conn;
2234         int err;
2235         struct iso_list_data data;
2236
2237         conn = hci_bind_bis(hdev, dst, qos, base_len, base);
2238         if (IS_ERR(conn))
2239                 return conn;
2240
2241         if (conn->state == BT_CONNECTED)
2242                 return conn;
2243
2244         data.big = qos->bcast.big;
2245         data.bis = qos->bcast.bis;
2246
2247         /* Set HCI_CONN_PER_ADV for all bound connections, to mark that
2248          * the start periodic advertising and create BIG commands have
2249          * been queued
2250          */
2251         hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
2252                                  BT_BOUND, &data);
2253
2254         /* Queue start periodic advertising and create BIG */
2255         err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
2256                                  create_big_complete);
2257         if (err < 0) {
2258                 hci_conn_drop(conn);
2259                 return ERR_PTR(err);
2260         }
2261
2262         return conn;
2263 }
2264
2265 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
2266                                  __u8 dst_type, struct bt_iso_qos *qos)
2267 {
2268         struct hci_conn *le;
2269         struct hci_conn *cis;
2270         struct hci_link *link;
2271
2272         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2273                 le = hci_connect_le(hdev, dst, dst_type, false,
2274                                     BT_SECURITY_LOW,
2275                                     HCI_LE_CONN_TIMEOUT,
2276                                     HCI_ROLE_SLAVE);
2277         else
2278                 le = hci_connect_le_scan(hdev, dst, dst_type,
2279                                          BT_SECURITY_LOW,
2280                                          HCI_LE_CONN_TIMEOUT,
2281                                          CONN_REASON_ISO_CONNECT);
2282         if (IS_ERR(le))
2283                 return le;
2284
2285         hci_iso_qos_setup(hdev, le, &qos->ucast.out,
2286                           le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
2287         hci_iso_qos_setup(hdev, le, &qos->ucast.in,
2288                           le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
2289
2290         cis = hci_bind_cis(hdev, dst, dst_type, qos);
2291         if (IS_ERR(cis)) {
2292                 hci_conn_drop(le);
2293                 return cis;
2294         }
2295
2296         link = hci_conn_link(le, cis);
2297         if (!link) {
2298                 hci_conn_drop(le);
2299                 hci_conn_drop(cis);
2300                 return ERR_PTR(-ENOLINK);
2301         }
2302
2303         /* Link takes the refcount */
2304         hci_conn_drop(cis);
2305
2306         cis->state = BT_CONNECT;
2307
2308         hci_le_create_cis_pending(hdev);
2309
2310         return cis;
2311 }
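
/* Central-side usage sketch (illustrative): a single call establishes the
 * LE ACL (if needed), binds the CIS and queues the Create CIS procedure.
 *
 *        struct hci_conn *cis;
 *
 *        cis = hci_connect_cis(hdev, dst, dst_type, qos);
 *        if (IS_ERR(cis))
 *                return PTR_ERR(cis);
 */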
2312
2313 /* Check link security requirement */
2314 int hci_conn_check_link_mode(struct hci_conn *conn)
2315 {
2316         BT_DBG("hcon %p", conn);
2317
2318         /* In Secure Connections Only mode, it is required that Secure
2319          * Connections is used and the link is encrypted with AES-CCM
2320          * using a P-256 authenticated combination key.
2321          */
2322         if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2323                 if (!hci_conn_sc_enabled(conn) ||
2324                     !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2325                     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2326                         return 0;
2327         }
2328
2329         /* AES encryption is required for Level 4:
2330          *
2331          * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2332          * page 1319:
2333          *
2334          * 128-bit equivalent strength for link and encryption keys
2335          * required using FIPS approved algorithms (E0 not allowed,
2336          * SAFER+ not allowed, and P-192 not allowed; encryption key
2337          * not shortened)
2338          */
2339         if (conn->sec_level == BT_SECURITY_FIPS &&
2340             !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2341                 bt_dev_err(conn->hdev,
2342                            "Invalid security: Missing AES-CCM usage");
2343                 return 0;
2344         }
2345
2346         if (hci_conn_ssp_enabled(conn) &&
2347             !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2348                 return 0;
2349
2350         return 1;
2351 }
2352
2353 /* Authenticate remote device */
2354 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
2355 {
2356         BT_DBG("hcon %p", conn);
2357
2358         if (conn->pending_sec_level > sec_level)
2359                 sec_level = conn->pending_sec_level;
2360
2361         if (sec_level > conn->sec_level)
2362                 conn->pending_sec_level = sec_level;
2363         else if (test_bit(HCI_CONN_AUTH, &conn->flags))
2364                 return 1;
2365
2366         /* Make sure we preserve an existing MITM requirement */
2367         auth_type |= (conn->auth_type & 0x01);
2368
2369         conn->auth_type = auth_type;
2370
2371         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2372                 struct hci_cp_auth_requested cp;
2373
2374                 cp.handle = cpu_to_le16(conn->handle);
2375                 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
2376                              sizeof(cp), &cp);
2377
2378                 /* Set the ENCRYPT_PEND to trigger encryption after
2379                  * authentication.
2380                  */
2381                 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2382                         set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2383         }
2384
2385         return 0;
2386 }
2387
2388 /* Encrypt the link */
2389 static void hci_conn_encrypt(struct hci_conn *conn)
2390 {
2391         BT_DBG("hcon %p", conn);
2392
2393         if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2394                 struct hci_cp_set_conn_encrypt cp;
2395                 cp.handle  = cpu_to_le16(conn->handle);
2396                 cp.encrypt = 0x01;
2397                 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2398                              &cp);
2399         }
2400 }
2401
2402 /* Enable security */
2403 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
2404                       bool initiator)
2405 {
2406         BT_DBG("hcon %p", conn);
2407
2408         if (conn->type == LE_LINK)
2409                 return smp_conn_security(conn, sec_level);
2410
2411         /* For SDP we don't need the link key. */
2412         if (sec_level == BT_SECURITY_SDP)
2413                 return 1;
2414
2415         /* For non 2.1 devices and low security level we don't need the link
2416            key. */
2417         if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
2418                 return 1;
2419
2420         /* For other security levels we need the link key. */
2421         if (!test_bit(HCI_CONN_AUTH, &conn->flags))
2422                 goto auth;
2423
2424         switch (conn->key_type) {
2425         case HCI_LK_AUTH_COMBINATION_P256:
2426                 /* An authenticated FIPS approved combination key has
2427                  * sufficient security for security level 4 or lower.
2428                  */
2429                 if (sec_level <= BT_SECURITY_FIPS)
2430                         goto encrypt;
2431                 break;
2432         case HCI_LK_AUTH_COMBINATION_P192:
2433                 /* An authenticated combination key has sufficient security for
2434                  * security level 3 or lower.
2435                  */
2436                 if (sec_level <= BT_SECURITY_HIGH)
2437                         goto encrypt;
2438                 break;
2439         case HCI_LK_UNAUTH_COMBINATION_P192:
2440         case HCI_LK_UNAUTH_COMBINATION_P256:
2441                 /* An unauthenticated combination key has sufficient security
2442                  * for security level 2 or lower.
2443                  */
2444                 if (sec_level <= BT_SECURITY_MEDIUM)
2445                         goto encrypt;
2446                 break;
2447         case HCI_LK_COMBINATION:
2448                 /* A combination key always has sufficient security for
2449                  * security level 2 or lower. A high security level requires
2450                  * that the combination key was generated using the maximum
2451                  * PIN code length (16). For pre-2.1 units.
2452                  */
2453                 if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
2454                         goto encrypt;
2455                 break;
2456         default:
2457                 break;
2458         }
2459
2460 auth:
2461         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
2462                 return 0;
2463
2464         if (initiator)
2465                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2466
2467         if (!hci_conn_auth(conn, sec_level, auth_type))
2468                 return 0;
2469
2470 encrypt:
2471         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2472                 /* Ensure that the encryption key size has been read,
2473                  * otherwise stall the upper layer responses.
2474                  */
2475                 if (!conn->enc_key_size)
2476                         return 0;
2477
2478                 /* Nothing else needed, all requirements are met */
2479                 return 1;
2480         }
2481
2482         hci_conn_encrypt(conn);
2483         return 0;
2484 }
2485 EXPORT_SYMBOL(hci_conn_security);
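
/* Usage sketch (illustrative): a profile raising the link to an
 * authenticated (MITM protected) level before exchanging data; a return of
 * 0 means authentication/encryption is still in progress and the outcome is
 * reported through the security callbacks.
 *
 *        if (!hci_conn_security(conn, BT_SECURITY_HIGH,
 *                               HCI_AT_GENERAL_BONDING_MITM, true))
 *                return 0;
 */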
2486
2487 /* Check secure link requirement */
2488 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2489 {
2490         BT_DBG("hcon %p", conn);
2491
2492         /* Accept if the required security level is below HIGH/FIPS */
2493         if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2494                 return 1;
2495
2496         /* Accept if secure or higher security level is already present */
2497         if (conn->sec_level == BT_SECURITY_HIGH ||
2498             conn->sec_level == BT_SECURITY_FIPS)
2499                 return 1;
2500
2501         /* Reject not secure link */
2502         return 0;
2503 }
2504 EXPORT_SYMBOL(hci_conn_check_secure);
2505
2506 /* Switch role */
2507 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2508 {
2509         BT_DBG("hcon %p", conn);
2510
2511         if (role == conn->role)
2512                 return 1;
2513
2514         if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2515                 struct hci_cp_switch_role cp;
2516                 bacpy(&cp.bdaddr, &conn->dst);
2517                 cp.role = role;
2518                 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2519         }
2520
2521         return 0;
2522 }
2523 EXPORT_SYMBOL(hci_conn_switch_role);
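
/* For example (illustrative), requesting the peripheral role on an
 * established ACL; the result is reported later via the HCI Role Change
 * event.
 *
 *        hci_conn_switch_role(conn, HCI_ROLE_SLAVE);
 */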
2524
2525 /* Enter active mode */
2526 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2527 {
2528         struct hci_dev *hdev = conn->hdev;
2529
2530         BT_DBG("hcon %p mode %d", conn, conn->mode);
2531
2532         if (conn->mode != HCI_CM_SNIFF)
2533                 goto timer;
2534
2535         if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2536                 goto timer;
2537
2538         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2539                 struct hci_cp_exit_sniff_mode cp;
2540                 cp.handle = cpu_to_le16(conn->handle);
2541                 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2542         }
2543
2544 timer:
2545         if (hdev->idle_timeout > 0)
2546                 queue_delayed_work(hdev->workqueue, &conn->idle_work,
2547                                    msecs_to_jiffies(hdev->idle_timeout));
2548 }
2549
2550 /* Drop all connections on the device */
2551 void hci_conn_hash_flush(struct hci_dev *hdev)
2552 {
2553         struct list_head *head = &hdev->conn_hash.list;
2554         struct hci_conn *conn;
2555
2556         BT_DBG("hdev %s", hdev->name);
2557
2558         /* We should not traverse the list here, because hci_conn_del
2559          * can remove extra links, which may cause the list traversal
2560          * to hit items that have already been released.
2561          */
2562         while ((conn = list_first_entry_or_null(head,
2563                                                 struct hci_conn,
2564                                                 list)) != NULL) {
2565                 conn->state = BT_CLOSED;
2566                 hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
2567                 hci_conn_del(conn);
2568         }
2569 }
2570
2571 static u32 get_link_mode(struct hci_conn *conn)
2572 {
2573         u32 link_mode = 0;
2574
2575         if (conn->role == HCI_ROLE_MASTER)
2576                 link_mode |= HCI_LM_MASTER;
2577
2578         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2579                 link_mode |= HCI_LM_ENCRYPT;
2580
2581         if (test_bit(HCI_CONN_AUTH, &conn->flags))
2582                 link_mode |= HCI_LM_AUTH;
2583
2584         if (test_bit(HCI_CONN_SECURE, &conn->flags))
2585                 link_mode |= HCI_LM_SECURE;
2586
2587         if (test_bit(HCI_CONN_FIPS, &conn->flags))
2588                 link_mode |= HCI_LM_FIPS;
2589
2590         return link_mode;
2591 }
2592
2593 int hci_get_conn_list(void __user *arg)
2594 {
2595         struct hci_conn *c;
2596         struct hci_conn_list_req req, *cl;
2597         struct hci_conn_info *ci;
2598         struct hci_dev *hdev;
2599         int n = 0, size, err;
2600
2601         if (copy_from_user(&req, arg, sizeof(req)))
2602                 return -EFAULT;
2603
2604         if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2605                 return -EINVAL;
2606
2607         size = sizeof(req) + req.conn_num * sizeof(*ci);
2608
2609         cl = kmalloc(size, GFP_KERNEL);
2610         if (!cl)
2611                 return -ENOMEM;
2612
2613         hdev = hci_dev_get(req.dev_id);
2614         if (!hdev) {
2615                 kfree(cl);
2616                 return -ENODEV;
2617         }
2618
2619         ci = cl->conn_info;
2620
2621         hci_dev_lock(hdev);
2622         list_for_each_entry(c, &hdev->conn_hash.list, list) {
2623                 bacpy(&(ci + n)->bdaddr, &c->dst);
2624                 (ci + n)->handle = c->handle;
2625                 (ci + n)->type  = c->type;
2626                 (ci + n)->out   = c->out;
2627                 (ci + n)->state = c->state;
2628                 (ci + n)->link_mode = get_link_mode(c);
2629                 if (++n >= req.conn_num)
2630                         break;
2631         }
2632         hci_dev_unlock(hdev);
2633
2634         cl->dev_id = hdev->id;
2635         cl->conn_num = n;
2636         size = sizeof(req) + n * sizeof(*ci);
2637
2638         hci_dev_put(hdev);
2639
2640         err = copy_to_user(arg, cl, size);
2641         kfree(cl);
2642
2643         return err ? -EFAULT : 0;
2644 }
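
/* Userspace counterpart sketch (illustrative, assumes BlueZ-style hci.h
 * definitions): the HCIGETCONNLIST ioctl on a raw HCI socket lands here,
 * with conn_num bounding how many hci_conn_info entries are copied back.
 *
 *        struct hci_conn_list_req *cl;
 *
 *        cl = malloc(sizeof(*cl) + 10 * sizeof(struct hci_conn_info));
 *        cl->dev_id = 0;
 *        cl->conn_num = 10;
 *        if (ioctl(dd, HCIGETCONNLIST, cl) < 0)
 *                perror("HCIGETCONNLIST");
 */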
2645
2646 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2647 {
2648         struct hci_conn_info_req req;
2649         struct hci_conn_info ci;
2650         struct hci_conn *conn;
2651         char __user *ptr = arg + sizeof(req);
2652
2653         if (copy_from_user(&req, arg, sizeof(req)))
2654                 return -EFAULT;
2655
2656         hci_dev_lock(hdev);
2657         conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2658         if (conn) {
2659                 bacpy(&ci.bdaddr, &conn->dst);
2660                 ci.handle = conn->handle;
2661                 ci.type  = conn->type;
2662                 ci.out   = conn->out;
2663                 ci.state = conn->state;
2664                 ci.link_mode = get_link_mode(conn);
2665         }
2666         hci_dev_unlock(hdev);
2667
2668         if (!conn)
2669                 return -ENOENT;
2670
2671         return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2672 }
2673
2674 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2675 {
2676         struct hci_auth_info_req req;
2677         struct hci_conn *conn;
2678
2679         if (copy_from_user(&req, arg, sizeof(req)))
2680                 return -EFAULT;
2681
2682         hci_dev_lock(hdev);
2683         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2684         if (conn)
2685                 req.type = conn->auth_type;
2686         hci_dev_unlock(hdev);
2687
2688         if (!conn)
2689                 return -ENOENT;
2690
2691         return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2692 }
2693
2694 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2695 {
2696         struct hci_dev *hdev = conn->hdev;
2697         struct hci_chan *chan;
2698
2699         BT_DBG("%s hcon %p", hdev->name, conn);
2700
2701         if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2702                 BT_DBG("Refusing to create new hci_chan");
2703                 return NULL;
2704         }
2705
2706         chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2707         if (!chan)
2708                 return NULL;
2709
2710         chan->conn = hci_conn_get(conn);
2711         skb_queue_head_init(&chan->data_q);
2712         chan->state = BT_CONNECTED;
2713
2714         list_add_rcu(&chan->list, &conn->chan_list);
2715
2716         return chan;
2717 }
2718
2719 void hci_chan_del(struct hci_chan *chan)
2720 {
2721         struct hci_conn *conn = chan->conn;
2722         struct hci_dev *hdev = conn->hdev;
2723
2724         BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
2725
2726         list_del_rcu(&chan->list);
2727
2728         synchronize_rcu();
2729
2730         /* Prevent new hci_chans from being created for this hci_conn */
2731         set_bit(HCI_CONN_DROP, &conn->flags);
2732
2733         hci_conn_put(conn);
2734
2735         skb_queue_purge(&chan->data_q);
2736         kfree(chan);
2737 }
2738
2739 void hci_chan_list_flush(struct hci_conn *conn)
2740 {
2741         struct hci_chan *chan, *n;
2742
2743         BT_DBG("hcon %p", conn);
2744
2745         list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2746                 hci_chan_del(chan);
2747 }
2748
2749 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2750                                                  __u16 handle)
2751 {
2752         struct hci_chan *hchan;
2753
2754         list_for_each_entry(hchan, &hcon->chan_list, list) {
2755                 if (hchan->handle == handle)
2756                         return hchan;
2757         }
2758
2759         return NULL;
2760 }
2761
2762 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2763 {
2764         struct hci_conn_hash *h = &hdev->conn_hash;
2765         struct hci_conn *hcon;
2766         struct hci_chan *hchan = NULL;
2767
2768         rcu_read_lock();
2769
2770         list_for_each_entry_rcu(hcon, &h->list, list) {
2771                 hchan = __hci_chan_lookup_handle(hcon, handle);
2772                 if (hchan)
2773                         break;
2774         }
2775
2776         rcu_read_unlock();
2777
2778         return hchan;
2779 }
2780
2781 u32 hci_conn_get_phy(struct hci_conn *conn)
2782 {
2783         u32 phys = 0;
2784
2785         /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2786          * Table 6.2: Packets defined for synchronous, asynchronous, and
2787          * CPB logical transport types.
2788          */
2789         switch (conn->type) {
2790         case SCO_LINK:
2791                 /* SCO logical transport (1 Mb/s):
2792                  * HV1, HV2, HV3 and DV.
2793                  */
2794                 phys |= BT_PHY_BR_1M_1SLOT;
2795
2796                 break;
2797
2798         case ACL_LINK:
2799                 /* ACL logical transport (1 Mb/s) ptt=0:
2800                  * DH1, DM3, DH3, DM5 and DH5.
2801                  */
2802                 phys |= BT_PHY_BR_1M_1SLOT;
2803
2804                 if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2805                         phys |= BT_PHY_BR_1M_3SLOT;
2806
2807                 if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2808                         phys |= BT_PHY_BR_1M_5SLOT;
2809
2810                 /* ACL logical transport (2 Mb/s) ptt=1:
2811                  * 2-DH1, 2-DH3 and 2-DH5.
2812                  */
2813                 if (!(conn->pkt_type & HCI_2DH1))
2814                         phys |= BT_PHY_EDR_2M_1SLOT;
2815
2816                 if (!(conn->pkt_type & HCI_2DH3))
2817                         phys |= BT_PHY_EDR_2M_3SLOT;
2818
2819                 if (!(conn->pkt_type & HCI_2DH5))
2820                         phys |= BT_PHY_EDR_2M_5SLOT;
2821
2822                 /* ACL logical transport (3 Mb/s) ptt=1:
2823                  * 3-DH1, 3-DH3 and 3-DH5.
2824                  */
2825                 if (!(conn->pkt_type & HCI_3DH1))
2826                         phys |= BT_PHY_EDR_3M_1SLOT;
2827
2828                 if (!(conn->pkt_type & HCI_3DH3))
2829                         phys |= BT_PHY_EDR_3M_3SLOT;
2830
2831                 if (!(conn->pkt_type & HCI_3DH5))
2832                         phys |= BT_PHY_EDR_3M_5SLOT;
2833
2834                 break;
2835
2836         case ESCO_LINK:
2837                 /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2838                 phys |= BT_PHY_BR_1M_1SLOT;
2839
2840                 if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2841                         phys |= BT_PHY_BR_1M_3SLOT;
2842
2843                 /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2844                 if (!(conn->pkt_type & ESCO_2EV3))
2845                         phys |= BT_PHY_EDR_2M_1SLOT;
2846
2847                 if (!(conn->pkt_type & ESCO_2EV5))
2848                         phys |= BT_PHY_EDR_2M_3SLOT;
2849
2850                 /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2851                 if (!(conn->pkt_type & ESCO_3EV3))
2852                         phys |= BT_PHY_EDR_3M_1SLOT;
2853
2854                 if (!(conn->pkt_type & ESCO_3EV5))
2855                         phys |= BT_PHY_EDR_3M_3SLOT;
2856
2857                 break;
2858
2859         case LE_LINK:
2860                 if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2861                         phys |= BT_PHY_LE_1M_TX;
2862
2863                 if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2864                         phys |= BT_PHY_LE_1M_RX;
2865
2866                 if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2867                         phys |= BT_PHY_LE_2M_TX;
2868
2869                 if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
2870                         phys |= BT_PHY_LE_2M_RX;
2871
2872                 if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
2873                         phys |= BT_PHY_LE_CODED_TX;
2874
2875                 if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
2876                         phys |= BT_PHY_LE_CODED_RX;
2877
2878                 break;
2879         }
2880
2881         return phys;
2882 }
2883
2884 static int abort_conn_sync(struct hci_dev *hdev, void *data)
2885 {
2886         struct hci_conn *conn = data;
2887
2888         if (!hci_conn_valid(hdev, conn))
2889                 return -ECANCELED;
2890
2891         return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
2892 }
2893
2894 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2895 {
2896         struct hci_dev *hdev = conn->hdev;
2897
2898         /* If abort_reason has already been set it means the connection is
2899          * already being aborted so don't attempt to overwrite it.
2900          */
2901         if (conn->abort_reason)
2902                 return 0;
2903
2904         bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
2905
2906         conn->abort_reason = reason;
2907
2908         /* If the connection is pending, check the command opcode since it
2909          * might be blocking on hci_cmd_sync_work while waiting for its
2910          * respective event, so we need hci_cmd_sync_cancel() to cancel it.
2911          *
2912          * hci_connect_le serializes the connection attempts so only one
2913          * connection can be in BT_CONNECT at a time.
2914          */
2915         if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
2916                 switch (hci_skb_event(hdev->sent_cmd)) {
2917                 case HCI_EV_CONN_COMPLETE:
2918                 case HCI_EV_LE_CONN_COMPLETE:
2919                 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
2920                 case HCI_EVT_LE_CIS_ESTABLISHED:
2921                         hci_cmd_sync_cancel(hdev, ECANCELED);
2922                         break;
2923                 }
2924         /* Cancel connect attempt if still queued/pending */
2925         } else if (!hci_cancel_connect_sync(hdev, conn)) {
2926                 return 0;
2927         }
2928
2929         return hci_cmd_sync_queue_once(hdev, abort_conn_sync, conn, NULL);
2930 }
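
/* For example (illustrative), tearing a connection down on behalf of the
 * local user maps to the standard "Remote User Terminated Connection"
 * reason code:
 *
 *        hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 */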