1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33
34 #include "hci_request.h"
35 #include "smp.h"
36 #include "a2mp.h"
37
38 struct sco_param {
39         u16 pkt_type;
40         u16 max_latency;
41         u8  retrans_effort;
42 };
43
44 static const struct sco_param esco_param_cvsd[] = {
45         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,   0x01 }, /* S3 */
46         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,   0x01 }, /* S2 */
47         { EDR_ESCO_MASK | ESCO_EV3,   0x0007,   0x01 }, /* S1 */
48         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0x01 }, /* D1 */
49         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0x01 }, /* D0 */
50 };
51
52 static const struct sco_param sco_param_cvsd[] = {
53         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0xff }, /* D1 */
54         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0xff }, /* D0 */
55 };
56
57 static const struct sco_param esco_param_msbc[] = {
58         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,   0x02 }, /* T2 */
59         { EDR_ESCO_MASK | ESCO_EV3,   0x0008,   0x02 }, /* T1 */
60 };
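/* The tables above provide fallback parameter sets for synchronous
 * connection setup: hci_setup_sync() below indexes them with
 * (conn->attempt - 1), so each retry after a rejected request moves to a
 * more conservative entry until the table is exhausted. The labels in the
 * comments (S1-S3, D0/D1, T1/T2) refer to the CVSD and mSBC parameter
 * sets defined by the Hands-Free Profile specification.
 */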
61
62 /* This function requires the caller holds hdev->lock */
63 static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
64 {
65         struct hci_conn_params *params;
66         struct hci_dev *hdev = conn->hdev;
67         struct smp_irk *irk;
68         bdaddr_t *bdaddr;
69         u8 bdaddr_type;
70
71         bdaddr = &conn->dst;
72         bdaddr_type = conn->dst_type;
73
74         /* Check if we need to convert to identity address */
75         irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
76         if (irk) {
77                 bdaddr = &irk->bdaddr;
78                 bdaddr_type = irk->addr_type;
79         }
80
81         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
82                                            bdaddr_type);
83         if (!params || !params->explicit_connect)
84                 return;
85
86          /* The connection attempt was doing a scan for a new RPA and is still
87           * in the scanning phase. If the params are not associated with any
88           * other autoconnect action, remove them completely. If they are, just
89           * unmark them as waiting for a connection by clearing explicit_connect.
90           */
91         params->explicit_connect = false;
92
93         list_del_init(&params->action);
94
95         switch (params->auto_connect) {
96         case HCI_AUTO_CONN_EXPLICIT:
97                 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
98                 /* return instead of break to avoid duplicate scan update */
99                 return;
100         case HCI_AUTO_CONN_DIRECT:
101         case HCI_AUTO_CONN_ALWAYS:
102                 list_add(&params->action, &hdev->pend_le_conns);
103                 break;
104         case HCI_AUTO_CONN_REPORT:
105                 list_add(&params->action, &hdev->pend_le_reports);
106                 break;
107         default:
108                 break;
109         }
110
111         hci_update_background_scan(hdev);
112 }
113
114 static void hci_conn_cleanup(struct hci_conn *conn)
115 {
116         struct hci_dev *hdev = conn->hdev;
117
118         if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
119                 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
120
121         hci_chan_list_flush(conn);
122
123         hci_conn_hash_del(hdev, conn);
124
125         if (hdev->notify)
126                 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
127
128         hci_conn_del_sysfs(conn);
129
130         debugfs_remove_recursive(conn->debugfs);
131
132         hci_dev_put(hdev);
133
134         hci_conn_put(conn);
135 }
136
137 static void le_scan_cleanup(struct work_struct *work)
138 {
139         struct hci_conn *conn = container_of(work, struct hci_conn,
140                                              le_scan_cleanup);
141         struct hci_dev *hdev = conn->hdev;
142         struct hci_conn *c = NULL;
143
144         BT_DBG("%s hcon %p", hdev->name, conn);
145
146         hci_dev_lock(hdev);
147
148         /* Check that the hci_conn is still around */
149         rcu_read_lock();
150         list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
151                 if (c == conn)
152                         break;
153         }
154         rcu_read_unlock();
155
156         if (c == conn) {
157                 hci_connect_le_scan_cleanup(conn);
158                 hci_conn_cleanup(conn);
159         }
160
161         hci_dev_unlock(hdev);
162         hci_dev_put(hdev);
163         hci_conn_put(conn);
164 }
165
166 static void hci_connect_le_scan_remove(struct hci_conn *conn)
167 {
168         BT_DBG("%s hcon %p", conn->hdev->name, conn);
169
170         /* We can't call hci_conn_del/hci_conn_cleanup here since that
171          * could deadlock with another hci_conn_del() call that's holding
172          * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
173          * Instead, grab temporary extra references to the hci_dev and
174          * hci_conn and perform the necessary cleanup in a separate work
175          * callback.
176          */
177
178         hci_dev_hold(conn->hdev);
179         hci_conn_get(conn);
180
181         /* Even though we hold a reference to the hdev, many other
182          * things might get cleaned up meanwhile, including the hdev's
183          * own workqueue, so we can't use that for scheduling.
184          */
185         schedule_work(&conn->le_scan_cleanup);
186 }
187
188 static void hci_acl_create_connection(struct hci_conn *conn)
189 {
190         struct hci_dev *hdev = conn->hdev;
191         struct inquiry_entry *ie;
192         struct hci_cp_create_conn cp;
193
194         BT_DBG("hcon %p", conn);
195
196         conn->state = BT_CONNECT;
197         conn->out = true;
198         conn->role = HCI_ROLE_MASTER;
199
200         conn->attempt++;
201
202         conn->link_policy = hdev->link_policy;
203
204         memset(&cp, 0, sizeof(cp));
205         bacpy(&cp.bdaddr, &conn->dst);
206         cp.pscan_rep_mode = 0x02;
207
208         ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
209         if (ie) {
210                 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
211                         cp.pscan_rep_mode = ie->data.pscan_rep_mode;
212                         cp.pscan_mode     = ie->data.pscan_mode;
213                         cp.clock_offset   = ie->data.clock_offset |
214                                             cpu_to_le16(0x8000);
215                 }
216
217                 memcpy(conn->dev_class, ie->data.dev_class, 3);
218                 if (ie->data.ssp_mode > 0)
219                         set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
220         }
221
222         cp.pkt_type = cpu_to_le16(conn->pkt_type);
223         if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
224                 cp.role_switch = 0x01;
225         else
226                 cp.role_switch = 0x00;
227
228         hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
229 }
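/* Note on the command built above: cached inquiry data (page scan
 * repetition mode, page scan mode and clock offset) helps the controller
 * page the remote device faster, and OR-ing the stored clock offset with
 * 0x8000 sets the "clock offset valid" bit of the Create Connection
 * command so the controller actually uses the cached value.
 */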
230
231 int hci_disconnect(struct hci_conn *conn, __u8 reason)
232 {
233         BT_DBG("hcon %p", conn);
234
235         /* When we are master of an established connection and it enters
236          * the disconnect timeout, then go ahead and try to read the
237          * current clock offset.  Processing of the result is done
238          * within the event handling and hci_clock_offset_evt function.
239          */
240         if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
241             (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
242                 struct hci_dev *hdev = conn->hdev;
243                 struct hci_cp_read_clock_offset clkoff_cp;
244
245                 clkoff_cp.handle = cpu_to_le16(conn->handle);
246                 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
247                              &clkoff_cp);
248         }
249
250         return hci_abort_conn(conn, reason);
251 }
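/* Illustrative use (not part of this file): callers pass one of the
 * HCI_ERROR_* reason codes, e.g. when the local user closes the link:
 *
 *	hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * The actual disconnect is then driven by hci_abort_conn() above.
 */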
252
253 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
254 {
255         struct hci_dev *hdev = conn->hdev;
256         struct hci_cp_add_sco cp;
257
258         BT_DBG("hcon %p", conn);
259
260         conn->state = BT_CONNECT;
261         conn->out = true;
262
263         conn->attempt++;
264
265         cp.handle   = cpu_to_le16(handle);
266         cp.pkt_type = cpu_to_le16(conn->pkt_type);
267
268         hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
269 }
270
271 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
272 {
273         struct hci_dev *hdev = conn->hdev;
274         struct hci_cp_setup_sync_conn cp;
275         const struct sco_param *param;
276
277         BT_DBG("hcon %p", conn);
278
279         conn->state = BT_CONNECT;
280         conn->out = true;
281
282         conn->attempt++;
283
284         cp.handle   = cpu_to_le16(handle);
285
286         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
287         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
288         cp.voice_setting  = cpu_to_le16(conn->setting);
289
290         switch (conn->setting & SCO_AIRMODE_MASK) {
291         case SCO_AIRMODE_TRANSP:
292                 if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
293                         return false;
294                 param = &esco_param_msbc[conn->attempt - 1];
295                 break;
296         case SCO_AIRMODE_CVSD:
297                 if (lmp_esco_capable(conn->link)) {
298                         if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
299                                 return false;
300                         param = &esco_param_cvsd[conn->attempt - 1];
301                 } else {
302                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
303                                 return false;
304                         param = &sco_param_cvsd[conn->attempt - 1];
305                 }
306                 break;
307         default:
308                 return false;
309         }
310
311         cp.retrans_effort = param->retrans_effort;
312         cp.pkt_type = __cpu_to_le16(param->pkt_type);
313         cp.max_latency = __cpu_to_le16(param->max_latency);
314
315         if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
316                 return false;
317
318         return true;
319 }
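/* hci_setup_sync() returning false means the parameter table for the
 * requested air mode is exhausted (or the command could not be sent), and
 * the synchronous connection setup is treated as failed. A successfully
 * sent command may still be rejected by the controller, in which case the
 * completion handling retries with conn->attempt incremented so the next,
 * more conservative table entry is used.
 */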
320
321 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
322                       u16 to_multiplier)
323 {
324         struct hci_dev *hdev = conn->hdev;
325         struct hci_conn_params *params;
326         struct hci_cp_le_conn_update cp;
327
328         hci_dev_lock(hdev);
329
330         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
331         if (params) {
332                 params->conn_min_interval = min;
333                 params->conn_max_interval = max;
334                 params->conn_latency = latency;
335                 params->supervision_timeout = to_multiplier;
336         }
337
338         hci_dev_unlock(hdev);
339
340         memset(&cp, 0, sizeof(cp));
341         cp.handle               = cpu_to_le16(conn->handle);
342         cp.conn_interval_min    = cpu_to_le16(min);
343         cp.conn_interval_max    = cpu_to_le16(max);
344         cp.conn_latency         = cpu_to_le16(latency);
345         cp.supervision_timeout  = cpu_to_le16(to_multiplier);
346         cp.min_ce_len           = cpu_to_le16(0x0000);
347         cp.max_ce_len           = cpu_to_le16(0x0000);
348
349         hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
350
351         if (params)
352                 return 0x01;
353
354         return 0x00;
355 }
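/* Illustrative use (example values only): request a 30-50 ms connection
 * interval with no slave latency and a 4.2 s supervision timeout. The
 * units follow the LE Connection Update command: interval in 1.25 ms
 * steps, timeout in 10 ms steps. The return value only tells the caller
 * whether stored connection parameters were updated as well (0x01) or
 * not (0x00).
 *
 *	hci_le_conn_update(conn, 0x0018, 0x0028, 0x0000, 0x01a4);
 */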
356
357 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
358                       __u8 ltk[16], __u8 key_size)
359 {
360         struct hci_dev *hdev = conn->hdev;
361         struct hci_cp_le_start_enc cp;
362
363         BT_DBG("hcon %p", conn);
364
365         memset(&cp, 0, sizeof(cp));
366
367         cp.handle = cpu_to_le16(conn->handle);
368         cp.rand = rand;
369         cp.ediv = ediv;
370         memcpy(cp.ltk, ltk, key_size);
371
372         hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
373 }
374
375 /* Device _must_ be locked */
376 void hci_sco_setup(struct hci_conn *conn, __u8 status)
377 {
378         struct hci_conn *sco = conn->link;
379
380         if (!sco)
381                 return;
382
383         BT_DBG("hcon %p", conn);
384
385         if (!status) {
386                 if (lmp_esco_capable(conn->hdev))
387                         hci_setup_sync(sco, conn->handle);
388                 else
389                         hci_add_sco(sco, conn->handle);
390         } else {
391                 hci_connect_cfm(sco, status);
392                 hci_conn_del(sco);
393         }
394 }
395
396 static void hci_conn_timeout(struct work_struct *work)
397 {
398         struct hci_conn *conn = container_of(work, struct hci_conn,
399                                              disc_work.work);
400         int refcnt = atomic_read(&conn->refcnt);
401
402         BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
403
404         WARN_ON(refcnt < 0);
405
406         /* FIXME: It was observed that in a failed pairing scenario the refcnt
407          * drops below 0. This is probably because l2cap_conn_del calls
408          * l2cap_chan_del for each channel, and inside l2cap_chan_del the conn
409          * reference is dropped. After that loop hci_chan_del is called, which
410          * also drops conn. For now make sure that the ACL is kept alive if the
411          * refcnt is higher than 0, otherwise drop it.
412          */
413         if (refcnt > 0)
414                 return;
415
416         /* LE connections in scanning state need special handling */
417         if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
418             test_bit(HCI_CONN_SCANNING, &conn->flags)) {
419                 hci_connect_le_scan_remove(conn);
420                 return;
421         }
422
423         hci_abort_conn(conn, hci_proto_disconn_ind(conn));
424 }
425
426 /* Enter sniff mode */
427 static void hci_conn_idle(struct work_struct *work)
428 {
429         struct hci_conn *conn = container_of(work, struct hci_conn,
430                                              idle_work.work);
431         struct hci_dev *hdev = conn->hdev;
432
433         BT_DBG("hcon %p mode %d", conn, conn->mode);
434
435         if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
436                 return;
437
438         if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
439                 return;
440
441         if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
442                 struct hci_cp_sniff_subrate cp;
443                 cp.handle             = cpu_to_le16(conn->handle);
444                 cp.max_latency        = cpu_to_le16(0);
445                 cp.min_remote_timeout = cpu_to_le16(0);
446                 cp.min_local_timeout  = cpu_to_le16(0);
447                 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
448         }
449
450         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
451                 struct hci_cp_sniff_mode cp;
452                 cp.handle       = cpu_to_le16(conn->handle);
453                 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
454                 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
455                 cp.attempt      = cpu_to_le16(4);
456                 cp.timeout      = cpu_to_le16(1);
457                 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
458         }
459 }
460
461 static void hci_conn_auto_accept(struct work_struct *work)
462 {
463         struct hci_conn *conn = container_of(work, struct hci_conn,
464                                              auto_accept_work.work);
465
466         hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
467                      &conn->dst);
468 }
469
470 static void le_conn_timeout(struct work_struct *work)
471 {
472         struct hci_conn *conn = container_of(work, struct hci_conn,
473                                              le_conn_timeout.work);
474         struct hci_dev *hdev = conn->hdev;
475
476         BT_DBG("");
477
478         /* We could end up here due to having done directed advertising,
479          * so clean up the state if necessary. However, this should only
480          * happen with broken hardware or if low duty cycle advertising was
481          * used (which doesn't have a timeout of its own).
482          */
483         if (conn->role == HCI_ROLE_SLAVE) {
484                 u8 enable = 0x00;
485                 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
486                              &enable);
487                 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
488                 return;
489         }
490
491         hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
492 }
493
494 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
495                               u8 role)
496 {
497         struct hci_conn *conn;
498
499         BT_DBG("%s dst %pMR", hdev->name, dst);
500
501         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
502         if (!conn)
503                 return NULL;
504
505         bacpy(&conn->dst, dst);
506         bacpy(&conn->src, &hdev->bdaddr);
507         conn->hdev  = hdev;
508         conn->type  = type;
509         conn->role  = role;
510         conn->mode  = HCI_CM_ACTIVE;
511         conn->state = BT_OPEN;
512         conn->auth_type = HCI_AT_GENERAL_BONDING;
513         conn->io_capability = hdev->io_capability;
514         conn->remote_auth = 0xff;
515         conn->key_type = 0xff;
516         conn->rssi = HCI_RSSI_INVALID;
517         conn->tx_power = HCI_TX_POWER_INVALID;
518         conn->max_tx_power = HCI_TX_POWER_INVALID;
519
520         set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
521         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
522
523         /* Set Default Authenticated payload timeout to 30s */
524         conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
525
526         if (conn->role == HCI_ROLE_MASTER)
527                 conn->out = true;
528
529         switch (type) {
530         case ACL_LINK:
531                 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
532                 break;
533         case LE_LINK:
534                 /* conn->src should reflect the local identity address */
535                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
536                 break;
537         case SCO_LINK:
538                 if (lmp_esco_capable(hdev))
539                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
540                                         (hdev->esco_type & EDR_ESCO_MASK);
541                 else
542                         conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
543                 break;
544         case ESCO_LINK:
545                 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
546                 break;
547         }
548
549         skb_queue_head_init(&conn->data_q);
550
551         INIT_LIST_HEAD(&conn->chan_list);
552
553         INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
554         INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
555         INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
556         INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
557         INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
558
559         atomic_set(&conn->refcnt, 0);
560
561         hci_dev_hold(hdev);
562
563         hci_conn_hash_add(hdev, conn);
564         if (hdev->notify)
565                 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
566
567         hci_conn_init_sysfs(conn);
568
569         return conn;
570 }
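/* A freshly added hci_conn starts with a refcount of 0; it is the
 * caller's job to take a reference before using it. A minimal sketch,
 * mirroring how hci_connect_acl() below uses this helper:
 *
 *	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
 *	if (!acl)
 *		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
 *	if (acl)
 *		hci_conn_hold(acl);
 */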
571
572 int hci_conn_del(struct hci_conn *conn)
573 {
574         struct hci_dev *hdev = conn->hdev;
575
576         BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
577
578         cancel_delayed_work_sync(&conn->disc_work);
579         cancel_delayed_work_sync(&conn->auto_accept_work);
580         cancel_delayed_work_sync(&conn->idle_work);
581
582         if (conn->type == ACL_LINK) {
583                 struct hci_conn *sco = conn->link;
584                 if (sco)
585                         sco->link = NULL;
586
587                 /* Unacked frames */
588                 hdev->acl_cnt += conn->sent;
589         } else if (conn->type == LE_LINK) {
590                 cancel_delayed_work(&conn->le_conn_timeout);
591
592                 if (hdev->le_pkts)
593                         hdev->le_cnt += conn->sent;
594                 else
595                         hdev->acl_cnt += conn->sent;
596         } else {
597                 struct hci_conn *acl = conn->link;
598                 if (acl) {
599                         acl->link = NULL;
600                         hci_conn_drop(acl);
601                 }
602         }
603
604         if (conn->amp_mgr)
605                 amp_mgr_put(conn->amp_mgr);
606
607         skb_queue_purge(&conn->data_q);
608
609         /* Remove the connection from the list and cleanup its remaining
610          * state. This is a separate function since for some cases like
611          * BT_CONNECT_SCAN we *only* want the cleanup part without the
612          * rest of hci_conn_del.
613          */
614         hci_conn_cleanup(conn);
615
616         return 0;
617 }
618
619 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
620 {
621         int use_src = bacmp(src, BDADDR_ANY);
622         struct hci_dev *hdev = NULL, *d;
623
624         BT_DBG("%pMR -> %pMR", src, dst);
625
626         read_lock(&hci_dev_list_lock);
627
628         list_for_each_entry(d, &hci_dev_list, list) {
629                 if (!test_bit(HCI_UP, &d->flags) ||
630                     hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
631                     d->dev_type != HCI_PRIMARY)
632                         continue;
633
634                 /* Simple routing:
635                  *   No source address - find interface with bdaddr != dst
636                  *   Source address    - find interface with bdaddr == src
637                  */
638
639                 if (use_src) {
640                         bdaddr_t id_addr;
641                         u8 id_addr_type;
642
643                         if (src_type == BDADDR_BREDR) {
644                                 if (!lmp_bredr_capable(d))
645                                         continue;
646                                 bacpy(&id_addr, &d->bdaddr);
647                                 id_addr_type = BDADDR_BREDR;
648                         } else {
649                                 if (!lmp_le_capable(d))
650                                         continue;
651
652                                 hci_copy_identity_address(d, &id_addr,
653                                                           &id_addr_type);
654
655                                 /* Convert from HCI to three-value type */
656                                 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
657                                         id_addr_type = BDADDR_LE_PUBLIC;
658                                 else
659                                         id_addr_type = BDADDR_LE_RANDOM;
660                         }
661
662                         if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
663                                 hdev = d; break;
664                         }
665                 } else {
666                         if (bacmp(&d->bdaddr, dst)) {
667                                 hdev = d; break;
668                         }
669                 }
670         }
671
672         if (hdev)
673                 hdev = hci_dev_hold(hdev);
674
675         read_unlock(&hci_dev_list_lock);
676         return hdev;
677 }
678 EXPORT_SYMBOL(hci_get_route);
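/* Illustrative sketch (assumed caller context): resolve which local
 * adapter should be used for an outgoing connection and drop the
 * reference once done. hci_get_route() returns the hdev with its
 * reference count raised, or NULL if no suitable adapter is up.
 *
 *	hdev = hci_get_route(&dst, &src, BDADDR_BREDR);
 *	if (!hdev)
 *		return -EHOSTUNREACH;
 *	...
 *	hci_dev_put(hdev);
 */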
679
680 /* This function requires the caller holds hdev->lock */
681 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
682 {
683         struct hci_dev *hdev = conn->hdev;
684         struct hci_conn_params *params;
685
686         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
687                                            conn->dst_type);
688         if (params && params->conn) {
689                 hci_conn_drop(params->conn);
690                 hci_conn_put(params->conn);
691                 params->conn = NULL;
692         }
693
694         conn->state = BT_CLOSED;
695
696         /* If the status indicates successful cancellation of
697          * the attempt (i.e. Unknown Connection Id) there's no point in
698          * notifying failure since we'll go back to keep trying to
699          * connect. The only exception is explicit connect requests
700          * where a timeout + cancel does indicate an actual failure.
701          */
702         if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
703             (params && params->explicit_connect))
704                 mgmt_connect_failed(hdev, &conn->dst, conn->type,
705                                     conn->dst_type, status);
706
707         hci_connect_cfm(conn, status);
708
709         hci_conn_del(conn);
710
711         /* Since we may have temporarily stopped the background scanning in
712          * favor of connection establishment, we should restart it.
713          */
714         hci_update_background_scan(hdev);
715
716         /* Re-enable advertising in case this was a failed connection
717          * attempt as a peripheral.
718          */
719         hci_req_reenable_advertising(hdev);
720 }
721
722 static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
723 {
724         struct hci_conn *conn;
725
726         hci_dev_lock(hdev);
727
728         conn = hci_lookup_le_connect(hdev);
729
730         if (!status) {
731                 hci_connect_le_scan_cleanup(conn);
732                 goto done;
733         }
734
735         bt_dev_err(hdev, "request failed to create LE connection: "
736                    "status 0x%2.2x", status);
737
738         if (!conn)
739                 goto done;
740
741         hci_le_conn_failed(conn, status);
742
743 done:
744         hci_dev_unlock(hdev);
745 }
746
747 static bool conn_use_rpa(struct hci_conn *conn)
748 {
749         struct hci_dev *hdev = conn->hdev;
750
751         return hci_dev_test_flag(hdev, HCI_PRIVACY);
752 }
753
754 static void set_ext_conn_params(struct hci_conn *conn,
755                                 struct hci_cp_le_ext_conn_param *p)
756 {
757         struct hci_dev *hdev = conn->hdev;
758
759         memset(p, 0, sizeof(*p));
760
761         /* Set window to be the same value as the interval to
762          * enable continuous scanning.
763          */
764         p->scan_interval = cpu_to_le16(hdev->le_scan_interval);
765         p->scan_window = p->scan_interval;
766         p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
767         p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
768         p->conn_latency = cpu_to_le16(conn->le_conn_latency);
769         p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
770         p->min_ce_len = cpu_to_le16(0x0000);
771         p->max_ce_len = cpu_to_le16(0x0000);
772 }
773
774 static void hci_req_add_le_create_conn(struct hci_request *req,
775                                        struct hci_conn *conn,
776                                        bdaddr_t *direct_rpa)
777 {
778         struct hci_dev *hdev = conn->hdev;
779         u8 own_addr_type;
780
781         /* If a direct address was provided, use it instead of the
782          * current address.
783          */
784         if (direct_rpa) {
785                 if (bacmp(&req->hdev->random_addr, direct_rpa))
786                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
787                                                                 direct_rpa);
788
789                 /* direct address is always RPA */
790                 own_addr_type = ADDR_LE_DEV_RANDOM;
791         } else {
792                 /* Update the random address, but set require_privacy to false
793                  * so that we never connect with a non-resolvable address.
794                  */
795                 if (hci_update_random_address(req, false, conn_use_rpa(conn),
796                                               &own_addr_type))
797                         return;
798         }
799
800         if (use_ext_conn(hdev)) {
801                 struct hci_cp_le_ext_create_conn *cp;
802                 struct hci_cp_le_ext_conn_param *p;
803                 u8 data[sizeof(*cp) + sizeof(*p) * 3];
804                 u32 plen;
805
806                 cp = (void *) data;
807                 p = (void *) cp->data;
808
809                 memset(cp, 0, sizeof(*cp));
810
811                 bacpy(&cp->peer_addr, &conn->dst);
812                 cp->peer_addr_type = conn->dst_type;
813                 cp->own_addr_type = own_addr_type;
814
815                 plen = sizeof(*cp);
816
817                 if (scan_1m(hdev)) {
818                         cp->phys |= LE_SCAN_PHY_1M;
819                         set_ext_conn_params(conn, p);
820
821                         p++;
822                         plen += sizeof(*p);
823                 }
824
825                 if (scan_2m(hdev)) {
826                         cp->phys |= LE_SCAN_PHY_2M;
827                         set_ext_conn_params(conn, p);
828
829                         p++;
830                         plen += sizeof(*p);
831                 }
832
833                 if (scan_coded(hdev)) {
834                         cp->phys |= LE_SCAN_PHY_CODED;
835                         set_ext_conn_params(conn, p);
836
837                         plen += sizeof(*p);
838                 }
839
840                 hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);
841
842         } else {
843                 struct hci_cp_le_create_conn cp;
844
845                 memset(&cp, 0, sizeof(cp));
846
847                 /* Set window to be the same value as the interval to enable
848                  * continuous scanning.
849                  */
850                 cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
851                 cp.scan_window = cp.scan_interval;
852
853                 bacpy(&cp.peer_addr, &conn->dst);
854                 cp.peer_addr_type = conn->dst_type;
855                 cp.own_address_type = own_addr_type;
856                 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
857                 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
858                 cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
859                 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
860                 cp.min_ce_len = cpu_to_le16(0x0000);
861                 cp.max_ce_len = cpu_to_le16(0x0000);
862
863                 hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
864         }
865
866         conn->state = BT_CONNECT;
867         clear_bit(HCI_CONN_SCANNING, &conn->flags);
868 }
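/* For controllers with extended create-connection support the command
 * payload above is variable length: one hci_cp_le_ext_conn_param block
 * is appended per PHY enabled in cp->phys (1M, 2M, Coded), and plen
 * tracks the resulting size. Legacy controllers get the fixed-size
 * HCI_OP_LE_CREATE_CONN command instead.
 */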
869
870 static void hci_req_directed_advertising(struct hci_request *req,
871                                          struct hci_conn *conn)
872 {
873         struct hci_dev *hdev = req->hdev;
874         u8 own_addr_type;
875         u8 enable;
876
877         if (ext_adv_capable(hdev)) {
878                 struct hci_cp_le_set_ext_adv_params cp;
879                 bdaddr_t random_addr;
880
881                 /* Set require_privacy to false so that the remote device has a
882                  * chance of identifying us.
883                  */
884                 if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
885                                            &own_addr_type, &random_addr) < 0)
886                         return;
887
888                 memset(&cp, 0, sizeof(cp));
889
890                 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
891                 cp.own_addr_type = own_addr_type;
892                 cp.channel_map = hdev->le_adv_channel_map;
893                 cp.tx_power = HCI_TX_POWER_INVALID;
894                 cp.primary_phy = HCI_ADV_PHY_1M;
895                 cp.secondary_phy = HCI_ADV_PHY_1M;
896                 cp.handle = 0; /* Use instance 0 for directed adv */
897                 cp.own_addr_type = own_addr_type;
898                 cp.peer_addr_type = conn->dst_type;
899                 bacpy(&cp.peer_addr, &conn->dst);
900
901                 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
902
903                 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
904                     bacmp(&random_addr, BDADDR_ANY) &&
905                     bacmp(&random_addr, &hdev->random_addr)) {
906                         struct hci_cp_le_set_adv_set_rand_addr cp;
907
908                         memset(&cp, 0, sizeof(cp));
909
910                         cp.handle = 0;
911                         bacpy(&cp.bdaddr, &random_addr);
912
913                         hci_req_add(req,
914                                     HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
915                                     sizeof(cp), &cp);
916                 }
917
918                 __hci_req_enable_ext_advertising(req, 0x00);
919         } else {
920                 struct hci_cp_le_set_adv_param cp;
921
922                 /* Clear the HCI_LE_ADV bit temporarily so that the
923                  * hci_update_random_address knows that it's safe to go ahead
924                  * and write a new random address. The flag will be set back on
925                  * as soon as the SET_ADV_ENABLE HCI command completes.
926                  */
927                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
928
929                 /* Set require_privacy to false so that the remote device has a
930                  * chance of identifying us.
931                  */
932                 if (hci_update_random_address(req, false, conn_use_rpa(conn),
933                                               &own_addr_type) < 0)
934                         return;
935
936                 memset(&cp, 0, sizeof(cp));
937                 cp.type = LE_ADV_DIRECT_IND;
938                 cp.own_address_type = own_addr_type;
939                 cp.direct_addr_type = conn->dst_type;
940                 bacpy(&cp.direct_addr, &conn->dst);
941                 cp.channel_map = hdev->le_adv_channel_map;
942
943                 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
944
945                 enable = 0x01;
946                 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
947                             &enable);
948         }
949
950         conn->state = BT_CONNECT;
951 }
952
953 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
954                                 u8 dst_type, u8 sec_level, u16 conn_timeout,
955                                 u8 role, bdaddr_t *direct_rpa)
956 {
957         struct hci_conn_params *params;
958         struct hci_conn *conn;
959         struct smp_irk *irk;
960         struct hci_request req;
961         int err;
962
963         /* Let's make sure that LE is enabled. */
964         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
965                 if (lmp_le_capable(hdev))
966                         return ERR_PTR(-ECONNREFUSED);
967
968                 return ERR_PTR(-EOPNOTSUPP);
969         }
970
971         /* Since the controller supports only one LE connection attempt at a
972          * time, we return -EBUSY if there is any connection attempt running.
973          */
974         if (hci_lookup_le_connect(hdev))
975                 return ERR_PTR(-EBUSY);
976
977         /* If there's already a connection object but it's not in
978          * scanning state it means it must already be established, in
979          * which case we can't do anything else except report a failure
980          * to connect.
981          */
982         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
983         if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
984                 return ERR_PTR(-EBUSY);
985         }
986
987         /* When given an identity address with existing identity
988          * resolving key, the connection needs to be established
989          * to a resolvable random address.
990          *
991          * Storing the resolvable random address is required here
992          * to handle connection failures. The address will later
993          * be resolved back into the original identity address
994          * from the connect request.
995          */
996         irk = hci_find_irk_by_addr(hdev, dst, dst_type);
997         if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
998                 dst = &irk->rpa;
999                 dst_type = ADDR_LE_DEV_RANDOM;
1000         }
1001
1002         if (conn) {
1003                 bacpy(&conn->dst, dst);
1004         } else {
1005                 conn = hci_conn_add(hdev, LE_LINK, dst, role);
1006                 if (!conn)
1007                         return ERR_PTR(-ENOMEM);
1008                 hci_conn_hold(conn);
1009                 conn->pending_sec_level = sec_level;
1010         }
1011
1012         conn->dst_type = dst_type;
1013         conn->sec_level = BT_SECURITY_LOW;
1014         conn->conn_timeout = conn_timeout;
1015
1016         hci_req_init(&req, hdev);
1017
1018         /* Disable advertising if it is active. For master role
1019          * connections most controllers will refuse to connect if
1020          * advertising is enabled, and for slave role connections we
1021          * have to disable it anyway in order to start directed
1022          * advertising.
1023          */
1024         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1025                 u8 enable = 0x00;
1026                 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
1027                             &enable);
1028         }
1029
1030         /* If requested to connect as slave use directed advertising */
1031         if (conn->role == HCI_ROLE_SLAVE) {
1032                 /* If we're actively scanning, most controllers are unable
1033                  * to initiate advertising. Simply reject the attempt.
1034                  */
1035                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
1036                     hdev->le_scan_type == LE_SCAN_ACTIVE) {
1037                         hci_req_purge(&req);
1038                         hci_conn_del(conn);
1039                         return ERR_PTR(-EBUSY);
1040                 }
1041
1042                 hci_req_directed_advertising(&req, conn);
1043                 goto create_conn;
1044         }
1045
1046         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
1047         if (params) {
1048                 conn->le_conn_min_interval = params->conn_min_interval;
1049                 conn->le_conn_max_interval = params->conn_max_interval;
1050                 conn->le_conn_latency = params->conn_latency;
1051                 conn->le_supv_timeout = params->supervision_timeout;
1052         } else {
1053                 conn->le_conn_min_interval = hdev->le_conn_min_interval;
1054                 conn->le_conn_max_interval = hdev->le_conn_max_interval;
1055                 conn->le_conn_latency = hdev->le_conn_latency;
1056                 conn->le_supv_timeout = hdev->le_supv_timeout;
1057         }
1058
1059         /* If controller is scanning, we stop it since some controllers are
1060          * not able to scan and connect at the same time. Also set the
1061          * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
1062          * handler for scan disabling knows to set the correct discovery
1063          * state.
1064          */
1065         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1066                 hci_req_add_le_scan_disable(&req);
1067                 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
1068         }
1069
1070         hci_req_add_le_create_conn(&req, conn, direct_rpa);
1071
1072 create_conn:
1073         err = hci_req_run(&req, create_le_conn_complete);
1074         if (err) {
1075                 hci_conn_del(conn);
1076                 return ERR_PTR(err);
1077         }
1078
1079         return conn;
1080 }
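/* Illustrative sketch (assumed caller context): initiating an LE
 * connection as central. The function returns an ERR_PTR on failure and
 * is expected to be called with hdev->lock held.
 *
 *	hci_dev_lock(hdev);
 *	conn = hci_connect_le(hdev, &dst, ADDR_LE_DEV_PUBLIC, BT_SECURITY_LOW,
 *			      HCI_LE_CONN_TIMEOUT, HCI_ROLE_MASTER, NULL);
 *	hci_dev_unlock(hdev);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 */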
1081
1082 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1083 {
1084         struct hci_conn *conn;
1085
1086         conn = hci_conn_hash_lookup_le(hdev, addr, type);
1087         if (!conn)
1088                 return false;
1089
1090         if (conn->state != BT_CONNECTED)
1091                 return false;
1092
1093         return true;
1094 }
1095
1096 /* This function requires the caller holds hdev->lock */
1097 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1098                                         bdaddr_t *addr, u8 addr_type)
1099 {
1100         struct hci_conn_params *params;
1101
1102         if (is_connected(hdev, addr, addr_type))
1103                 return -EISCONN;
1104
1105         params = hci_conn_params_lookup(hdev, addr, addr_type);
1106         if (!params) {
1107                 params = hci_conn_params_add(hdev, addr, addr_type);
1108                 if (!params)
1109                         return -ENOMEM;
1110
1111                 /* If we created new params, mark them to be deleted in
1112                  * hci_connect_le_scan_cleanup. It's a different case from
1113                  * existing disabled params; those will stay after cleanup.
1114                  */
1115                 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1116         }
1117
1118         /* We're trying to connect, so make sure params are at pend_le_conns */
1119         if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1120             params->auto_connect == HCI_AUTO_CONN_REPORT ||
1121             params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1122                 list_del_init(&params->action);
1123                 list_add(&params->action, &hdev->pend_le_conns);
1124         }
1125
1126         params->explicit_connect = true;
1127
1128         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1129                params->auto_connect);
1130
1131         return 0;
1132 }
1133
1134 /* This function requires the caller holds hdev->lock */
1135 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1136                                      u8 dst_type, u8 sec_level,
1137                                      u16 conn_timeout)
1138 {
1139         struct hci_conn *conn;
1140
1141         /* Let's make sure that LE is enabled. */
1142         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1143                 if (lmp_le_capable(hdev))
1144                         return ERR_PTR(-ECONNREFUSED);
1145
1146                 return ERR_PTR(-EOPNOTSUPP);
1147         }
1148
1149         /* Some devices send ATT messages as soon as the physical link is
1150          * established. To be able to handle these ATT messages, userspace
1151          * first establishes the connection and then starts the pairing
1152          * process.
1153          *
1154          * So if a hci_conn object already exists for the following connection
1155          * attempt, we simply update the pending_sec_level field and return
1156          * the object found.
1157          */
1158         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1159         if (conn) {
1160                 if (conn->pending_sec_level < sec_level)
1161                         conn->pending_sec_level = sec_level;
1162                 goto done;
1163         }
1164
1165         BT_DBG("requesting refresh of dst_addr");
1166
1167         conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1168         if (!conn)
1169                 return ERR_PTR(-ENOMEM);
1170
1171         if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0)
1172                 return ERR_PTR(-EBUSY);
1173
1174         conn->state = BT_CONNECT;
1175         set_bit(HCI_CONN_SCANNING, &conn->flags);
1176         conn->dst_type = dst_type;
1177         conn->sec_level = BT_SECURITY_LOW;
1178         conn->pending_sec_level = sec_level;
1179         conn->conn_timeout = conn_timeout;
1180
1181         hci_update_background_scan(hdev);
1182
1183 done:
1184         hci_conn_hold(conn);
1185         return conn;
1186 }
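/* Unlike hci_connect_le() above, hci_connect_le_scan() does not send a
 * create-connection command directly. It only queues the target on
 * hdev->pend_le_conns (via hci_explicit_conn_params_set()) and kicks the
 * background scan; the connection is created later, once the device is
 * actually seen advertising.
 */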
1187
1188 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1189                                  u8 sec_level, u8 auth_type)
1190 {
1191         struct hci_conn *acl;
1192
1193         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1194                 if (lmp_bredr_capable(hdev))
1195                         return ERR_PTR(-ECONNREFUSED);
1196
1197                 return ERR_PTR(-EOPNOTSUPP);
1198         }
1199
1200         acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1201         if (!acl) {
1202                 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1203                 if (!acl)
1204                         return ERR_PTR(-ENOMEM);
1205         }
1206
1207         hci_conn_hold(acl);
1208
1209         if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1210                 acl->sec_level = BT_SECURITY_LOW;
1211                 acl->pending_sec_level = sec_level;
1212                 acl->auth_type = auth_type;
1213                 hci_acl_create_connection(acl);
1214         }
1215
1216         return acl;
1217 }
1218
1219 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1220                                  __u16 setting)
1221 {
1222         struct hci_conn *acl;
1223         struct hci_conn *sco;
1224
1225         acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
1226         if (IS_ERR(acl))
1227                 return acl;
1228
1229         sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1230         if (!sco) {
1231                 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
1232                 if (!sco) {
1233                         hci_conn_drop(acl);
1234                         return ERR_PTR(-ENOMEM);
1235                 }
1236         }
1237
1238         acl->link = sco;
1239         sco->link = acl;
1240
1241         hci_conn_hold(sco);
1242
1243         sco->setting = setting;
1244
1245         if (acl->state == BT_CONNECTED &&
1246             (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1247                 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1248                 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1249
1250                 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1251                         /* defer SCO setup until mode change completed */
1252                         set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1253                         return sco;
1254                 }
1255
1256                 hci_sco_setup(acl, 0x00);
1257         }
1258
1259         return sco;
1260 }
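/* Illustrative use (assumed caller context, modelled on the SCO socket
 * code): the caller picks SCO_LINK or ESCO_LINK depending on controller
 * support and passes the negotiated voice setting; the underlying ACL
 * link is created or reused by hci_connect_acl() internally.
 *
 *	type = lmp_esco_capable(hdev) ? ESCO_LINK : SCO_LINK;
 *	sco = hci_connect_sco(hdev, type, &dst, setting);
 *	if (IS_ERR(sco))
 *		return PTR_ERR(sco);
 */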
1261
1262 /* Check link security requirement */
1263 int hci_conn_check_link_mode(struct hci_conn *conn)
1264 {
1265         BT_DBG("hcon %p", conn);
1266
1267         /* In Secure Connections Only mode, it is required that Secure
1268          * Connections is used and the link is encrypted with AES-CCM
1269          * using a P-256 authenticated combination key.
1270          */
1271         if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
1272                 if (!hci_conn_sc_enabled(conn) ||
1273                     !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
1274                     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
1275                         return 0;
1276         }
1277
1278         if (hci_conn_ssp_enabled(conn) &&
1279             !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1280                 return 0;
1281
1282         return 1;
1283 }
1284
1285 /* Authenticate remote device */
1286 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
1287 {
1288         BT_DBG("hcon %p", conn);
1289
1290         if (conn->pending_sec_level > sec_level)
1291                 sec_level = conn->pending_sec_level;
1292
1293         if (sec_level > conn->sec_level)
1294                 conn->pending_sec_level = sec_level;
1295         else if (test_bit(HCI_CONN_AUTH, &conn->flags))
1296                 return 1;
1297
1298         /* Make sure we preserve an existing MITM requirement */
1299         auth_type |= (conn->auth_type & 0x01);
1300
1301         conn->auth_type = auth_type;
1302
1303         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1304                 struct hci_cp_auth_requested cp;
1305
1306                 cp.handle = cpu_to_le16(conn->handle);
1307                 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
1308                              sizeof(cp), &cp);
1309
1310                 /* If we're already encrypted set the REAUTH_PEND flag,
1311                  * otherwise set the ENCRYPT_PEND.
1312                  */
1313                 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1314                         set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1315                 else
1316                         set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1317         }
1318
1319         return 0;
1320 }
1321
1322 /* Encrypt the link */
1323 static void hci_conn_encrypt(struct hci_conn *conn)
1324 {
1325         BT_DBG("hcon %p", conn);
1326
1327         if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1328                 struct hci_cp_set_conn_encrypt cp;
1329                 cp.handle  = cpu_to_le16(conn->handle);
1330                 cp.encrypt = 0x01;
1331                 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1332                              &cp);
1333         }
1334 }
1335
1336 /* Enable security */
1337 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
1338                       bool initiator)
1339 {
1340         BT_DBG("hcon %p", conn);
1341
1342         if (conn->type == LE_LINK)
1343                 return smp_conn_security(conn, sec_level);
1344
1345         /* For SDP we don't need the link key. */
1346         if (sec_level == BT_SECURITY_SDP)
1347                 return 1;
1348
1349         /* For non-2.1 devices and low security levels we don't need the
1350            link key. */
1351         if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
1352                 return 1;
1353
1354         /* For other security levels we need the link key. */
1355         if (!test_bit(HCI_CONN_AUTH, &conn->flags))
1356                 goto auth;
1357
1358         /* An authenticated FIPS-approved combination key has sufficient
1359          * security for security level 4. */
1360         if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
1361             sec_level == BT_SECURITY_FIPS)
1362                 goto encrypt;
1363
1364         /* An authenticated combination key has sufficient security for
1365            security level 3. */
1366         if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
1367              conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
1368             sec_level == BT_SECURITY_HIGH)
1369                 goto encrypt;
1370
1371         /* An unauthenticated combination key has sufficient security for
1372            security levels 1 and 2. */
1373         if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
1374              conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
1375             (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
1376                 goto encrypt;
1377
1378         /* A combination key always has sufficient security for security
1379            levels 1 and 2. A high security level requires that the combination
1380            key was generated using the maximum PIN code length (16).
1381            This applies to pre-2.1 units. */
1382         if (conn->key_type == HCI_LK_COMBINATION &&
1383             (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
1384              conn->pin_length == 16))
1385                 goto encrypt;
1386
1387 auth:
1388         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1389                 return 0;
1390
1391         if (initiator)
1392                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1393
1394         if (!hci_conn_auth(conn, sec_level, auth_type))
1395                 return 0;
1396
1397 encrypt:
1398         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
1399                 /* Ensure that the encryption key size has been read,
1400                  * otherwise stall the upper layer responses.
1401                  */
1402                 if (!conn->enc_key_size)
1403                         return 0;
1404
1405                 /* Nothing else needed, all requirements are met */
1406                 return 1;
1407         }
1408
1409         hci_conn_encrypt(conn);
1410         return 0;
1411 }
1412 EXPORT_SYMBOL(hci_conn_security);
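/* Summary of the ladder above: for BR/EDR links, hci_conn_security()
 * first decides whether the stored link key is strong enough for the
 * requested level. If not, it requests authentication (hci_conn_auth());
 * if the key is acceptable but the link is not yet encrypted, it requests
 * encryption (hci_conn_encrypt()). It returns 1 only when all
 * requirements are already met, and 0 while an authentication or
 * encryption procedure is still pending.
 */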
1413
1414 /* Check secure link requirement */
1415 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1416 {
1417         BT_DBG("hcon %p", conn);
1418
1419         /* Accept if non-secure or higher security level is required */
1420         if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1421                 return 1;
1422
1423         /* Accept if secure or higher security level is already present */
1424         if (conn->sec_level == BT_SECURITY_HIGH ||
1425             conn->sec_level == BT_SECURITY_FIPS)
1426                 return 1;
1427
1428         /* Reject not secure link */
1429         return 0;
1430 }
1431 EXPORT_SYMBOL(hci_conn_check_secure);
1432
1433 /* Switch role */
1434 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1435 {
1436         BT_DBG("hcon %p", conn);
1437
1438         if (role == conn->role)
1439                 return 1;
1440
1441         if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1442                 struct hci_cp_switch_role cp;
1443                 bacpy(&cp.bdaddr, &conn->dst);
1444                 cp.role = role;
1445                 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1446         }
1447
1448         return 0;
1449 }
1450 EXPORT_SYMBOL(hci_conn_switch_role);
1451
1452 /* Enter active mode */
1453 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1454 {
1455         struct hci_dev *hdev = conn->hdev;
1456
1457         BT_DBG("hcon %p mode %d", conn, conn->mode);
1458
1459         if (conn->mode != HCI_CM_SNIFF)
1460                 goto timer;
1461
1462         if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1463                 goto timer;
1464
1465         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1466                 struct hci_cp_exit_sniff_mode cp;
1467                 cp.handle = cpu_to_le16(conn->handle);
1468                 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1469         }
1470
1471 timer:
1472         if (hdev->idle_timeout > 0)
1473                 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1474                                    msecs_to_jiffies(hdev->idle_timeout));
1475 }
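/*
 * Illustrative sketch: waking a possibly sniffing link before queuing
 * ACL data, mirroring what hci_send_acl() does.  queue_acl_data() is a
 * hypothetical wrapper around the connection's data queue.
 */
static void queue_acl_data(struct hci_conn *hcon, struct sk_buff *skb)
{
        /* Exit sniff mode if needed and (re)arm the idle timer */
        hci_conn_enter_active_mode(hcon, BT_POWER_FORCE_ACTIVE_ON);

        skb_queue_tail(&hcon->data_q, skb);
}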
1476
1477 /* Drop all connections on the device */
1478 void hci_conn_hash_flush(struct hci_dev *hdev)
1479 {
1480         struct hci_conn_hash *h = &hdev->conn_hash;
1481         struct hci_conn *c, *n;
1482
1483         BT_DBG("hdev %s", hdev->name);
1484
1485         list_for_each_entry_safe(c, n, &h->list, list) {
1486                 c->state = BT_CLOSED;
1487
1488                 hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1489                 hci_conn_del(c);
1490         }
1491 }
1492
1493 /* Check pending connect attempts */
1494 void hci_conn_check_pending(struct hci_dev *hdev)
1495 {
1496         struct hci_conn *conn;
1497
1498         BT_DBG("hdev %s", hdev->name);
1499
1500         hci_dev_lock(hdev);
1501
1502         conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1503         if (conn)
1504                 hci_acl_create_connection(conn);
1505
1506         hci_dev_unlock(hdev);
1507 }
1508
1509 static u32 get_link_mode(struct hci_conn *conn)
1510 {
1511         u32 link_mode = 0;
1512
1513         if (conn->role == HCI_ROLE_MASTER)
1514                 link_mode |= HCI_LM_MASTER;
1515
1516         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1517                 link_mode |= HCI_LM_ENCRYPT;
1518
1519         if (test_bit(HCI_CONN_AUTH, &conn->flags))
1520                 link_mode |= HCI_LM_AUTH;
1521
1522         if (test_bit(HCI_CONN_SECURE, &conn->flags))
1523                 link_mode |= HCI_LM_SECURE;
1524
1525         if (test_bit(HCI_CONN_FIPS, &conn->flags))
1526                 link_mode |= HCI_LM_FIPS;
1527
1528         return link_mode;
1529 }
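/*
 * Illustrative sketch (userspace, not kernel code): decoding the
 * link_mode bitmask built by get_link_mode() once it reaches userspace
 * in struct hci_conn_info.  Only the HCI_LM_* constants from
 * <bluetooth/hci.h> are used; print_link_mode() is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static void print_link_mode(uint32_t lm)
{
        printf("%s%s%s%s\n",
               (lm & HCI_LM_MASTER)  ? "MASTER "  : "SLAVE ",
               (lm & HCI_LM_AUTH)    ? "AUTH "    : "",
               (lm & HCI_LM_ENCRYPT) ? "ENCRYPT " : "",
               (lm & HCI_LM_SECURE)  ? "SECURE "  : "");
}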
1530
1531 int hci_get_conn_list(void __user *arg)
1532 {
1533         struct hci_conn *c;
1534         struct hci_conn_list_req req, *cl;
1535         struct hci_conn_info *ci;
1536         struct hci_dev *hdev;
1537         int n = 0, size, err;
1538
1539         if (copy_from_user(&req, arg, sizeof(req)))
1540                 return -EFAULT;
1541
1542         if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1543                 return -EINVAL;
1544
1545         size = sizeof(req) + req.conn_num * sizeof(*ci);
1546
1547         cl = kmalloc(size, GFP_KERNEL);
1548         if (!cl)
1549                 return -ENOMEM;
1550
1551         hdev = hci_dev_get(req.dev_id);
1552         if (!hdev) {
1553                 kfree(cl);
1554                 return -ENODEV;
1555         }
1556
1557         ci = cl->conn_info;
1558
1559         hci_dev_lock(hdev);
1560         list_for_each_entry(c, &hdev->conn_hash.list, list) {
1561                 bacpy(&(ci + n)->bdaddr, &c->dst);
1562                 (ci + n)->handle = c->handle;
1563                 (ci + n)->type  = c->type;
1564                 (ci + n)->out   = c->out;
1565                 (ci + n)->state = c->state;
1566                 (ci + n)->link_mode = get_link_mode(c);
1567                 if (++n >= req.conn_num)
1568                         break;
1569         }
1570         hci_dev_unlock(hdev);
1571
1572         cl->dev_id = hdev->id;
1573         cl->conn_num = n;
1574         size = sizeof(req) + n * sizeof(*ci);
1575
1576         hci_dev_put(hdev);
1577
1578         err = copy_to_user(arg, cl, size);
1579         kfree(cl);
1580
1581         return err ? -EFAULT : 0;
1582 }
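/*
 * Illustrative sketch (userspace): exercising the handler above through
 * the HCIGETCONNLIST ioctl on a raw HCI socket, roughly what
 * "hcitool con" does.  Build with -lbluetooth; dev_id 0 (hci0) and the
 * limit of 10 entries are arbitrary choices for the example.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
        struct hci_conn_list_req *cl;
        struct hci_conn_info *ci;
        int sk, i;

        sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
        if (sk < 0) {
                perror("socket");
                return 1;
        }

        cl = calloc(1, sizeof(*cl) + 10 * sizeof(*ci));
        if (!cl)
                return 1;

        cl->dev_id = 0;         /* hci0 */
        cl->conn_num = 10;      /* at most 10 entries back */
        ci = cl->conn_info;

        if (ioctl(sk, HCIGETCONNLIST, (void *) cl) < 0) {
                perror("HCIGETCONNLIST");
                return 1;
        }

        for (i = 0; i < cl->conn_num; i++) {
                char addr[18];

                ba2str(&ci[i].bdaddr, addr);
                printf("handle %u type %u %s state %u lm 0x%x\n",
                       ci[i].handle, ci[i].type, addr,
                       ci[i].state, ci[i].link_mode);
        }

        free(cl);
        close(sk);
        return 0;
}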
1583
1584 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1585 {
1586         struct hci_conn_info_req req;
1587         struct hci_conn_info ci;
1588         struct hci_conn *conn;
1589         char __user *ptr = arg + sizeof(req);
1590
1591         if (copy_from_user(&req, arg, sizeof(req)))
1592                 return -EFAULT;
1593
1594         hci_dev_lock(hdev);
1595         conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1596         if (conn) {
1597                 bacpy(&ci.bdaddr, &conn->dst);
1598                 ci.handle = conn->handle;
1599                 ci.type  = conn->type;
1600                 ci.out   = conn->out;
1601                 ci.state = conn->state;
1602                 ci.link_mode = get_link_mode(conn);
1603         }
1604         hci_dev_unlock(hdev);
1605
1606         if (!conn)
1607                 return -ENOENT;
1608
1609         return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1610 }
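/*
 * Illustrative sketch (userspace): querying a single connection with the
 * HCIGETCONNINFO ioctl served by the handler above.  The kernel writes
 * the hci_conn_info payload directly after the request header, so the
 * buffer must have room for both.  query_conn() is a hypothetical
 * helper; sk is a raw HCI socket and the includes match the
 * HCIGETCONNLIST sketch above.
 */
static int query_conn(int sk, const bdaddr_t *peer)
{
        struct hci_conn_info_req *cr;
        int err;

        cr = malloc(sizeof(*cr) + sizeof(struct hci_conn_info));
        if (!cr)
                return -1;

        bacpy(&cr->bdaddr, peer);
        cr->type = ACL_LINK;

        err = ioctl(sk, HCIGETCONNINFO, (void *) cr);
        if (!err)
                printf("handle %u link_mode 0x%x\n",
                       cr->conn_info->handle, cr->conn_info->link_mode);

        free(cr);
        return err;
}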
1611
1612 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1613 {
1614         struct hci_auth_info_req req;
1615         struct hci_conn *conn;
1616
1617         if (copy_from_user(&req, arg, sizeof(req)))
1618                 return -EFAULT;
1619
1620         hci_dev_lock(hdev);
1621         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1622         if (conn)
1623                 req.type = conn->auth_type;
1624         hci_dev_unlock(hdev);
1625
1626         if (!conn)
1627                 return -ENOENT;
1628
1629         return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1630 }
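/*
 * Illustrative sketch (userspace): reading the negotiated authentication
 * requirement of a peer via HCIGETAUTHINFO (same raw HCI socket and
 * includes as the sketches above; query_auth() is hypothetical).
 */
static int query_auth(int sk, const bdaddr_t *peer)
{
        struct hci_auth_info_req req;

        memset(&req, 0, sizeof(req));
        bacpy(&req.bdaddr, peer);

        if (ioctl(sk, HCIGETAUTHINFO, (void *) &req) < 0)
                return -1;

        return req.type;        /* auth_type of the ACL link */
}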
1631
1632 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1633 {
1634         struct hci_dev *hdev = conn->hdev;
1635         struct hci_chan *chan;
1636
1637         BT_DBG("%s hcon %p", hdev->name, conn);
1638
1639         if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1640                 BT_DBG("Refusing to create new hci_chan");
1641                 return NULL;
1642         }
1643
1644         chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1645         if (!chan)
1646                 return NULL;
1647
1648         chan->conn = hci_conn_get(conn);
1649         skb_queue_head_init(&chan->data_q);
1650         chan->state = BT_CONNECTED;
1651
1652         list_add_rcu(&chan->list, &conn->chan_list);
1653
1654         return chan;
1655 }
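/*
 * Illustrative sketch (kernel context): the usual hci_chan lifecycle as
 * driven by the L2CAP core - create a channel on an existing connection,
 * queue data on hchan->data_q for the TX path in hci_core.c, and tear
 * it down with hci_chan_del() when done.  attach_channel() is a
 * hypothetical helper.
 */
static struct hci_chan *attach_channel(struct hci_conn *hcon)
{
        struct hci_chan *hchan;

        hchan = hci_chan_create(hcon);
        if (!hchan)
                return NULL;    /* connection being dropped or -ENOMEM */

        /* ... hand hchan to the upper layer; release later with
         * hci_chan_del(hchan) or implicitly via hci_chan_list_flush() ...
         */
        return hchan;
}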
1656
1657 void hci_chan_del(struct hci_chan *chan)
1658 {
1659         struct hci_conn *conn = chan->conn;
1660         struct hci_dev *hdev = conn->hdev;
1661
1662         BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1663
1664         list_del_rcu(&chan->list);
1665
1666         synchronize_rcu();
1667
1668         /* Prevent new hci_chan's from being created for this hci_conn */
1669         set_bit(HCI_CONN_DROP, &conn->flags);
1670
1671         hci_conn_put(conn);
1672
1673         skb_queue_purge(&chan->data_q);
1674         kfree(chan);
1675 }
1676
1677 void hci_chan_list_flush(struct hci_conn *conn)
1678 {
1679         struct hci_chan *chan, *n;
1680
1681         BT_DBG("hcon %p", conn);
1682
1683         list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1684                 hci_chan_del(chan);
1685 }
1686
1687 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1688                                                  __u16 handle)
1689 {
1690         struct hci_chan *hchan;
1691
1692         list_for_each_entry(hchan, &hcon->chan_list, list) {
1693                 if (hchan->handle == handle)
1694                         return hchan;
1695         }
1696
1697         return NULL;
1698 }
1699
1700 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1701 {
1702         struct hci_conn_hash *h = &hdev->conn_hash;
1703         struct hci_conn *hcon;
1704         struct hci_chan *hchan = NULL;
1705
1706         rcu_read_lock();
1707
1708         list_for_each_entry_rcu(hcon, &h->list, list) {
1709                 hchan = __hci_chan_lookup_handle(hcon, handle);
1710                 if (hchan)
1711                         break;
1712         }
1713
1714         rcu_read_unlock();
1715
1716         return hchan;
1717 }
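/*
 * Illustrative sketch (kernel context): resolving a logical link handle
 * carried in an HCI event back to its hci_chan, the way the AMP logical
 * link event handlers in hci_event.c use this lookup.
 * handle_logical_link_event() is a hypothetical wrapper.
 */
static void handle_logical_link_event(struct hci_dev *hdev, __u16 handle)
{
        struct hci_chan *hchan;

        hchan = hci_chan_lookup_handle(hdev, handle);
        if (!hchan)
                return;         /* no channel owns this handle */

        BT_DBG("%s hchan %p state %d", hdev->name, hchan, hchan->state);
}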