/* net/bluetooth/hci_core.c */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
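
/* Illustrative sketch (not part of the original file): every successful
 * hci_dev_get() must be balanced with hci_dev_put() once the caller is
 * done with the device, e.g.:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		// ... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */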

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
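
/* Worked example (illustrative): the resolve list is kept sorted by
 * ascending abs(rssi), i.e. strongest signal first, with NAME_PENDING
 * entries left in place. Given entries at -40, -65 and -90 dBm, an
 * entry re-inserted at -70 dBm ends up between -65 and -90, so name
 * resolution is attempted for the closest devices first.
 */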

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
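
/* Illustrative note: the returned flags feed the mgmt Device Found
 * event. MGMT_DEV_FOUND_CONFIRM_NAME asks userspace to confirm the
 * remote name (the name is still unknown, or no cache entry could be
 * allocated), and MGMT_DEV_FOUND_LEGACY_PAIRING marks devices that did
 * not report SSP support, so legacy PIN pairing would be required.
 */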

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
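
/* Illustrative sketch (userspace, not part of this file): hci_inquiry()
 * backs the HCIINQUIRY ioctl on a raw HCI socket. The caller passes a
 * struct hci_inquiry_req immediately followed by room for the
 * inquiry_info results, roughly:
 *
 *	char buf[sizeof(struct hci_inquiry_req) +
 *		 255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (void *) buf;
 *
 *	ir->dev_id  = 0;		// hci0
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;		// GIAC 0x9e8b33, little endian
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;		// inquiry length, units of 1.28s
 *	ir->num_rsp = 255;
 *
 *	ioctl(sock, HCIINQUIRY, ir);
 */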

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the
	 * HCI_USER_CHANNEL flag is set before the device is opened.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
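
/* Illustrative sketch (userspace, not part of this file): the legacy
 * per-device ioctls handled above take a struct hci_dev_req, e.g.
 * making hci0 connectable and discoverable via HCISETSCAN:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	ioctl(sock, HCISETSCAN, &dr);
 */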

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
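
/* Illustrative sketch (userspace, not part of this file): HCIGETDEVLIST
 * expects dev_num to be pre-set to the capacity of the trailing array,
 * and returns the actual count in the same field:
 *
 *	char buf[sizeof(struct hci_dev_list_req) +
 *		 HCI_MAX_DEV * sizeof(struct hci_dev_req)];
 *	struct hci_dev_list_req *dl = (void *) buf;
 *
 *	dl->dev_num = HCI_MAX_DEV;
 *	if (!ioctl(sock, HCIGETDEVLIST, dl))
 *		// iterate dl->dev_req[0 .. dl->dev_num - 1]
 */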

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
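
/* Illustrative note: the auth_type / remote_auth values compared above
 * are the SSP authentication requirements from the Bluetooth core
 * specification: 0x00 No Bonding, 0x01 No Bonding (MITM), 0x02
 * Dedicated Bonding, 0x03 Dedicated Bonding (MITM), 0x04 General
 * Bonding, 0x05 General Bonding (MITM). So "> 0x01" means some form
 * of bonding was requested.
 */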

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:

	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
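
/* Illustrative note: for LE peers the lookup above first resolves the
 * given address through hci_get_irk(), so a resolvable private address
 * observed over the air is mapped back to the stored identity address
 * before the long term key list is searched.
 */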

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
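
/* Illustrative note: data->present ends up as a two-bit mask of which
 * OOB value pairs are stored:
 *
 *	hash192/rand192		hash256/rand256		present
 *	yes			yes			0x03
 *	yes			no			0x01
 *	no			yes			0x02
 *	no			no			0x00
 */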

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}
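
/* Illustrative note: hci_get_next_instance() treats hdev->adv_instances
 * as a circular list: asking for the instance that sits last in the
 * list yields the first one again, which lets multi-advertising rotate
 * round-robin through all registered instances.
 */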

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;
	BT_DBG("%s removing %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration, s8 tx_power,
			 u32 min_interval, u32 max_interval)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;
	adv_instance->min_interval = min_interval;
	adv_instance->max_interval = max_interval;
	adv_instance->tx_power = tx_power;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv_instance->duration = duration;

	INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
			  adv_instance_rpa_expired);
1747
1748         BT_DBG("%s for instance %d", hdev->name, instance);
1749
1750         return 0;
1751 }
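
/* Example (illustrative sketch, assuming hdev->lock is held; "ad" is a
 * hypothetical buffer): registering instance 0x01 with a single Flags
 * AD element, no scan response, no timeout and the default rotation
 * duration:
 *
 *	u8 ad[] = { 0x02, 0x01, 0x06 };
 *	int err;
 *
 *	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad,
 *				   0, NULL, 0, 0, HCI_TX_POWER_INVALID,
 *				   hdev->le_adv_min_interval,
 *				   hdev->le_adv_max_interval);
 */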
1752
1753 /* This function requires the caller holds hdev->lock */
1754 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1755                               u16 adv_data_len, u8 *adv_data,
1756                               u16 scan_rsp_len, u8 *scan_rsp_data)
1757 {
1758         struct adv_info *adv_instance;
1759
1760         adv_instance = hci_find_adv_instance(hdev, instance);
1761
1762         /* If the instance doesn't exist, we can't modify its data */
1763         if (!adv_instance)
1764                 return -ENOENT;
1765
1766         if (adv_data_len) {
1767                 memset(adv_instance->adv_data, 0,
1768                        sizeof(adv_instance->adv_data));
1769                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1770                 adv_instance->adv_data_len = adv_data_len;
1771         }
1772
1773         if (scan_rsp_len) {
1774                 memset(adv_instance->scan_rsp_data, 0,
1775                        sizeof(adv_instance->scan_rsp_data));
1776                 memcpy(adv_instance->scan_rsp_data,
1777                        scan_rsp_data, scan_rsp_len);
1778                 adv_instance->scan_rsp_len = scan_rsp_len;
1779         }
1780
1781         return 0;
1782 }
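
/* Example (illustrative sketch, hdev->lock held; "scan_rsp" is a
 * hypothetical buffer): a zero-length parameter leaves the
 * corresponding data untouched, so only the scan response is updated
 * here:
 *
 *	err = hci_set_adv_instance_data(hdev, 0x01, 0, NULL,
 *					sizeof(scan_rsp), scan_rsp);
 */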
1783
1784 /* This function requires the caller holds hdev->lock */
1785 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1786 {
1787         u32 flags;
1788         struct adv_info *adv;
1789
1790         if (instance == 0x00) {
1791                 /* Instance 0 always manages the "Tx Power" and "Flags"
1792                  * fields
1793                  */
1794                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1795
1796                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1797                  * corresponds to the "connectable" instance flag.
1798                  */
1799                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1800                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1801
1802                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1803                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1804                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1805                         flags |= MGMT_ADV_FLAG_DISCOV;
1806
1807                 return flags;
1808         }
1809
1810         adv = hci_find_adv_instance(hdev, instance);
1811
1812         /* Return 0 when we got an invalid instance identifier. */
1813         if (!adv)
1814                 return 0;
1815
1816         return adv->flags;
1817 }
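
/* Example (illustrative): for instance 0x00 on a connectable and
 * discoverable controller this returns MGMT_ADV_FLAG_TX_POWER |
 * MGMT_ADV_FLAG_MANAGED_FLAGS | MGMT_ADV_FLAG_CONNECTABLE |
 * MGMT_ADV_FLAG_DISCOV; for any other instance it simply returns the
 * flags stored by hci_add_adv_instance(), or 0 if unknown.
 */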
1818
1819 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1820 {
1821         struct adv_info *adv;
1822
1823         /* Instance 0x00 always sets the local name */
1824         if (instance == 0x00)
1825                 return true;
1826
1827         adv = hci_find_adv_instance(hdev, instance);
1828         if (!adv)
1829                 return false;
1830
1831         if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1832             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1833                 return true;
1834
1835         return adv->scan_rsp_len ? true : false;
1836 }
1837
1838 /* This function requires the caller holds hdev->lock */
1839 void hci_adv_monitors_clear(struct hci_dev *hdev)
1840 {
1841         struct adv_monitor *monitor;
1842         int handle;
1843
1844         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1845                 hci_free_adv_monitor(hdev, monitor);
1846
1847         idr_destroy(&hdev->adv_monitors_idr);
1848 }
1849
1850 /* Frees the monitor structure and does some bookkeeping.
1851  * This function requires the caller holds hdev->lock.
1852  */
1853 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1854 {
1855         struct adv_pattern *pattern;
1856         struct adv_pattern *tmp;
1857
1858         if (!monitor)
1859                 return;
1860
1861         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1862                 list_del(&pattern->list);
1863                 kfree(pattern);
1864         }
1865
1866         if (monitor->handle)
1867                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1868
1869         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1870                 hdev->adv_monitors_cnt--;
1871                 mgmt_adv_monitor_removed(hdev, monitor->handle);
1872         }
1873
1874         kfree(monitor);
1875 }
1876
1877 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
1878 {
1879         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
1880 }
1881
1882 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
1883 {
1884         return mgmt_remove_adv_monitor_complete(hdev, status);
1885 }
1886
1887 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1888  * also attempts to forward the request to the controller.
1889  * Returns true if request is forwarded (result is pending), false otherwise.
1890  * This function requires the caller holds hdev->lock.
1891  */
1892 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
1893                          int *err)
1894 {
1895         int min, max, handle;
1896
1897         *err = 0;
1898
1899         if (!monitor) {
1900                 *err = -EINVAL;
1901                 return false;
1902         }
1903
1904         min = HCI_MIN_ADV_MONITOR_HANDLE;
1905         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1906         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1907                            GFP_KERNEL);
1908         if (handle < 0) {
1909                 *err = handle;
1910                 return false;
1911         }
1912
1913         monitor->handle = handle;
1914
1915         if (!hdev_is_powered(hdev))
1916                 return false;
1917
1918         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1919         case HCI_ADV_MONITOR_EXT_NONE:
1920                 hci_update_passive_scan(hdev);
1921                 bt_dev_dbg(hdev, "add monitor status %d", *err);
1922                 /* Message was not forwarded to controller - not an error */
1923                 return false;
1924         case HCI_ADV_MONITOR_EXT_MSFT:
1925                 *err = msft_add_monitor_pattern(hdev, monitor);
1926                 bt_dev_dbg(hdev, "add monitor msft status %d", *err);
1928                 break;
1929         }
1930
1931         return (*err == 0);
1932 }
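
/* Example (illustrative sketch, hdev->lock held; "monitor" allocated
 * and filled in by the caller): the return value distinguishes
 * "forwarded, completion pending" from "finished synchronously":
 *
 *	int err;
 *	bool pending = hci_add_adv_monitor(hdev, monitor, &err);
 *
 *	if (!pending && err)
 *		hci_free_adv_monitor(hdev, monitor);
 */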
1933
1934 /* Attempts to tell the controller to remove the monitor and frees it. If
1935  * the controller doesn't have a corresponding handle, remove it anyway.
1936  * Returns true if request is forwarded (result is pending), false otherwise.
1937  * This function requires the caller holds hdev->lock.
1938  */
1939 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
1940                                    struct adv_monitor *monitor,
1941                                    u16 handle, int *err)
1942 {
1943         *err = 0;
1944
1945         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1946         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1947                 goto free_monitor;
1948         case HCI_ADV_MONITOR_EXT_MSFT:
1949                 *err = msft_remove_monitor(hdev, monitor, handle);
1950                 break;
1951         }
1952
1953         /* In case no matching handle registered, just free the monitor */
1954         if (*err == -ENOENT)
1955                 goto free_monitor;
1956
1957         return (*err == 0);
1958
1959 free_monitor:
1960         if (*err == -ENOENT)
1961                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1962                             monitor->handle);
1963         hci_free_adv_monitor(hdev, monitor);
1964
1965         *err = 0;
1966         return false;
1967 }
1968
1969 /* Returns true if request is forwarded (result is pending), false otherwise.
1970  * This function requires the caller holds hdev->lock.
1971  */
1972 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
1973 {
1974         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1975         bool pending;
1976
1977         if (!monitor) {
1978                 *err = -EINVAL;
1979                 return false;
1980         }
1981
1982         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
1983         if (!*err && !pending)
1984                 hci_update_passive_scan(hdev);
1985
1986         bt_dev_dbg(hdev, "remove monitor handle %d, status %d, %spending",
1987                    handle, *err, pending ? "" : "not ");
1988
1989         return pending;
1990 }
1991
1992 /* Returns true if request is forwarded (result is pending), false otherwise.
1993  * This function requires the caller holds hdev->lock.
1994  */
1995 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
1996 {
1997         struct adv_monitor *monitor;
1998         int idr_next_id = 0;
1999         bool pending = false;
2000         bool update = false;
2001
2002         *err = 0;
2003
2004         while (!*err && !pending) {
2005                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2006                 if (!monitor)
2007                         break;
2008
2009                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
2010
2011                 if (!*err && !pending)
2012                         update = true;
2013         }
2014
2015         if (update)
2016                 hci_update_passive_scan(hdev);
2017
2018         bt_dev_dbg(hdev, "remove all monitors status %d, %spending",
2019                    *err, pending ? "" : "not ");
2020
2021         return pending;
2022 }
2023
2024 /* This function requires the caller holds hdev->lock */
2025 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2026 {
2027         return !idr_is_empty(&hdev->adv_monitors_idr);
2028 }
2029
2030 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2031 {
2032         if (msft_monitor_supported(hdev))
2033                 return HCI_ADV_MONITOR_EXT_MSFT;
2034
2035         return HCI_ADV_MONITOR_EXT_NONE;
2036 }
2037
2038 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2039                                          bdaddr_t *bdaddr, u8 type)
2040 {
2041         struct bdaddr_list *b;
2042
2043         list_for_each_entry(b, bdaddr_list, list) {
2044                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2045                         return b;
2046         }
2047
2048         return NULL;
2049 }
2050
2051 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2052                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2053                                 u8 type)
2054 {
2055         struct bdaddr_list_with_irk *b;
2056
2057         list_for_each_entry(b, bdaddr_list, list) {
2058                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2059                         return b;
2060         }
2061
2062         return NULL;
2063 }
2064
2065 struct bdaddr_list_with_flags *
2066 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2067                                   bdaddr_t *bdaddr, u8 type)
2068 {
2069         struct bdaddr_list_with_flags *b;
2070
2071         list_for_each_entry(b, bdaddr_list, list) {
2072                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2073                         return b;
2074         }
2075
2076         return NULL;
2077 }
2078
2079 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2080 {
2081         struct bdaddr_list *b, *n;
2082
2083         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2084                 list_del(&b->list);
2085                 kfree(b);
2086         }
2087 }
2088
2089 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2090 {
2091         struct bdaddr_list *entry;
2092
2093         if (!bacmp(bdaddr, BDADDR_ANY))
2094                 return -EBADF;
2095
2096         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2097                 return -EEXIST;
2098
2099         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2100         if (!entry)
2101                 return -ENOMEM;
2102
2103         bacpy(&entry->bdaddr, bdaddr);
2104         entry->bdaddr_type = type;
2105
2106         list_add(&entry->list, list);
2107
2108         return 0;
2109 }
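
/* Example (illustrative sketch; "peer" is a hypothetical bdaddr_t):
 * entries are keyed on the (address, address type) pair, so adding the
 * same address twice with the same type reports -EEXIST:
 *
 *	err = hci_bdaddr_list_add(&hdev->le_accept_list, &peer,
 *				  ADDR_LE_DEV_PUBLIC);
 *	if (err && err != -EEXIST)
 *		return err;
 */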
2110
2111 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2112                                         u8 type, u8 *peer_irk, u8 *local_irk)
2113 {
2114         struct bdaddr_list_with_irk *entry;
2115
2116         if (!bacmp(bdaddr, BDADDR_ANY))
2117                 return -EBADF;
2118
2119         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2120                 return -EEXIST;
2121
2122         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2123         if (!entry)
2124                 return -ENOMEM;
2125
2126         bacpy(&entry->bdaddr, bdaddr);
2127         entry->bdaddr_type = type;
2128
2129         if (peer_irk)
2130                 memcpy(entry->peer_irk, peer_irk, 16);
2131
2132         if (local_irk)
2133                 memcpy(entry->local_irk, local_irk, 16);
2134
2135         list_add(&entry->list, list);
2136
2137         return 0;
2138 }
2139
2140 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2141                                    u8 type, u32 flags)
2142 {
2143         struct bdaddr_list_with_flags *entry;
2144
2145         if (!bacmp(bdaddr, BDADDR_ANY))
2146                 return -EBADF;
2147
2148         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2149                 return -EEXIST;
2150
2151         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2152         if (!entry)
2153                 return -ENOMEM;
2154
2155         bacpy(&entry->bdaddr, bdaddr);
2156         entry->bdaddr_type = type;
2157         entry->flags = flags;
2158
2159         list_add(&entry->list, list);
2160
2161         return 0;
2162 }
2163
2164 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2165 {
2166         struct bdaddr_list *entry;
2167
2168         if (!bacmp(bdaddr, BDADDR_ANY)) {
2169                 hci_bdaddr_list_clear(list);
2170                 return 0;
2171         }
2172
2173         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2174         if (!entry)
2175                 return -ENOENT;
2176
2177         list_del(&entry->list);
2178         kfree(entry);
2179
2180         return 0;
2181 }
2182
2183 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2184                                                         u8 type)
2185 {
2186         struct bdaddr_list_with_irk *entry;
2187
2188         if (!bacmp(bdaddr, BDADDR_ANY)) {
2189                 hci_bdaddr_list_clear(list);
2190                 return 0;
2191         }
2192
2193         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2194         if (!entry)
2195                 return -ENOENT;
2196
2197         list_del(&entry->list);
2198         kfree(entry);
2199
2200         return 0;
2201 }
2202
2203 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2204                                    u8 type)
2205 {
2206         struct bdaddr_list_with_flags *entry;
2207
2208         if (!bacmp(bdaddr, BDADDR_ANY)) {
2209                 hci_bdaddr_list_clear(list);
2210                 return 0;
2211         }
2212
2213         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2214         if (!entry)
2215                 return -ENOENT;
2216
2217         list_del(&entry->list);
2218         kfree(entry);
2219
2220         return 0;
2221 }
2222
2223 /* This function requires the caller holds hdev->lock */
2224 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2225                                                bdaddr_t *addr, u8 addr_type)
2226 {
2227         struct hci_conn_params *params;
2228
2229         list_for_each_entry(params, &hdev->le_conn_params, list) {
2230                 if (bacmp(&params->addr, addr) == 0 &&
2231                     params->addr_type == addr_type) {
2232                         return params;
2233                 }
2234         }
2235
2236         return NULL;
2237 }
2238
2239 /* This function requires the caller holds hdev->lock */
2240 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2241                                                   bdaddr_t *addr, u8 addr_type)
2242 {
2243         struct hci_conn_params *param;
2244
2245         list_for_each_entry(param, list, action) {
2246                 if (bacmp(&param->addr, addr) == 0 &&
2247                     param->addr_type == addr_type)
2248                         return param;
2249         }
2250
2251         return NULL;
2252 }
2253
2254 /* This function requires the caller holds hdev->lock */
2255 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2256                                             bdaddr_t *addr, u8 addr_type)
2257 {
2258         struct hci_conn_params *params;
2259
2260         params = hci_conn_params_lookup(hdev, addr, addr_type);
2261         if (params)
2262                 return params;
2263
2264         params = kzalloc(sizeof(*params), GFP_KERNEL);
2265         if (!params) {
2266                 bt_dev_err(hdev, "out of memory");
2267                 return NULL;
2268         }
2269
2270         bacpy(&params->addr, addr);
2271         params->addr_type = addr_type;
2272
2273         list_add(&params->list, &hdev->le_conn_params);
2274         INIT_LIST_HEAD(&params->action);
2275
2276         params->conn_min_interval = hdev->le_conn_min_interval;
2277         params->conn_max_interval = hdev->le_conn_max_interval;
2278         params->conn_latency = hdev->le_conn_latency;
2279         params->supervision_timeout = hdev->le_supv_timeout;
2280         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2281
2282         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2283
2284         return params;
2285 }
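
/* Example (illustrative sketch, hdev->lock held; "peer" hypothetical):
 * hci_conn_params_add() is idempotent and returns the existing entry
 * when one is already present, so callers can set the policy directly:
 *
 *	struct hci_conn_params *p;
 *
 *	p = hci_conn_params_add(hdev, &peer, ADDR_LE_DEV_RANDOM);
 *	if (p)
 *		p->auto_connect = HCI_AUTO_CONN_ALWAYS;
 */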
2286
2287 static void hci_conn_params_free(struct hci_conn_params *params)
2288 {
2289         if (params->conn) {
2290                 hci_conn_drop(params->conn);
2291                 hci_conn_put(params->conn);
2292         }
2293
2294         list_del(&params->action);
2295         list_del(&params->list);
2296         kfree(params);
2297 }
2298
2299 /* This function requires the caller holds hdev->lock */
2300 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2301 {
2302         struct hci_conn_params *params;
2303
2304         params = hci_conn_params_lookup(hdev, addr, addr_type);
2305         if (!params)
2306                 return;
2307
2308         hci_conn_params_free(params);
2309
2310         hci_update_passive_scan(hdev);
2311
2312         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2313 }
2314
2315 /* This function requires the caller holds hdev->lock */
2316 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2317 {
2318         struct hci_conn_params *params, *tmp;
2319
2320         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2321                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2322                         continue;
2323
2324                 /* If trying to establish a one-time connection to a disabled
2325                  * device, keep the params but mark them for explicit connect only.
2326                  */
2327                 if (params->explicit_connect) {
2328                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2329                         continue;
2330                 }
2331
2332                 list_del(&params->list);
2333                 kfree(params);
2334         }
2335
2336         BT_DBG("All LE disabled connection parameters were removed");
2337 }
2338
2339 /* This function requires the caller holds hdev->lock */
2340 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2341 {
2342         struct hci_conn_params *params, *tmp;
2343
2344         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2345                 hci_conn_params_free(params);
2346
2347         BT_DBG("All LE connection parameters were removed");
2348 }
2349
2350 /* Copy the Identity Address of the controller.
2351  *
2352  * If the controller has a public BD_ADDR, then by default use that one.
2353  * If this is an LE-only controller without a public address, default to
2354  * the static random address.
2355  *
2356  * For debugging purposes it is possible to force controllers with a
2357  * public address to use the static random address instead.
2358  *
2359  * In case BR/EDR has been disabled on a dual-mode controller and
2360  * userspace has configured a static address, then that address
2361  * becomes the identity address instead of the public BR/EDR address.
2362  */
2363 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2364                                u8 *bdaddr_type)
2365 {
2366         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2367             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2368             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2369              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2370                 bacpy(bdaddr, &hdev->static_addr);
2371                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2372         } else {
2373                 bacpy(bdaddr, &hdev->bdaddr);
2374                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2375         }
2376 }
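
/* Example (illustrative sketch): a caller that needs the own-address
 * information, e.g. for filling advertising parameters, can do:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 */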
2377
2378 static void hci_clear_wake_reason(struct hci_dev *hdev)
2379 {
2380         hci_dev_lock(hdev);
2381
2382         hdev->wake_reason = 0;
2383         bacpy(&hdev->wake_addr, BDADDR_ANY);
2384         hdev->wake_addr_type = 0;
2385
2386         hci_dev_unlock(hdev);
2387 }
2388
2389 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2390                                 void *data)
2391 {
2392         struct hci_dev *hdev =
2393                 container_of(nb, struct hci_dev, suspend_notifier);
2394         int ret = 0;
2395
2396         if (action == PM_SUSPEND_PREPARE)
2397                 ret = hci_suspend_dev(hdev);
2398         else if (action == PM_POST_SUSPEND)
2399                 ret = hci_resume_dev(hdev);
2400
2401         if (ret)
2402                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2403                            action, ret);
2404
2405         return NOTIFY_DONE;
2406 }
2407
2408 /* Alloc HCI device */
2409 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2410 {
2411         struct hci_dev *hdev;
2412         unsigned int alloc_size;
2413
2414         alloc_size = sizeof(*hdev);
2415         if (sizeof_priv) {
2416                 /* FIXME: may need ALIGN()-ment? */
2417                 alloc_size += sizeof_priv;
2418         }
2419
2420         hdev = kzalloc(alloc_size, GFP_KERNEL);
2421         if (!hdev)
2422                 return NULL;
2423
2424         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2425         hdev->esco_type = (ESCO_HV1);
2426         hdev->link_mode = (HCI_LM_ACCEPT);
2427         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2428         hdev->io_capability = 0x03;     /* No Input No Output */
2429         hdev->manufacturer = 0xffff;    /* Default to internal use */
2430         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2431         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2432         hdev->adv_instance_cnt = 0;
2433         hdev->cur_adv_instance = 0x00;
2434         hdev->adv_instance_timeout = 0;
2435
2436         hdev->advmon_allowlist_duration = 300;
2437         hdev->advmon_no_filter_duration = 500;
2438         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
2439
2440         hdev->sniff_max_interval = 800;
2441         hdev->sniff_min_interval = 80;
2442
2443         hdev->le_adv_channel_map = 0x07;
2444         hdev->le_adv_min_interval = 0x0800;
2445         hdev->le_adv_max_interval = 0x0800;
2446         hdev->le_scan_interval = 0x0060;
2447         hdev->le_scan_window = 0x0030;
2448         hdev->le_scan_int_suspend = 0x0400;
2449         hdev->le_scan_window_suspend = 0x0012;
2450         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2451         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2452         hdev->le_scan_int_adv_monitor = 0x0060;
2453         hdev->le_scan_window_adv_monitor = 0x0030;
2454         hdev->le_scan_int_connect = 0x0060;
2455         hdev->le_scan_window_connect = 0x0060;
2456         hdev->le_conn_min_interval = 0x0018;
2457         hdev->le_conn_max_interval = 0x0028;
2458         hdev->le_conn_latency = 0x0000;
2459         hdev->le_supv_timeout = 0x002a;
2460         hdev->le_def_tx_len = 0x001b;
2461         hdev->le_def_tx_time = 0x0148;
2462         hdev->le_max_tx_len = 0x001b;
2463         hdev->le_max_tx_time = 0x0148;
2464         hdev->le_max_rx_len = 0x001b;
2465         hdev->le_max_rx_time = 0x0148;
2466         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2467         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2468         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2469         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2470         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2471         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2472         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2473         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2474         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2475
2476         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2477         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2478         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2479         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2480         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2481         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2482
2483         /* default 1.28 sec page scan */
2484         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2485         hdev->def_page_scan_int = 0x0800;
2486         hdev->def_page_scan_window = 0x0012;
2487
2488         mutex_init(&hdev->lock);
2489         mutex_init(&hdev->req_lock);
2490
2491         INIT_LIST_HEAD(&hdev->mgmt_pending);
2492         INIT_LIST_HEAD(&hdev->reject_list);
2493         INIT_LIST_HEAD(&hdev->accept_list);
2494         INIT_LIST_HEAD(&hdev->uuids);
2495         INIT_LIST_HEAD(&hdev->link_keys);
2496         INIT_LIST_HEAD(&hdev->long_term_keys);
2497         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2498         INIT_LIST_HEAD(&hdev->remote_oob_data);
2499         INIT_LIST_HEAD(&hdev->le_accept_list);
2500         INIT_LIST_HEAD(&hdev->le_resolv_list);
2501         INIT_LIST_HEAD(&hdev->le_conn_params);
2502         INIT_LIST_HEAD(&hdev->pend_le_conns);
2503         INIT_LIST_HEAD(&hdev->pend_le_reports);
2504         INIT_LIST_HEAD(&hdev->conn_hash.list);
2505         INIT_LIST_HEAD(&hdev->adv_instances);
2506         INIT_LIST_HEAD(&hdev->blocked_keys);
2507         INIT_LIST_HEAD(&hdev->monitored_devices);
2508
2509         INIT_LIST_HEAD(&hdev->local_codecs);
2510         INIT_WORK(&hdev->rx_work, hci_rx_work);
2511         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2512         INIT_WORK(&hdev->tx_work, hci_tx_work);
2513         INIT_WORK(&hdev->power_on, hci_power_on);
2514         INIT_WORK(&hdev->error_reset, hci_error_reset);
2515
2516         hci_cmd_sync_init(hdev);
2517
2518         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2519
2520         skb_queue_head_init(&hdev->rx_q);
2521         skb_queue_head_init(&hdev->cmd_q);
2522         skb_queue_head_init(&hdev->raw_q);
2523
2524         init_waitqueue_head(&hdev->req_wait_q);
2525
2526         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2527         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2528
2529         hci_request_setup(hdev);
2530
2531         hci_init_sysfs(hdev);
2532         discovery_init(hdev);
2533
2534         return hdev;
2535 }
2536 EXPORT_SYMBOL(hci_alloc_dev_priv);
2537
2538 /* Free HCI device */
2539 void hci_free_dev(struct hci_dev *hdev)
2540 {
2541         /* will free via device release */
2542         put_device(&hdev->dev);
2543 }
2544 EXPORT_SYMBOL(hci_free_dev);
2545
2546 /* Register HCI device */
2547 int hci_register_dev(struct hci_dev *hdev)
2548 {
2549         int id, error;
2550
2551         if (!hdev->open || !hdev->close || !hdev->send)
2552                 return -EINVAL;
2553
2554         /* Do not allow HCI_AMP devices to register at index 0,
2555          * so the index can be used as the AMP controller ID.
2556          */
2557         switch (hdev->dev_type) {
2558         case HCI_PRIMARY:
2559                 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2560                 break;
2561         case HCI_AMP:
2562                 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2563                 break;
2564         default:
2565                 return -EINVAL;
2566         }
2567
2568         if (id < 0)
2569                 return id;
2570
2571         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2572         hdev->id = id;
2573
2574         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2575
2576         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2577         if (!hdev->workqueue) {
2578                 error = -ENOMEM;
2579                 goto err;
2580         }
2581
2582         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2583                                                       hdev->name);
2584         if (!hdev->req_workqueue) {
2585                 destroy_workqueue(hdev->workqueue);
2586                 error = -ENOMEM;
2587                 goto err;
2588         }
2589
2590         if (!IS_ERR_OR_NULL(bt_debugfs))
2591                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2592
2593         dev_set_name(&hdev->dev, "%s", hdev->name);
2594
2595         error = device_add(&hdev->dev);
2596         if (error < 0)
2597                 goto err_wqueue;
2598
2599         hci_leds_init(hdev);
2600
2601         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2602                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2603                                     hdev);
2604         if (hdev->rfkill) {
2605                 if (rfkill_register(hdev->rfkill) < 0) {
2606                         rfkill_destroy(hdev->rfkill);
2607                         hdev->rfkill = NULL;
2608                 }
2609         }
2610
2611         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2612                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2613
2614         hci_dev_set_flag(hdev, HCI_SETUP);
2615         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2616
2617         if (hdev->dev_type == HCI_PRIMARY) {
2618                 /* Assume BR/EDR support until proven otherwise (such as
2619                  * through reading supported features during init).
2620                  */
2621                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2622         }
2623
2624         write_lock(&hci_dev_list_lock);
2625         list_add(&hdev->list, &hci_dev_list);
2626         write_unlock(&hci_dev_list_lock);
2627
2628         /* Devices that are marked for raw-only usage are unconfigured
2629          * and should not be included in normal operation.
2630          */
2631         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2632                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2633
2634         /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2635          * callback.
2636          */
2637         if (hdev->wakeup)
2638                 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2639
2640         hci_sock_dev_event(hdev, HCI_DEV_REG);
2641         hci_dev_hold(hdev);
2642
2643         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2644                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2645                 error = register_pm_notifier(&hdev->suspend_notifier);
2646                 if (error)
2647                         goto err_wqueue;
2648         }
2649
2650         queue_work(hdev->req_workqueue, &hdev->power_on);
2651
2652         idr_init(&hdev->adv_monitors_idr);
2653         msft_register(hdev);
2654
2655         return id;
2656
2657 err_wqueue:
2658         debugfs_remove_recursive(hdev->debugfs);
2659         destroy_workqueue(hdev->workqueue);
2660         destroy_workqueue(hdev->req_workqueue);
2661 err:
2662         ida_simple_remove(&hci_index_ida, hdev->id);
2663
2664         return error;
2665 }
2666 EXPORT_SYMBOL(hci_register_dev);
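
/* Example (illustrative sketch of a minimal driver probe path;
 * my_open/my_close/my_send are hypothetical callbacks): registration
 * fails with -EINVAL unless all three mandatory callbacks are set:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_VIRTUAL;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */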
2667
2668 /* Unregister HCI device */
2669 void hci_unregister_dev(struct hci_dev *hdev)
2670 {
2671         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2672
2673         hci_dev_set_flag(hdev, HCI_UNREGISTER);
2674
2675         write_lock(&hci_dev_list_lock);
2676         list_del(&hdev->list);
2677         write_unlock(&hci_dev_list_lock);
2678
2679         cancel_work_sync(&hdev->power_on);
2680
2681         hci_cmd_sync_clear(hdev);
2682
2683         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
2684                 unregister_pm_notifier(&hdev->suspend_notifier);
2685
2686         msft_unregister(hdev);
2687
2688         hci_dev_do_close(hdev);
2689
2690         if (!test_bit(HCI_INIT, &hdev->flags) &&
2691             !hci_dev_test_flag(hdev, HCI_SETUP) &&
2692             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2693                 hci_dev_lock(hdev);
2694                 mgmt_index_removed(hdev);
2695                 hci_dev_unlock(hdev);
2696         }
2697
2698         /* mgmt_index_removed should take care of emptying the
2699          * pending list */
2700         BUG_ON(!list_empty(&hdev->mgmt_pending));
2701
2702         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2703
2704         if (hdev->rfkill) {
2705                 rfkill_unregister(hdev->rfkill);
2706                 rfkill_destroy(hdev->rfkill);
2707         }
2708
2709         device_del(&hdev->dev);
2710         /* Actual cleanup is deferred until hci_release_dev(). */
2711         hci_dev_put(hdev);
2712 }
2713 EXPORT_SYMBOL(hci_unregister_dev);
2714
2715 /* Release HCI device */
2716 void hci_release_dev(struct hci_dev *hdev)
2717 {
2718         debugfs_remove_recursive(hdev->debugfs);
2719         kfree_const(hdev->hw_info);
2720         kfree_const(hdev->fw_info);
2721
2722         destroy_workqueue(hdev->workqueue);
2723         destroy_workqueue(hdev->req_workqueue);
2724
2725         hci_dev_lock(hdev);
2726         hci_bdaddr_list_clear(&hdev->reject_list);
2727         hci_bdaddr_list_clear(&hdev->accept_list);
2728         hci_uuids_clear(hdev);
2729         hci_link_keys_clear(hdev);
2730         hci_smp_ltks_clear(hdev);
2731         hci_smp_irks_clear(hdev);
2732         hci_remote_oob_data_clear(hdev);
2733         hci_adv_instances_clear(hdev);
2734         hci_adv_monitors_clear(hdev);
2735         hci_bdaddr_list_clear(&hdev->le_accept_list);
2736         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2737         hci_conn_params_clear_all(hdev);
2738         hci_discovery_filter_clear(hdev);
2739         hci_blocked_keys_clear(hdev);
2740         hci_dev_unlock(hdev);
2741
2742         ida_simple_remove(&hci_index_ida, hdev->id);
2743         kfree_skb(hdev->sent_cmd);
2744         kfree(hdev);
2745 }
2746 EXPORT_SYMBOL(hci_release_dev);
2747
2748 /* Suspend HCI device */
2749 int hci_suspend_dev(struct hci_dev *hdev)
2750 {
2751         int ret;
2752
2753         bt_dev_dbg(hdev, "");
2754
2755         /* Suspend should only act when powered. */
2756         if (!hdev_is_powered(hdev) ||
2757             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2758                 return 0;
2759
2760         /* If powering down don't attempt to suspend */
2761         if (mgmt_powering_down(hdev))
2762                 return 0;
2763
2764         hci_req_sync_lock(hdev);
2765         ret = hci_suspend_sync(hdev);
2766         hci_req_sync_unlock(hdev);
2767
2768         hci_clear_wake_reason(hdev);
2769         mgmt_suspending(hdev, hdev->suspend_state);
2770
2771         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2772         return ret;
2773 }
2774 EXPORT_SYMBOL(hci_suspend_dev);
2775
2776 /* Resume HCI device */
2777 int hci_resume_dev(struct hci_dev *hdev)
2778 {
2779         int ret;
2780
2781         bt_dev_dbg(hdev, "");
2782
2783         /* Resume should only act when powered. */
2784         if (!hdev_is_powered(hdev) ||
2785             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2786                 return 0;
2787
2788         /* If powering down don't attempt to resume */
2789         if (mgmt_powering_down(hdev))
2790                 return 0;
2791
2792         hci_req_sync_lock(hdev);
2793         ret = hci_resume_sync(hdev);
2794         hci_req_sync_unlock(hdev);
2795
2796         mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2797                       hdev->wake_addr_type);
2798
2799         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2800         return ret;
2801 }
2802 EXPORT_SYMBOL(hci_resume_dev);
2803
2804 /* Reset HCI device */
2805 int hci_reset_dev(struct hci_dev *hdev)
2806 {
2807         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2808         struct sk_buff *skb;
2809
2810         skb = bt_skb_alloc(sizeof(hw_err), GFP_ATOMIC);
2811         if (!skb)
2812                 return -ENOMEM;
2813
2814         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2815         skb_put_data(skb, hw_err, sizeof(hw_err));
2816
2817         bt_dev_err(hdev, "Injecting HCI hardware error event");
2818
2819         /* Send Hardware Error to upper stack */
2820         return hci_recv_frame(hdev, skb);
2821 }
2822 EXPORT_SYMBOL(hci_reset_dev);
2823
2824 /* Receive frame from HCI drivers */
2825 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2826 {
2827         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2828                       !test_bit(HCI_INIT, &hdev->flags))) {
2829                 kfree_skb(skb);
2830                 return -ENXIO;
2831         }
2832
2833         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
2834             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
2835             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
2836             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
2837                 kfree_skb(skb);
2838                 return -EINVAL;
2839         }
2840
2841         /* Incoming skb */
2842         bt_cb(skb)->incoming = 1;
2843
2844         /* Time stamp */
2845         __net_timestamp(skb);
2846
2847         skb_queue_tail(&hdev->rx_q, skb);
2848         queue_work(hdev->workqueue, &hdev->rx_work);
2849
2850         return 0;
2851 }
2852 EXPORT_SYMBOL(hci_recv_frame);
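
/* Example (illustrative sketch of a driver RX path; "data" and "count"
 * are hypothetical): the packet type must be set before queueing,
 * otherwise the frame is rejected with -EINVAL:
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, data, count);
 *	return hci_recv_frame(hdev, skb);
 */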
2853
2854 /* Receive diagnostic message from HCI drivers */
2855 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2856 {
2857         /* Mark as diagnostic packet */
2858         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2859
2860         /* Time stamp */
2861         __net_timestamp(skb);
2862
2863         skb_queue_tail(&hdev->rx_q, skb);
2864         queue_work(hdev->workqueue, &hdev->rx_work);
2865
2866         return 0;
2867 }
2868 EXPORT_SYMBOL(hci_recv_diag);
2869
2870 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2871 {
2872         va_list vargs;
2873
2874         va_start(vargs, fmt);
2875         kfree_const(hdev->hw_info);
2876         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2877         va_end(vargs);
2878 }
2879 EXPORT_SYMBOL(hci_set_hw_info);
2880
2881 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2882 {
2883         va_list vargs;
2884
2885         va_start(vargs, fmt);
2886         kfree_const(hdev->fw_info);
2887         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2888         va_end(vargs);
2889 }
2890 EXPORT_SYMBOL(hci_set_fw_info);
2891
2892 /* ---- Interface to upper protocols ---- */
2893
2894 int hci_register_cb(struct hci_cb *cb)
2895 {
2896         BT_DBG("%p name %s", cb, cb->name);
2897
2898         mutex_lock(&hci_cb_list_lock);
2899         list_add_tail(&cb->list, &hci_cb_list);
2900         mutex_unlock(&hci_cb_list_lock);
2901
2902         return 0;
2903 }
2904 EXPORT_SYMBOL(hci_register_cb);
2905
2906 int hci_unregister_cb(struct hci_cb *cb)
2907 {
2908         BT_DBG("%p name %s", cb, cb->name);
2909
2910         mutex_lock(&hci_cb_list_lock);
2911         list_del(&cb->list);
2912         mutex_unlock(&hci_cb_list_lock);
2913
2914         return 0;
2915 }
2916 EXPORT_SYMBOL(hci_unregister_cb);
2917
2918 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2919 {
2920         int err;
2921
2922         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2923                skb->len);
2924
2925         /* Time stamp */
2926         __net_timestamp(skb);
2927
2928         /* Send copy to monitor */
2929         hci_send_to_monitor(hdev, skb);
2930
2931         if (atomic_read(&hdev->promisc)) {
2932                 /* Send copy to the sockets */
2933                 hci_send_to_sock(hdev, skb);
2934         }
2935
2936         /* Get rid of skb owner, prior to sending to the driver. */
2937         skb_orphan(skb);
2938
2939         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
2940                 kfree_skb(skb);
2941                 return -EINVAL;
2942         }
2943
2944         err = hdev->send(hdev, skb);
2945         if (err < 0) {
2946                 bt_dev_err(hdev, "sending frame failed (%d)", err);
2947                 kfree_skb(skb);
2948                 return err;
2949         }
2950
2951         return 0;
2952 }
2953
2954 /* Send HCI command */
2955 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2956                  const void *param)
2957 {
2958         struct sk_buff *skb;
2959
2960         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2961
2962         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2963         if (!skb) {
2964                 bt_dev_err(hdev, "no memory for command");
2965                 return -ENOMEM;
2966         }
2967
2968         /* Stand-alone HCI commands must be flagged as
2969          * single-command requests.
2970          */
2971         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
2972
2973         skb_queue_tail(&hdev->cmd_q, skb);
2974         queue_work(hdev->workqueue, &hdev->cmd_work);
2975
2976         return 0;
2977 }
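
/* Example (illustrative sketch): queueing Write Scan Enable with both
 * page and inquiry scan enabled (0x03); the result arrives later as an
 * HCI Command Complete event:
 *
 *	u8 scan = 0x03;
 *
 *	err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
 *			   sizeof(scan), &scan);
 */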
2978
2979 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
2980                    const void *param)
2981 {
2982         struct sk_buff *skb;
2983
2984         if (hci_opcode_ogf(opcode) != 0x3f) {
2985                 /* A controller receiving a command shall respond with either
2986                  * a Command Status Event or a Command Complete Event.
2987                  * Therefore, all standard HCI commands must be sent via the
2988                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
2989                  * Some vendors do not comply with this rule for vendor-specific
2990                  * commands and do not return any event. We want to support
2991                  * unresponded commands for such cases only.
2992                  */
2993                 bt_dev_err(hdev, "unresponded command not supported");
2994                 return -EINVAL;
2995         }
2996
2997         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2998         if (!skb) {
2999                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3000                            opcode);
3001                 return -ENOMEM;
3002         }
3003
3004         hci_send_frame(hdev, skb);
3005
3006         return 0;
3007 }
3008 EXPORT_SYMBOL(__hci_cmd_send);
3009
3010 /* Get data from the previously sent command */
3011 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3012 {
3013         struct hci_command_hdr *hdr;
3014
3015         if (!hdev->sent_cmd)
3016                 return NULL;
3017
3018         hdr = (void *) hdev->sent_cmd->data;
3019
3020         if (hdr->opcode != cpu_to_le16(opcode))
3021                 return NULL;
3022
3023         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3024
3025         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3026 }
3027
3028 /* Send ACL data */
3029 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3030 {
3031         struct hci_acl_hdr *hdr;
3032         int len = skb->len;
3033
3034         skb_push(skb, HCI_ACL_HDR_SIZE);
3035         skb_reset_transport_header(skb);
3036         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3037         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3038         hdr->dlen   = cpu_to_le16(len);
3039 }
3040
3041 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3042                           struct sk_buff *skb, __u16 flags)
3043 {
3044         struct hci_conn *conn = chan->conn;
3045         struct hci_dev *hdev = conn->hdev;
3046         struct sk_buff *list;
3047
3048         skb->len = skb_headlen(skb);
3049         skb->data_len = 0;
3050
3051         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3052
3053         switch (hdev->dev_type) {
3054         case HCI_PRIMARY:
3055                 hci_add_acl_hdr(skb, conn->handle, flags);
3056                 break;
3057         case HCI_AMP:
3058                 hci_add_acl_hdr(skb, chan->handle, flags);
3059                 break;
3060         default:
3061                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3062                 return;
3063         }
3064
3065         list = skb_shinfo(skb)->frag_list;
3066         if (!list) {
3067                 /* Non-fragmented */
3068                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3069
3070                 skb_queue_tail(queue, skb);
3071         } else {
3072                 /* Fragmented */
3073                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3074
3075                 skb_shinfo(skb)->frag_list = NULL;
3076
3077                 /* Queue all fragments atomically. We need to use spin_lock_bh
3078                  * here because of 6LoWPAN links, as there this function is
3079                  * called from softirq and using normal spin lock could cause
3080                  * deadlocks.
3081                  */
3082                 spin_lock_bh(&queue->lock);
3083
3084                 __skb_queue_tail(queue, skb);
3085
3086                 flags &= ~ACL_START;
3087                 flags |= ACL_CONT;
3088                 do {
3089                         skb = list; list = list->next;
3090
3091                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3092                         hci_add_acl_hdr(skb, conn->handle, flags);
3093
3094                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3095
3096                         __skb_queue_tail(queue, skb);
3097                 } while (list);
3098
3099                 spin_unlock_bh(&queue->lock);
3100         }
3101 }
3102
3103 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3104 {
3105         struct hci_dev *hdev = chan->conn->hdev;
3106
3107         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3108
3109         hci_queue_acl(chan, &chan->data_q, skb, flags);
3110
3111         queue_work(hdev->workqueue, &hdev->tx_work);
3112 }
3113
3114 /* Send SCO data */
3115 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3116 {
3117         struct hci_dev *hdev = conn->hdev;
3118         struct hci_sco_hdr hdr;
3119
3120         BT_DBG("%s len %d", hdev->name, skb->len);
3121
3122         hdr.handle = cpu_to_le16(conn->handle);
3123         hdr.dlen   = skb->len;
3124
3125         skb_push(skb, HCI_SCO_HDR_SIZE);
3126         skb_reset_transport_header(skb);
3127         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3128
3129         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3130
3131         skb_queue_tail(&conn->data_q, skb);
3132         queue_work(hdev->workqueue, &hdev->tx_work);
3133 }
3134
3135 /* ---- HCI TX task (outgoing data) ---- */
3136
3137 /* HCI Connection scheduler */
3138 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3139                                      int *quote)
3140 {
3141         struct hci_conn_hash *h = &hdev->conn_hash;
3142         struct hci_conn *conn = NULL, *c;
3143         unsigned int num = 0, min = ~0;
3144
3145         /* We don't have to lock the device here. Connections are
3146          * always added and removed with the TX task disabled. */
3147
3148         rcu_read_lock();
3149
3150         list_for_each_entry_rcu(c, &h->list, list) {
3151                 if (c->type != type || skb_queue_empty(&c->data_q))
3152                         continue;
3153
3154                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3155                         continue;
3156
3157                 num++;
3158
3159                 if (c->sent < min) {
3160                         min  = c->sent;
3161                         conn = c;
3162                 }
3163
3164                 if (hci_conn_num(hdev, type) == num)
3165                         break;
3166         }
3167
3168         rcu_read_unlock();
3169
3170         if (conn) {
3171                 int cnt, q;
3172
3173                 switch (conn->type) {
3174                 case ACL_LINK:
3175                         cnt = hdev->acl_cnt;
3176                         break;
3177                 case SCO_LINK:
3178                 case ESCO_LINK:
3179                         cnt = hdev->sco_cnt;
3180                         break;
3181                 case LE_LINK:
3182                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3183                         break;
3184                 default:
3185                         cnt = 0;
3186                         bt_dev_err(hdev, "unknown link type %d", conn->type);
3187                 }
3188
3189                 q = cnt / num;
3190                 *quote = q ? q : 1;
3191         } else
3192                 *quote = 0;
3193
3194         BT_DBG("conn %p quote %d", conn, *quote);
3195         return conn;
3196 }
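
/* Worked example (illustrative): with hdev->acl_cnt == 5 free ACL
 * buffers and num == 2 ACL connections that have queued data, the
 * least-recently-served connection gets a quote of 5 / 2 == 2 packets;
 * the quote is clamped to at least 1 so a connection with queued data
 * is never starved completely.
 */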
3197
3198 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3199 {
3200         struct hci_conn_hash *h = &hdev->conn_hash;
3201         struct hci_conn *c;
3202
3203         bt_dev_err(hdev, "link tx timeout");
3204
3205         rcu_read_lock();
3206
3207         /* Kill stalled connections */
3208         list_for_each_entry_rcu(c, &h->list, list) {
3209                 if (c->type == type && c->sent) {
3210                         bt_dev_err(hdev, "killing stalled connection %pMR",
3211                                    &c->dst);
3212                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3213                 }
3214         }
3215
3216         rcu_read_unlock();
3217 }
3218
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

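/* Avoid starvation after a scheduling round: reset the sent counter of
 * channels that were serviced, and promote the head skb of channels that
 * were not to HCI_PRIO_MAX - 1 so they win the next round.
 */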
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

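/* Detect a stalled link: if no buffer credits are left and nothing has
 * been transmitted for HCI_ACL_TX_TIMEOUT, invoke the link tx timeout
 * handler for the given link type.
 */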
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
	unsigned long last_tx;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return;

	last_tx = type == LE_LINK ? hdev->le_last_tx : hdev->acl_last_tx;

	/* tx timeout must be longer than maximum link supervision
	 * timeout (40.9 seconds)
	 */
	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
		hci_link_tx_to(hdev, type);
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

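/* Schedule eSCO */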
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

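/* Schedule ACL data using packet-based flow control: drain the highest
 * priority channels first, interleave pending (e)SCO traffic after every
 * frame, and recalculate channel priorities if anything was sent.
 */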
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt, ACL_LINK);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

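/* Schedule ACL data using block-based flow control: each frame consumes
 * a number of controller buffer blocks derived from its length instead
 * of a single packet credit.
 */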
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	__check_timeout(hdev, cnt, type);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

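/* Schedule ACL data according to the flow control mode advertised by
 * the controller.
 */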
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

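/* Schedule LE data. Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL buffers, so the remaining credit count is
 * written back to whichever pool was drawn from.
 */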
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt, LE_LINK);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

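/* TX work: run the per-type schedulers unless userspace has exclusive
 * access to the device, then flush any queued raw packets.
 */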
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	}

	bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
		   handle);

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		bt_cb(skb)->sco.pkt_status = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	}

	bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
			       handle);

	kfree_skb(skb);
}

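/* A request is considered complete when the next command in the queue
 * starts a new request (or when the command queue is empty).
 */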
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

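/* Requeue a clone of the last sent command, unless it was HCI_Reset.
 * This recovers controllers that complete a spontaneous reset instead of
 * the command that was actually pending.
 */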
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

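/* Match a command complete/status event against the current request and,
 * once the request is finished (last command completed or any command
 * failed), hand back its completion callback and drop the request's
 * remaining queued commands.
 */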
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

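/* RX work: feed every received packet to the monitor and, if enabled, to
 * the HCI sockets, then dispatch it to the handler for its packet type.
 */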
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * While the device is in HCI_INIT state we still need
		 * to handle the packets so that the driver can
		 * complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

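/* Command work: if a command credit is available, send the next queued
 * command, keep a clone in sent_cmd for completion matching and arm the
 * command timeout.
 */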
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			int res;

			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);

			res = hci_send_frame(hdev, skb);
			if (res < 0)
				__hci_cmd_sync_cancel(hdev, -res);

			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}