Remove all #inclusions of asm/system.h
[linux-2.6-microblaze.git] / net/bluetooth/hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47
48 #include <linux/uaccess.h>
49 #include <asm/unaligned.h>
50
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53
54 #define AUTO_OFF_TIMEOUT 2000
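/* AUTO_OFF_TIMEOUT is in milliseconds: a controller that is auto-powered
 * on at registration time is scheduled to power back off two seconds
 * later unless userspace takes ownership first (see hci_power_on()). */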
55
56 static void hci_rx_work(struct work_struct *work);
57 static void hci_cmd_work(struct work_struct *work);
58 static void hci_tx_work(struct work_struct *work);
59
60 /* HCI device list */
61 LIST_HEAD(hci_dev_list);
62 DEFINE_RWLOCK(hci_dev_list_lock);
63
64 /* HCI callback list */
65 LIST_HEAD(hci_cb_list);
66 DEFINE_RWLOCK(hci_cb_list_lock);
67
68 /* ---- HCI notifications ---- */
69
70 static void hci_notify(struct hci_dev *hdev, int event)
71 {
72         hci_sock_dev_event(hdev, event);
73 }
74
75 /* ---- HCI requests ---- */
76
77 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
78 {
79         BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
80
81         /* If this is the init phase, check whether the completed command
82          * matches the last init command, and if not just return.
83          */
84         if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
85                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
86                 struct sk_buff *skb;
87
88                 /* Some CSR based controllers generate a spontaneous
89                  * reset complete event during init and any pending
90                  * command will never be completed. In such a case we
91                  * need to resend whatever was the last sent
92                  * command.
93                  */
94
95                 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
96                         return;
97
98                 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
99                 if (skb) {
100                         skb_queue_head(&hdev->cmd_q, skb);
101                         queue_work(hdev->workqueue, &hdev->cmd_work);
102                 }
103
104                 return;
105         }
106
107         if (hdev->req_status == HCI_REQ_PEND) {
108                 hdev->req_result = result;
109                 hdev->req_status = HCI_REQ_DONE;
110                 wake_up_interruptible(&hdev->req_wait_q);
111         }
112 }
113
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
115 {
116         BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118         if (hdev->req_status == HCI_REQ_PEND) {
119                 hdev->req_result = err;
120                 hdev->req_status = HCI_REQ_CANCELED;
121                 wake_up_interruptible(&hdev->req_wait_q);
122         }
123 }
124
125 /* Execute request and wait for completion. */
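/* The request callback fires off one or more HCI commands and returns
 * immediately; completion is signalled from the event path through
 * hci_req_complete(), which sets req_status to HCI_REQ_DONE and wakes
 * req_wait_q. The caller sleeps interruptibly for at most @timeout
 * jiffies. */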
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127                                         unsigned long opt, __u32 timeout)
128 {
129         DECLARE_WAITQUEUE(wait, current);
130         int err = 0;
131
132         BT_DBG("%s start", hdev->name);
133
134         hdev->req_status = HCI_REQ_PEND;
135
136         add_wait_queue(&hdev->req_wait_q, &wait);
137         set_current_state(TASK_INTERRUPTIBLE);
138
139         req(hdev, opt);
140         schedule_timeout(timeout);
141
142         remove_wait_queue(&hdev->req_wait_q, &wait);
143
144         if (signal_pending(current))
145                 return -EINTR;
146
147         switch (hdev->req_status) {
148         case HCI_REQ_DONE:
149                 err = -bt_to_errno(hdev->req_result);
150                 break;
151
152         case HCI_REQ_CANCELED:
153                 err = -hdev->req_result;
154                 break;
155
156         default:
157                 err = -ETIMEDOUT;
158                 break;
159         }
160
161         hdev->req_status = hdev->req_result = 0;
162
163         BT_DBG("%s end: err %d", hdev->name, err);
164
165         return err;
166 }
167
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169                                         unsigned long opt, __u32 timeout)
170 {
171         int ret;
172
173         if (!test_bit(HCI_UP, &hdev->flags))
174                 return -ENETDOWN;
175
176         /* Serialize all requests */
177         hci_req_lock(hdev);
178         ret = __hci_request(hdev, req, opt, timeout);
179         hci_req_unlock(hdev);
180
181         return ret;
182 }
183
184 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185 {
186         BT_DBG("%s %ld", hdev->name, opt);
187
188         /* Reset device */
189         set_bit(HCI_RESET, &hdev->flags);
190         hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
191 }
192
193 static void bredr_init(struct hci_dev *hdev)
194 {
195         struct hci_cp_delete_stored_link_key cp;
196         __le16 param;
197         __u8 flt_type;
198
199         hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
200
201         /* Mandatory initialization */
202
203         /* Reset */
204         if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
205                 set_bit(HCI_RESET, &hdev->flags);
206                 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
207         }
208
209         /* Read Local Supported Features */
210         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
211
212         /* Read Local Version */
213         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
214
215         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
216         hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
217
218         /* Read BD Address */
219         hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
220
221         /* Read Class of Device */
222         hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
223
224         /* Read Local Name */
225         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
226
227         /* Read Voice Setting */
228         hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
229
230         /* Optional initialization */
231
232         /* Clear Event Filters */
233         flt_type = HCI_FLT_CLEAR_ALL;
234         hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
235
236         /* Connection accept timeout ~20 secs */
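        /* The parameter is expressed in baseband slots of 0.625 ms each,
         * so 0x7d00 = 32000 slots * 0.625 ms = 20 seconds. */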
237         param = cpu_to_le16(0x7d00);
238         hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
239
240         bacpy(&cp.bdaddr, BDADDR_ANY);
241         cp.delete_all = 1;
242         hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
243 }
244
245 static void amp_init(struct hci_dev *hdev)
246 {
247         hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
248
249         /* Reset */
250         hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
251
252         /* Read Local Version */
253         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
254 }
255
256 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
257 {
258         struct sk_buff *skb;
259
260         BT_DBG("%s %ld", hdev->name, opt);
261
262         /* Driver initialization */
263
264         /* Special commands */
265         while ((skb = skb_dequeue(&hdev->driver_init))) {
266                 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
267                 skb->dev = (void *) hdev;
268
269                 skb_queue_tail(&hdev->cmd_q, skb);
270                 queue_work(hdev->workqueue, &hdev->cmd_work);
271         }
272         skb_queue_purge(&hdev->driver_init);
273
274         switch (hdev->dev_type) {
275         case HCI_BREDR:
276                 bredr_init(hdev);
277                 break;
278
279         case HCI_AMP:
280                 amp_init(hdev);
281                 break;
282
283         default:
284                 BT_ERR("Unknown device type %d", hdev->dev_type);
285                 break;
286         }
288 }
289
290 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
291 {
292         BT_DBG("%s", hdev->name);
293
294         /* Read LE buffer size */
295         hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
296 }
297
298 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
299 {
300         __u8 scan = opt;
301
302         BT_DBG("%s %x", hdev->name, scan);
303
304         /* Inquiry and Page scans */
305         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
306 }
307
308 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
309 {
310         __u8 auth = opt;
311
312         BT_DBG("%s %x", hdev->name, auth);
313
314         /* Authentication */
315         hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
316 }
317
318 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
319 {
320         __u8 encrypt = opt;
321
322         BT_DBG("%s %x", hdev->name, encrypt);
323
324         /* Encryption */
325         hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
326 }
327
328 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
329 {
330         __le16 policy = cpu_to_le16(opt);
331
332         BT_DBG("%s %x", hdev->name, policy);
333
334         /* Default link policy */
335         hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
336 }
337
338 /* Get HCI device by index.
339  * Device is held on return. */
340 struct hci_dev *hci_dev_get(int index)
341 {
342         struct hci_dev *hdev = NULL, *d;
343
344         BT_DBG("%d", index);
345
346         if (index < 0)
347                 return NULL;
348
349         read_lock(&hci_dev_list_lock);
350         list_for_each_entry(d, &hci_dev_list, list) {
351                 if (d->id == index) {
352                         hdev = hci_dev_hold(d);
353                         break;
354                 }
355         }
356         read_unlock(&hci_dev_list_lock);
357         return hdev;
358 }
359
360 /* ---- Inquiry support ---- */
361
362 bool hci_discovery_active(struct hci_dev *hdev)
363 {
364         struct discovery_state *discov = &hdev->discovery;
365
366         switch (discov->state) {
367         case DISCOVERY_FINDING:
368         case DISCOVERY_RESOLVING:
369                 return true;
370
371         default:
372                 return false;
373         }
374 }
375
376 void hci_discovery_set_state(struct hci_dev *hdev, int state)
377 {
378         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
379
380         if (hdev->discovery.state == state)
381                 return;
382
383         switch (state) {
384         case DISCOVERY_STOPPED:
385                 if (hdev->discovery.state != DISCOVERY_STARTING)
386                         mgmt_discovering(hdev, 0);
387                 hdev->discovery.type = 0;
388                 break;
389         case DISCOVERY_STARTING:
390                 break;
391         case DISCOVERY_FINDING:
392                 mgmt_discovering(hdev, 1);
393                 break;
394         case DISCOVERY_RESOLVING:
395                 break;
396         case DISCOVERY_STOPPING:
397                 break;
398         }
399
400         hdev->discovery.state = state;
401 }
402
403 static void inquiry_cache_flush(struct hci_dev *hdev)
404 {
405         struct discovery_state *cache = &hdev->discovery;
406         struct inquiry_entry *p, *n;
407
408         list_for_each_entry_safe(p, n, &cache->all, all) {
409                 list_del(&p->all);
410                 kfree(p);
411         }
412
413         INIT_LIST_HEAD(&cache->unknown);
414         INIT_LIST_HEAD(&cache->resolve);
415 }
416
417 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
418 {
419         struct discovery_state *cache = &hdev->discovery;
420         struct inquiry_entry *e;
421
422         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
423
424         list_for_each_entry(e, &cache->all, all) {
425                 if (!bacmp(&e->data.bdaddr, bdaddr))
426                         return e;
427         }
428
429         return NULL;
430 }
431
432 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
433                                                        bdaddr_t *bdaddr)
434 {
435         struct discovery_state *cache = &hdev->discovery;
436         struct inquiry_entry *e;
437
438         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
439
440         list_for_each_entry(e, &cache->unknown, list) {
441                 if (!bacmp(&e->data.bdaddr, bdaddr))
442                         return e;
443         }
444
445         return NULL;
446 }
447
448 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
449                                                        bdaddr_t *bdaddr,
450                                                        int state)
451 {
452         struct discovery_state *cache = &hdev->discovery;
453         struct inquiry_entry *e;
454
455         BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
456
457         list_for_each_entry(e, &cache->resolve, list) {
458                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
459                         return e;
460                 if (!bacmp(&e->data.bdaddr, bdaddr))
461                         return e;
462         }
463
464         return NULL;
465 }
466
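/* Keep the resolve list sorted by ascending |RSSI| so that remote name
 * requests go out for the strongest (typically closest) devices first;
 * entries whose name request is already pending keep their position. */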
467 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
468                                       struct inquiry_entry *ie)
469 {
470         struct discovery_state *cache = &hdev->discovery;
471         struct list_head *pos = &cache->resolve;
472         struct inquiry_entry *p;
473
474         list_del(&ie->list);
475
476         list_for_each_entry(p, &cache->resolve, list) {
477                 if (p->name_state != NAME_PENDING &&
478                                 abs(p->data.rssi) >= abs(ie->data.rssi))
479                         break;
480                 pos = &p->list;
481         }
482
483         list_add(&ie->list, pos);
484 }
485
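/* Returns true when no remote name request is needed for this result;
 * *ssp is set when either the new data or an already cached entry
 * indicates Secure Simple Pairing support. */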
486 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
487                               bool name_known, bool *ssp)
488 {
489         struct discovery_state *cache = &hdev->discovery;
490         struct inquiry_entry *ie;
491
492         BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
493
494         if (ssp)
495                 *ssp = data->ssp_mode;
496
497         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
498         if (ie) {
499                 if (ie->data.ssp_mode && ssp)
500                         *ssp = true;
501
502                 if (ie->name_state == NAME_NEEDED &&
503                                                 data->rssi != ie->data.rssi) {
504                         ie->data.rssi = data->rssi;
505                         hci_inquiry_cache_update_resolve(hdev, ie);
506                 }
507
508                 goto update;
509         }
510
511         /* Entry not in the cache. Add new one. */
512         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
513         if (!ie)
514                 return false;
515
516         list_add(&ie->all, &cache->all);
517
518         if (name_known) {
519                 ie->name_state = NAME_KNOWN;
520         } else {
521                 ie->name_state = NAME_NOT_KNOWN;
522                 list_add(&ie->list, &cache->unknown);
523         }
524
525 update:
526         if (name_known && ie->name_state != NAME_KNOWN &&
527                                         ie->name_state != NAME_PENDING) {
528                 ie->name_state = NAME_KNOWN;
529                 list_del(&ie->list);
530         }
531
532         memcpy(&ie->data, data, sizeof(*data));
533         ie->timestamp = jiffies;
534         cache->timestamp = jiffies;
535
536         if (ie->name_state == NAME_NOT_KNOWN)
537                 return false;
538
539         return true;
540 }
541
542 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
543 {
544         struct discovery_state *cache = &hdev->discovery;
545         struct inquiry_info *info = (struct inquiry_info *) buf;
546         struct inquiry_entry *e;
547         int copied = 0;
548
549         list_for_each_entry(e, &cache->all, all) {
550                 struct inquiry_data *data = &e->data;
551
552                 if (copied >= num)
553                         break;
554
555                 bacpy(&info->bdaddr, &data->bdaddr);
556                 info->pscan_rep_mode    = data->pscan_rep_mode;
557                 info->pscan_period_mode = data->pscan_period_mode;
558                 info->pscan_mode        = data->pscan_mode;
559                 memcpy(info->dev_class, data->dev_class, 3);
560                 info->clock_offset      = data->clock_offset;
561
562                 info++;
563                 copied++;
564         }
565
566         BT_DBG("cache %p, copied %d", cache, copied);
567         return copied;
568 }
569
570 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
571 {
572         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
573         struct hci_cp_inquiry cp;
574
575         BT_DBG("%s", hdev->name);
576
577         if (test_bit(HCI_INQUIRY, &hdev->flags))
578                 return;
579
580         /* Start Inquiry */
581         memcpy(&cp.lap, &ir->lap, 3);
582         cp.length  = ir->length;
583         cp.num_rsp = ir->num_rsp;
584         hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
585 }
586
587 int hci_inquiry(void __user *arg)
588 {
589         __u8 __user *ptr = arg;
590         struct hci_inquiry_req ir;
591         struct hci_dev *hdev;
592         int err = 0, do_inquiry = 0, max_rsp;
593         long timeo;
594         __u8 *buf;
595
596         if (copy_from_user(&ir, ptr, sizeof(ir)))
597                 return -EFAULT;
598
599         hdev = hci_dev_get(ir.dev_id);
600         if (!hdev)
601                 return -ENODEV;
602
603         hci_dev_lock(hdev);
604         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
605                                 inquiry_cache_empty(hdev) ||
606                                 ir.flags & IREQ_CACHE_FLUSH) {
607                 inquiry_cache_flush(hdev);
608                 do_inquiry = 1;
609         }
610         hci_dev_unlock(hdev);
611
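        /* ir.length counts inquiry-length units of 1.28 s as defined by
         * HCI; budget roughly 2 s of wall time per unit for the request. */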
612         timeo = ir.length * msecs_to_jiffies(2000);
613
614         if (do_inquiry) {
615                 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
616                 if (err < 0)
617                         goto done;
618         }
619
620         /* For an unlimited number of responses we will use a buffer with 255 entries */
621         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
622
623         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
624          * copy it to the user space.
625          */
626         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
627         if (!buf) {
628                 err = -ENOMEM;
629                 goto done;
630         }
631
632         hci_dev_lock(hdev);
633         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
634         hci_dev_unlock(hdev);
635
636         BT_DBG("num_rsp %d", ir.num_rsp);
637
638         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
639                 ptr += sizeof(ir);
640                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
641                                         ir.num_rsp))
642                         err = -EFAULT;
643         } else
644                 err = -EFAULT;
645
646         kfree(buf);
647
648 done:
649         hci_dev_put(hdev);
650         return err;
651 }
652
653 /* ---- HCI ioctl helpers ---- */
654
655 int hci_dev_open(__u16 dev)
656 {
657         struct hci_dev *hdev;
658         int ret = 0;
659
660         hdev = hci_dev_get(dev);
661         if (!hdev)
662                 return -ENODEV;
663
664         BT_DBG("%s %p", hdev->name, hdev);
665
666         hci_req_lock(hdev);
667
668         if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
669                 ret = -ERFKILL;
670                 goto done;
671         }
672
673         if (test_bit(HCI_UP, &hdev->flags)) {
674                 ret = -EALREADY;
675                 goto done;
676         }
677
678         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
679                 set_bit(HCI_RAW, &hdev->flags);
680
681         /* Treat all non-BR/EDR controllers as raw devices if
682            enable_hs is not set */
683         if (hdev->dev_type != HCI_BREDR && !enable_hs)
684                 set_bit(HCI_RAW, &hdev->flags);
685
686         if (hdev->open(hdev)) {
687                 ret = -EIO;
688                 goto done;
689         }
690
691         if (!test_bit(HCI_RAW, &hdev->flags)) {
692                 atomic_set(&hdev->cmd_cnt, 1);
693                 set_bit(HCI_INIT, &hdev->flags);
694                 hdev->init_last_cmd = 0;
695
696                 ret = __hci_request(hdev, hci_init_req, 0,
697                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
698
699                 if (lmp_host_le_capable(hdev))
700                         ret = __hci_request(hdev, hci_le_init_req, 0,
701                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
702
703                 clear_bit(HCI_INIT, &hdev->flags);
704         }
705
706         if (!ret) {
707                 hci_dev_hold(hdev);
708                 set_bit(HCI_UP, &hdev->flags);
709                 hci_notify(hdev, HCI_DEV_UP);
710                 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
711                         hci_dev_lock(hdev);
712                         mgmt_powered(hdev, 1);
713                         hci_dev_unlock(hdev);
714                 }
715         } else {
716                 /* Init failed, cleanup */
717                 flush_work(&hdev->tx_work);
718                 flush_work(&hdev->cmd_work);
719                 flush_work(&hdev->rx_work);
720
721                 skb_queue_purge(&hdev->cmd_q);
722                 skb_queue_purge(&hdev->rx_q);
723
724                 if (hdev->flush)
725                         hdev->flush(hdev);
726
727                 if (hdev->sent_cmd) {
728                         kfree_skb(hdev->sent_cmd);
729                         hdev->sent_cmd = NULL;
730                 }
731
732                 hdev->close(hdev);
733                 hdev->flags = 0;
734         }
735
736 done:
737         hci_req_unlock(hdev);
738         hci_dev_put(hdev);
739         return ret;
740 }
741
742 static int hci_dev_do_close(struct hci_dev *hdev)
743 {
744         BT_DBG("%s %p", hdev->name, hdev);
745
746         cancel_work_sync(&hdev->le_scan);
747
748         hci_req_cancel(hdev, ENODEV);
749         hci_req_lock(hdev);
750
751         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
752                 del_timer_sync(&hdev->cmd_timer);
753                 hci_req_unlock(hdev);
754                 return 0;
755         }
756
757         /* Flush RX and TX works */
758         flush_work(&hdev->tx_work);
759         flush_work(&hdev->rx_work);
760
761         if (hdev->discov_timeout > 0) {
762                 cancel_delayed_work(&hdev->discov_off);
763                 hdev->discov_timeout = 0;
764                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
765         }
766
767         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
768                 cancel_delayed_work(&hdev->service_cache);
769
770         cancel_delayed_work_sync(&hdev->le_scan_disable);
771
772         hci_dev_lock(hdev);
773         inquiry_cache_flush(hdev);
774         hci_conn_hash_flush(hdev);
775         hci_dev_unlock(hdev);
776
777         hci_notify(hdev, HCI_DEV_DOWN);
778
779         if (hdev->flush)
780                 hdev->flush(hdev);
781
782         /* Reset device */
783         skb_queue_purge(&hdev->cmd_q);
784         atomic_set(&hdev->cmd_cnt, 1);
785         if (!test_bit(HCI_RAW, &hdev->flags) &&
786                                 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
787                 set_bit(HCI_INIT, &hdev->flags);
788                 __hci_request(hdev, hci_reset_req, 0,
789                                         msecs_to_jiffies(250));
790                 clear_bit(HCI_INIT, &hdev->flags);
791         }
792
793         /* Flush cmd work */
794         flush_work(&hdev->cmd_work);
795
796         /* Drop queues */
797         skb_queue_purge(&hdev->rx_q);
798         skb_queue_purge(&hdev->cmd_q);
799         skb_queue_purge(&hdev->raw_q);
800
801         /* Drop last sent command */
802         if (hdev->sent_cmd) {
803                 del_timer_sync(&hdev->cmd_timer);
804                 kfree_skb(hdev->sent_cmd);
805                 hdev->sent_cmd = NULL;
806         }
807
808         /* After this point our queues are empty
809          * and no tasks are scheduled. */
810         hdev->close(hdev);
811
812         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
813                 hci_dev_lock(hdev);
814                 mgmt_powered(hdev, 0);
815                 hci_dev_unlock(hdev);
816         }
817
818         /* Clear flags */
819         hdev->flags = 0;
820
821         memset(hdev->eir, 0, sizeof(hdev->eir));
822         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
823
824         hci_req_unlock(hdev);
825
826         hci_dev_put(hdev);
827         return 0;
828 }
829
830 int hci_dev_close(__u16 dev)
831 {
832         struct hci_dev *hdev;
833         int err;
834
835         hdev = hci_dev_get(dev);
836         if (!hdev)
837                 return -ENODEV;
838
839         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
840                 cancel_delayed_work(&hdev->power_off);
841
842         err = hci_dev_do_close(hdev);
843
844         hci_dev_put(hdev);
845         return err;
846 }
847
848 int hci_dev_reset(__u16 dev)
849 {
850         struct hci_dev *hdev;
851         int ret = 0;
852
853         hdev = hci_dev_get(dev);
854         if (!hdev)
855                 return -ENODEV;
856
857         hci_req_lock(hdev);
858
859         if (!test_bit(HCI_UP, &hdev->flags))
860                 goto done;
861
862         /* Drop queues */
863         skb_queue_purge(&hdev->rx_q);
864         skb_queue_purge(&hdev->cmd_q);
865
866         hci_dev_lock(hdev);
867         inquiry_cache_flush(hdev);
868         hci_conn_hash_flush(hdev);
869         hci_dev_unlock(hdev);
870
871         if (hdev->flush)
872                 hdev->flush(hdev);
873
874         atomic_set(&hdev->cmd_cnt, 1);
875         hdev->acl_cnt = 0;
            hdev->sco_cnt = 0;
            hdev->le_cnt = 0;
876
877         if (!test_bit(HCI_RAW, &hdev->flags))
878                 ret = __hci_request(hdev, hci_reset_req, 0,
879                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
880
881 done:
882         hci_req_unlock(hdev);
883         hci_dev_put(hdev);
884         return ret;
885 }
886
887 int hci_dev_reset_stat(__u16 dev)
888 {
889         struct hci_dev *hdev;
890         int ret = 0;
891
892         hdev = hci_dev_get(dev);
893         if (!hdev)
894                 return -ENODEV;
895
896         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
897
898         hci_dev_put(hdev);
899
900         return ret;
901 }
902
903 int hci_dev_cmd(unsigned int cmd, void __user *arg)
904 {
905         struct hci_dev *hdev;
906         struct hci_dev_req dr;
907         int err = 0;
908
909         if (copy_from_user(&dr, arg, sizeof(dr)))
910                 return -EFAULT;
911
912         hdev = hci_dev_get(dr.dev_id);
913         if (!hdev)
914                 return -ENODEV;
915
916         switch (cmd) {
917         case HCISETAUTH:
918                 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
919                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
920                 break;
921
922         case HCISETENCRYPT:
923                 if (!lmp_encrypt_capable(hdev)) {
924                         err = -EOPNOTSUPP;
925                         break;
926                 }
927
928                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
929                         /* Auth must be enabled first */
930                         err = hci_request(hdev, hci_auth_req, dr.dev_opt,
931                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
932                         if (err)
933                                 break;
934                 }
935
936                 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
937                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
938                 break;
939
940         case HCISETSCAN:
941                 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
942                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
943                 break;
944
945         case HCISETLINKPOL:
946                 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
947                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
948                 break;
949
950         case HCISETLINKMODE:
951                 hdev->link_mode = ((__u16) dr.dev_opt) &
952                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
953                 break;
954
955         case HCISETPTYPE:
956                 hdev->pkt_type = (__u16) dr.dev_opt;
957                 break;
958
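        /* For HCISETACLMTU and HCISETSCOMTU userspace packs two 16-bit
         * values into the 32-bit dev_opt: the packet count in the low
         * half and the MTU in the high half. The pointer arithmetic
         * below assumes a little-endian host. */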
959         case HCISETACLMTU:
960                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
961                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
962                 break;
963
964         case HCISETSCOMTU:
965                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
966                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
967                 break;
968
969         default:
970                 err = -EINVAL;
971                 break;
972         }
973
974         hci_dev_put(hdev);
975         return err;
976 }
977
978 int hci_get_dev_list(void __user *arg)
979 {
980         struct hci_dev *hdev;
981         struct hci_dev_list_req *dl;
982         struct hci_dev_req *dr;
983         int n = 0, size, err;
984         __u16 dev_num;
985
986         if (get_user(dev_num, (__u16 __user *) arg))
987                 return -EFAULT;
988
989         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
990                 return -EINVAL;
991
992         size = sizeof(*dl) + dev_num * sizeof(*dr);
993
994         dl = kzalloc(size, GFP_KERNEL);
995         if (!dl)
996                 return -ENOMEM;
997
998         dr = dl->dev_req;
999
1000         read_lock(&hci_dev_list_lock);
1001         list_for_each_entry(hdev, &hci_dev_list, list) {
1002                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1003                         cancel_delayed_work(&hdev->power_off);
1004
1005                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1006                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1007
1008                 (dr + n)->dev_id  = hdev->id;
1009                 (dr + n)->dev_opt = hdev->flags;
1010
1011                 if (++n >= dev_num)
1012                         break;
1013         }
1014         read_unlock(&hci_dev_list_lock);
1015
1016         dl->dev_num = n;
1017         size = sizeof(*dl) + n * sizeof(*dr);
1018
1019         err = copy_to_user(arg, dl, size);
1020         kfree(dl);
1021
1022         return err ? -EFAULT : 0;
1023 }
1024
1025 int hci_get_dev_info(void __user *arg)
1026 {
1027         struct hci_dev *hdev;
1028         struct hci_dev_info di;
1029         int err = 0;
1030
1031         if (copy_from_user(&di, arg, sizeof(di)))
1032                 return -EFAULT;
1033
1034         hdev = hci_dev_get(di.dev_id);
1035         if (!hdev)
1036                 return -ENODEV;
1037
1038         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1039                 cancel_delayed_work_sync(&hdev->power_off);
1040
1041         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1042                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1043
1044         strcpy(di.name, hdev->name);
1045         di.bdaddr   = hdev->bdaddr;
1046         di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1047         di.flags    = hdev->flags;
1048         di.pkt_type = hdev->pkt_type;
1049         di.acl_mtu  = hdev->acl_mtu;
1050         di.acl_pkts = hdev->acl_pkts;
1051         di.sco_mtu  = hdev->sco_mtu;
1052         di.sco_pkts = hdev->sco_pkts;
1053         di.link_policy = hdev->link_policy;
1054         di.link_mode   = hdev->link_mode;
1055
1056         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1057         memcpy(&di.features, &hdev->features, sizeof(di.features));
1058
1059         if (copy_to_user(arg, &di, sizeof(di)))
1060                 err = -EFAULT;
1061
1062         hci_dev_put(hdev);
1063
1064         return err;
1065 }
1066
1067 /* ---- Interface to HCI drivers ---- */
1068
1069 static int hci_rfkill_set_block(void *data, bool blocked)
1070 {
1071         struct hci_dev *hdev = data;
1072
1073         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1074
1075         if (!blocked)
1076                 return 0;
1077
1078         hci_dev_do_close(hdev);
1079
1080         return 0;
1081 }
1082
1083 static const struct rfkill_ops hci_rfkill_ops = {
1084         .set_block = hci_rfkill_set_block,
1085 };
1086
1087 /* Alloc HCI device */
1088 struct hci_dev *hci_alloc_dev(void)
1089 {
1090         struct hci_dev *hdev;
1091
1092         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1093         if (!hdev)
1094                 return NULL;
1095
1096         hci_init_sysfs(hdev);
1097         skb_queue_head_init(&hdev->driver_init);
1098
1099         return hdev;
1100 }
1101 EXPORT_SYMBOL(hci_alloc_dev);
1102
1103 /* Free HCI device */
1104 void hci_free_dev(struct hci_dev *hdev)
1105 {
1106         skb_queue_purge(&hdev->driver_init);
1107
1108         /* will free via device release */
1109         put_device(&hdev->dev);
1110 }
1111 EXPORT_SYMBOL(hci_free_dev);
1112
1113 static void hci_power_on(struct work_struct *work)
1114 {
1115         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1116
1117         BT_DBG("%s", hdev->name);
1118
1119         if (hci_dev_open(hdev->id) < 0)
1120                 return;
1121
1122         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1123                 schedule_delayed_work(&hdev->power_off,
1124                                         msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1125
1126         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1127                 mgmt_index_added(hdev);
1128 }
1129
1130 static void hci_power_off(struct work_struct *work)
1131 {
1132         struct hci_dev *hdev = container_of(work, struct hci_dev,
1133                                                         power_off.work);
1134
1135         BT_DBG("%s", hdev->name);
1136
1137         hci_dev_do_close(hdev);
1138 }
1139
1140 static void hci_discov_off(struct work_struct *work)
1141 {
1142         struct hci_dev *hdev;
1143         u8 scan = SCAN_PAGE;
1144
1145         hdev = container_of(work, struct hci_dev, discov_off.work);
1146
1147         BT_DBG("%s", hdev->name);
1148
1149         hci_dev_lock(hdev);
1150
1151         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1152
1153         hdev->discov_timeout = 0;
1154
1155         hci_dev_unlock(hdev);
1156 }
1157
1158 int hci_uuids_clear(struct hci_dev *hdev)
1159 {
1160         struct list_head *p, *n;
1161
1162         list_for_each_safe(p, n, &hdev->uuids) {
1163                 struct bt_uuid *uuid;
1164
1165                 uuid = list_entry(p, struct bt_uuid, list);
1166
1167                 list_del(p);
1168                 kfree(uuid);
1169         }
1170
1171         return 0;
1172 }
1173
1174 int hci_link_keys_clear(struct hci_dev *hdev)
1175 {
1176         struct list_head *p, *n;
1177
1178         list_for_each_safe(p, n, &hdev->link_keys) {
1179                 struct link_key *key;
1180
1181                 key = list_entry(p, struct link_key, list);
1182
1183                 list_del(p);
1184                 kfree(key);
1185         }
1186
1187         return 0;
1188 }
1189
1190 int hci_smp_ltks_clear(struct hci_dev *hdev)
1191 {
1192         struct smp_ltk *k, *tmp;
1193
1194         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1195                 list_del(&k->list);
1196                 kfree(k);
1197         }
1198
1199         return 0;
1200 }
1201
1202 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1203 {
1204         struct link_key *k;
1205
1206         list_for_each_entry(k, &hdev->link_keys, list)
1207                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1208                         return k;
1209
1210         return NULL;
1211 }
1212
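/* Decide whether a newly created link key should be stored permanently.
 * The auth_type/remote_auth values use the authentication requirements
 * encoding from the Bluetooth spec: 0x00/0x01 no bonding, 0x02/0x03
 * dedicated bonding, 0x04/0x05 general bonding, with odd values also
 * demanding MITM protection. */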
1213 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1214                                                 u8 key_type, u8 old_key_type)
1215 {
1216         /* Legacy key */
1217         if (key_type < 0x03)
1218                 return 1;
1219
1220         /* Debug keys are insecure so don't store them persistently */
1221         if (key_type == HCI_LK_DEBUG_COMBINATION)
1222                 return 0;
1223
1224         /* Changed combination key and there's no previous one */
1225         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1226                 return 0;
1227
1228         /* Security mode 3 case */
1229         if (!conn)
1230                 return 1;
1231
1232         /* Neither the local nor the remote side requested no-bonding */
1233         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1234                 return 1;
1235
1236         /* Local side had dedicated bonding as requirement */
1237         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1238                 return 1;
1239
1240         /* Remote side had dedicated bonding as requirement */
1241         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1242                 return 1;
1243
1244         /* If none of the above criteria match, then don't store the key
1245          * persistently */
1246         return 0;
1247 }
1248
1249 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1250 {
1251         struct smp_ltk *k;
1252
1253         list_for_each_entry(k, &hdev->long_term_keys, list) {
1254                 if (k->ediv != ediv ||
1255                                 memcmp(rand, k->rand, sizeof(k->rand)))
1256                         continue;
1257
1258                 return k;
1259         }
1260
1261         return NULL;
1262 }
1263 EXPORT_SYMBOL(hci_find_ltk);
1264
1265 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1266                                      u8 addr_type)
1267 {
1268         struct smp_ltk *k;
1269
1270         list_for_each_entry(k, &hdev->long_term_keys, list)
1271                 if (addr_type == k->bdaddr_type &&
1272                                         bacmp(bdaddr, &k->bdaddr) == 0)
1273                         return k;
1274
1275         return NULL;
1276 }
1277 EXPORT_SYMBOL(hci_find_ltk_by_addr);
1278
1279 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1280                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1281 {
1282         struct link_key *key, *old_key;
1283         u8 old_key_type, persistent;
1284
1285         old_key = hci_find_link_key(hdev, bdaddr);
1286         if (old_key) {
1287                 old_key_type = old_key->type;
1288                 key = old_key;
1289         } else {
1290                 old_key_type = conn ? conn->key_type : 0xff;
1291                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1292                 if (!key)
1293                         return -ENOMEM;
1294                 list_add(&key->list, &hdev->link_keys);
1295         }
1296
1297         BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1298
1299         /* Some buggy controller combinations generate a changed
1300          * combination key for legacy pairing even when there's no
1301          * previous key */
1302         if (type == HCI_LK_CHANGED_COMBINATION &&
1303                                         (!conn || conn->remote_auth == 0xff) &&
1304                                         old_key_type == 0xff) {
1305                 type = HCI_LK_COMBINATION;
1306                 if (conn)
1307                         conn->key_type = type;
1308         }
1309
1310         bacpy(&key->bdaddr, bdaddr);
1311         memcpy(key->val, val, 16);
1312         key->pin_len = pin_len;
1313
1314         if (type == HCI_LK_CHANGED_COMBINATION)
1315                 key->type = old_key_type;
1316         else
1317                 key->type = type;
1318
1319         if (!new_key)
1320                 return 0;
1321
1322         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1323
1324         mgmt_new_link_key(hdev, key, persistent);
1325
1326         if (!persistent) {
1327                 list_del(&key->list);
1328                 kfree(key);
1329         }
1330
1331         return 0;
1332 }
1333
1334 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1335                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
1336                 ediv, u8 rand[8])
1337 {
1338         struct smp_ltk *key, *old_key;
1339
1340         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1341                 return 0;
1342
1343         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1344         if (old_key)
1345                 key = old_key;
1346         else {
1347                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1348                 if (!key)
1349                         return -ENOMEM;
1350                 list_add(&key->list, &hdev->long_term_keys);
1351         }
1352
1353         bacpy(&key->bdaddr, bdaddr);
1354         key->bdaddr_type = addr_type;
1355         memcpy(key->val, tk, sizeof(key->val));
1356         key->authenticated = authenticated;
1357         key->ediv = ediv;
1358         key->enc_size = enc_size;
1359         key->type = type;
1360         memcpy(key->rand, rand, sizeof(key->rand));
1361
1362         if (!new_key)
1363                 return 0;
1364
1365         if (type & HCI_SMP_LTK)
1366                 mgmt_new_ltk(hdev, key, 1);
1367
1368         return 0;
1369 }
1370
1371 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1372 {
1373         struct link_key *key;
1374
1375         key = hci_find_link_key(hdev, bdaddr);
1376         if (!key)
1377                 return -ENOENT;
1378
1379         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1380
1381         list_del(&key->list);
1382         kfree(key);
1383
1384         return 0;
1385 }
1386
1387 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1388 {
1389         struct smp_ltk *k, *tmp;
1390
1391         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1392                 if (bacmp(bdaddr, &k->bdaddr))
1393                         continue;
1394
1395                 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1396
1397                 list_del(&k->list);
1398                 kfree(k);
1399         }
1400
1401         return 0;
1402 }
1403
1404 /* HCI command timer function */
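/* Fires when the controller fails to answer a command within the
 * timeout: restore one command credit so the command queue does not
 * stall forever. */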
1405 static void hci_cmd_timer(unsigned long arg)
1406 {
1407         struct hci_dev *hdev = (void *) arg;
1408
1409         BT_ERR("%s command tx timeout", hdev->name);
1410         atomic_set(&hdev->cmd_cnt, 1);
1411         queue_work(hdev->workqueue, &hdev->cmd_work);
1412 }
1413
1414 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1415                                           bdaddr_t *bdaddr)
1416 {
1417         struct oob_data *data;
1418
1419         list_for_each_entry(data, &hdev->remote_oob_data, list)
1420                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1421                         return data;
1422
1423         return NULL;
1424 }
1425
1426 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1427 {
1428         struct oob_data *data;
1429
1430         data = hci_find_remote_oob_data(hdev, bdaddr);
1431         if (!data)
1432                 return -ENOENT;
1433
1434         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1435
1436         list_del(&data->list);
1437         kfree(data);
1438
1439         return 0;
1440 }
1441
1442 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1443 {
1444         struct oob_data *data, *n;
1445
1446         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1447                 list_del(&data->list);
1448                 kfree(data);
1449         }
1450
1451         return 0;
1452 }
1453
1454 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1455                             u8 *randomizer)
1456 {
1457         struct oob_data *data;
1458
1459         data = hci_find_remote_oob_data(hdev, bdaddr);
1460
1461         if (!data) {
1462                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1463                 if (!data)
1464                         return -ENOMEM;
1465
1466                 bacpy(&data->bdaddr, bdaddr);
1467                 list_add(&data->list, &hdev->remote_oob_data);
1468         }
1469
1470         memcpy(data->hash, hash, sizeof(data->hash));
1471         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1472
1473         BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1474
1475         return 0;
1476 }
1477
1478 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1479 {
1480         struct bdaddr_list *b;
1481
1482         list_for_each_entry(b, &hdev->blacklist, list)
1483                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1484                         return b;
1485
1486         return NULL;
1487 }
1488
1489 int hci_blacklist_clear(struct hci_dev *hdev)
1490 {
1491         struct list_head *p, *n;
1492
1493         list_for_each_safe(p, n, &hdev->blacklist) {
1494                 struct bdaddr_list *b;
1495
1496                 b = list_entry(p, struct bdaddr_list, list);
1497
1498                 list_del(p);
1499                 kfree(b);
1500         }
1501
1502         return 0;
1503 }
1504
1505 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1506 {
1507         struct bdaddr_list *entry;
1508
1509         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1510                 return -EBADF;
1511
1512         if (hci_blacklist_lookup(hdev, bdaddr))
1513                 return -EEXIST;
1514
1515         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1516         if (!entry)
1517                 return -ENOMEM;
1518
1519         bacpy(&entry->bdaddr, bdaddr);
1520
1521         list_add(&entry->list, &hdev->blacklist);
1522
1523         return mgmt_device_blocked(hdev, bdaddr, type);
1524 }
1525
1526 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1527 {
1528         struct bdaddr_list *entry;
1529
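        /* BDADDR_ANY is overloaded here to mean "unblock everything" */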
1530         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1531                 return hci_blacklist_clear(hdev);
1532
1533         entry = hci_blacklist_lookup(hdev, bdaddr);
1534         if (!entry)
1535                 return -ENOENT;
1536
1537         list_del(&entry->list);
1538         kfree(entry);
1539
1540         return mgmt_device_unblocked(hdev, bdaddr, type);
1541 }
1542
1543 static void hci_clear_adv_cache(struct work_struct *work)
1544 {
1545         struct hci_dev *hdev = container_of(work, struct hci_dev,
1546                                             adv_work.work);
1547
1548         hci_dev_lock(hdev);
1549
1550         hci_adv_entries_clear(hdev);
1551
1552         hci_dev_unlock(hdev);
1553 }
1554
1555 int hci_adv_entries_clear(struct hci_dev *hdev)
1556 {
1557         struct adv_entry *entry, *tmp;
1558
1559         list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1560                 list_del(&entry->list);
1561                 kfree(entry);
1562         }
1563
1564         BT_DBG("%s adv cache cleared", hdev->name);
1565
1566         return 0;
1567 }
1568
1569 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1570 {
1571         struct adv_entry *entry;
1572
1573         list_for_each_entry(entry, &hdev->adv_entries, list)
1574                 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1575                         return entry;
1576
1577         return NULL;
1578 }
1579
1580 static inline int is_connectable_adv(u8 evt_type)
1581 {
1582         if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1583                 return 1;
1584
1585         return 0;
1586 }
1587
1588 int hci_add_adv_entry(struct hci_dev *hdev,
1589                       struct hci_ev_le_advertising_info *ev)
{
        struct adv_entry *entry;

        if (!is_connectable_adv(ev->evt_type))
1590                 return -EINVAL;
1591
1592         /* Only new entries should be added to adv_entries. So, if
1593          * bdaddr was found, don't add it. */
1594         if (hci_find_adv_entry(hdev, &ev->bdaddr))
1595                 return 0;
1596
1597         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1598         if (!entry)
1599                 return -ENOMEM;
1600
1601         bacpy(&entry->bdaddr, &ev->bdaddr);
1602         entry->bdaddr_type = ev->bdaddr_type;
1603
1604         list_add(&entry->list, &hdev->adv_entries);
1605
1606         BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1607                                 batostr(&entry->bdaddr), entry->bdaddr_type);
1608
1609         return 0;
1610 }
1611
1612 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1613 {
1614         struct le_scan_params *param =  (struct le_scan_params *) opt;
1615         struct hci_cp_le_set_scan_param cp;
1616
1617         memset(&cp, 0, sizeof(cp));
1618         cp.type = param->type;
1619         cp.interval = cpu_to_le16(param->interval);
1620         cp.window = cpu_to_le16(param->window);
1621
1622         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1623 }
1624
1625 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1626 {
1627         struct hci_cp_le_set_scan_enable cp;
1628
1629         memset(&cp, 0, sizeof(cp));
1630         cp.enable = 1;
1631
1632         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1633 }
1634
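/* Bring up an LE scan synchronously: program the scan parameters, then
 * enable scanning, and arm le_scan_disable to stop the scan again after
 * @timeout ms. This sleeps in __hci_request(), so it must not be called
 * from atomic context; le_scan_work() runs it from system_long_wq. */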
1635 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1636                           u16 window, int timeout)
1637 {
1638         long timeo = msecs_to_jiffies(3000);
1639         struct le_scan_params param;
1640         int err;
1641
1642         BT_DBG("%s", hdev->name);
1643
1644         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1645                 return -EINPROGRESS;
1646
1647         param.type = type;
1648         param.interval = interval;
1649         param.window = window;
1650
1651         hci_req_lock(hdev);
1652
1653         err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1654                             timeo);
1655         if (!err)
1656                 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1657
1658         hci_req_unlock(hdev);
1659
1660         if (err < 0)
1661                 return err;
1662
1663         schedule_delayed_work(&hdev->le_scan_disable,
1664                               msecs_to_jiffies(timeout));
1665
1666         return 0;
1667 }
1668
1669 static void le_scan_disable_work(struct work_struct *work)
1670 {
1671         struct hci_dev *hdev = container_of(work, struct hci_dev,
1672                                             le_scan_disable.work);
1673         struct hci_cp_le_set_scan_enable cp;
1674
1675         BT_DBG("%s", hdev->name);
1676
1677         memset(&cp, 0, sizeof(cp));
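        /* a zeroed command leaves cp.enable == 0, i.e. "stop scanning" */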
1678
1679         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1680 }
1681
1682 static void le_scan_work(struct work_struct *work)
1683 {
1684         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1685         struct le_scan_params *param = &hdev->le_scan_params;
1686
1687         BT_DBG("%s", hdev->name);
1688
1689         hci_do_le_scan(hdev, param->type, param->interval, param->window,
1690                        param->timeout);
1691 }
1692
1693 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1694                 int timeout)
1695 {
1696         struct le_scan_params *param = &hdev->le_scan_params;
1697
1698         BT_DBG("%s", hdev->name);
1699
1700         if (work_busy(&hdev->le_scan))
1701                 return -EINPROGRESS;
1702
1703         param->type = type;
1704         param->interval = interval;
1705         param->window = window;
1706         param->timeout = timeout;
1707
1708         queue_work(system_long_wq, &hdev->le_scan);
1709
1710         return 0;
1711 }
1712
1713 /* Register HCI device */
1714 int hci_register_dev(struct hci_dev *hdev)
1715 {
1716         struct list_head *head = &hci_dev_list, *p;
1717         int i, id, error;
1718
1719         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1720
1721         if (!hdev->open || !hdev->close)
1722                 return -EINVAL;
1723
1724         /* Do not allow HCI_AMP devices to register at index 0,
1725          * so the index can be used as the AMP controller ID.
1726          */
1727         id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1728
1729         write_lock(&hci_dev_list_lock);
1730
1731         /* Find first available device id */
1732         list_for_each(p, &hci_dev_list) {
1733                 if (list_entry(p, struct hci_dev, list)->id != id)
1734                         break;
1735                 head = p;
                     id++;
1736         }
1737
1738         sprintf(hdev->name, "hci%d", id);
1739         hdev->id = id;
1740         list_add_tail(&hdev->list, head);
1741
1742         mutex_init(&hdev->lock);
1743
1744         hdev->flags = 0;
1745         hdev->dev_flags = 0;
1746         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1747         hdev->esco_type = (ESCO_HV1);
1748         hdev->link_mode = (HCI_LM_ACCEPT);
1749         hdev->io_capability = 0x03; /* No Input No Output */
1750
1751         hdev->idle_timeout = 0;
1752         hdev->sniff_max_interval = 800;
1753         hdev->sniff_min_interval = 80;
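        /* Sniff intervals are in 0.625 ms baseband slots:
         * 800 * 0.625 ms = 500 ms max, 80 * 0.625 ms = 50 ms min. */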
1754
1755         INIT_WORK(&hdev->rx_work, hci_rx_work);
1756         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1757         INIT_WORK(&hdev->tx_work, hci_tx_work);
1758
1759
1760         skb_queue_head_init(&hdev->rx_q);
1761         skb_queue_head_init(&hdev->cmd_q);
1762         skb_queue_head_init(&hdev->raw_q);
1763
1764         setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1765
1766         for (i = 0; i < NUM_REASSEMBLY; i++)
1767                 hdev->reassembly[i] = NULL;
1768
1769         init_waitqueue_head(&hdev->req_wait_q);
1770         mutex_init(&hdev->req_lock);
1771
1772         discovery_init(hdev);
1773
1774         hci_conn_hash_init(hdev);
1775
1776         INIT_LIST_HEAD(&hdev->mgmt_pending);
1777
1778         INIT_LIST_HEAD(&hdev->blacklist);
1779
1780         INIT_LIST_HEAD(&hdev->uuids);
1781
1782         INIT_LIST_HEAD(&hdev->link_keys);
1783         INIT_LIST_HEAD(&hdev->long_term_keys);
1784
1785         INIT_LIST_HEAD(&hdev->remote_oob_data);
1786
1787         INIT_LIST_HEAD(&hdev->adv_entries);
1788
1789         INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1790         INIT_WORK(&hdev->power_on, hci_power_on);
1791         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1792
1793         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1794
1795         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1796
1797         atomic_set(&hdev->promisc, 0);
1798
1799         INIT_WORK(&hdev->le_scan, le_scan_work);
1800
1801         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1802
1803         write_unlock(&hci_dev_list_lock);
1804
1805         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1806                                                         WQ_MEM_RECLAIM, 1);
1807         if (!hdev->workqueue) {
1808                 error = -ENOMEM;
1809                 goto err;
1810         }
1811
1812         error = hci_add_sysfs(hdev);
1813         if (error < 0)
1814                 goto err_wqueue;
1815
1816         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1817                                 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1818         if (hdev->rfkill) {
1819                 if (rfkill_register(hdev->rfkill) < 0) {
1820                         rfkill_destroy(hdev->rfkill);
1821                         hdev->rfkill = NULL;
1822                 }
1823         }
1824
1825         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1826         set_bit(HCI_SETUP, &hdev->dev_flags);
1827         schedule_work(&hdev->power_on);
1828
1829         hci_notify(hdev, HCI_DEV_REG);
1830         hci_dev_hold(hdev);
1831
1832         return id;
1833
1834 err_wqueue:
1835         destroy_workqueue(hdev->workqueue);
1836 err:
1837         write_lock(&hci_dev_list_lock);
1838         list_del(&hdev->list);
1839         write_unlock(&hci_dev_list_lock);
1840
1841         return error;
1842 }
1843 EXPORT_SYMBOL(hci_register_dev);
1844
1845 /* Unregister HCI device */
1846 void hci_unregister_dev(struct hci_dev *hdev)
1847 {
1848         int i;
1849
1850         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1851
1852         write_lock(&hci_dev_list_lock);
1853         list_del(&hdev->list);
1854         write_unlock(&hci_dev_list_lock);
1855
1856         hci_dev_do_close(hdev);
1857
1858         for (i = 0; i < NUM_REASSEMBLY; i++)
1859                 kfree_skb(hdev->reassembly[i]);
1860
1861         if (!test_bit(HCI_INIT, &hdev->flags) &&
1862                                 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1863                 hci_dev_lock(hdev);
1864                 mgmt_index_removed(hdev);
1865                 hci_dev_unlock(hdev);
1866         }
1867
1868         /* mgmt_index_removed should take care of emptying the
1869          * pending list */
1870         BUG_ON(!list_empty(&hdev->mgmt_pending));
1871
1872         hci_notify(hdev, HCI_DEV_UNREG);
1873
1874         if (hdev->rfkill) {
1875                 rfkill_unregister(hdev->rfkill);
1876                 rfkill_destroy(hdev->rfkill);
1877         }
1878
1879         hci_del_sysfs(hdev);
1880
1881         cancel_delayed_work_sync(&hdev->adv_work);
1882
1883         destroy_workqueue(hdev->workqueue);
1884
1885         hci_dev_lock(hdev);
1886         hci_blacklist_clear(hdev);
1887         hci_uuids_clear(hdev);
1888         hci_link_keys_clear(hdev);
1889         hci_smp_ltks_clear(hdev);
1890         hci_remote_oob_data_clear(hdev);
1891         hci_adv_entries_clear(hdev);
1892         hci_dev_unlock(hdev);
1893
1894         hci_dev_put(hdev);
1895 }
1896 EXPORT_SYMBOL(hci_unregister_dev);
1897
1898 /* Suspend HCI device */
1899 int hci_suspend_dev(struct hci_dev *hdev)
1900 {
1901         hci_notify(hdev, HCI_DEV_SUSPEND);
1902         return 0;
1903 }
1904 EXPORT_SYMBOL(hci_suspend_dev);
1905
1906 /* Resume HCI device */
1907 int hci_resume_dev(struct hci_dev *hdev)
1908 {
1909         hci_notify(hdev, HCI_DEV_RESUME);
1910         return 0;
1911 }
1912 EXPORT_SYMBOL(hci_resume_dev);
1913
1914 /* Receive frame from HCI drivers */
1915 int hci_recv_frame(struct sk_buff *skb)
1916 {
1917         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1918         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1919                                 && !test_bit(HCI_INIT, &hdev->flags))) {
1920                 kfree_skb(skb);
1921                 return -ENXIO;
1922         }
1923
1924         /* Incoming skb */
1925         bt_cb(skb)->incoming = 1;
1926
1927         /* Time stamp */
1928         __net_timestamp(skb);
1929
1930         skb_queue_tail(&hdev->rx_q, skb);
1931         queue_work(hdev->workqueue, &hdev->rx_work);
1932
1933         return 0;
1934 }
1935 EXPORT_SYMBOL(hci_recv_frame);
1936
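/* Common helper for fragment reassembly.  Incoming bytes are
 * accumulated into hdev->reassembly[index]; scb->expect tracks how many
 * more bytes are needed to complete the current header or payload.
 * Returns the number of input bytes left unconsumed, or a negative
 * error code.
 */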
1937 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1938                                                   int count, __u8 index)
1939 {
1940         int len = 0;
1941         int hlen = 0;
1942         int remain = count;
1943         struct sk_buff *skb;
1944         struct bt_skb_cb *scb;
1945
1946         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1947                                 index >= NUM_REASSEMBLY)
1948                 return -EILSEQ;
1949
1950         skb = hdev->reassembly[index];
1951
1952         if (!skb) {
1953                 switch (type) {
1954                 case HCI_ACLDATA_PKT:
1955                         len = HCI_MAX_FRAME_SIZE;
1956                         hlen = HCI_ACL_HDR_SIZE;
1957                         break;
1958                 case HCI_EVENT_PKT:
1959                         len = HCI_MAX_EVENT_SIZE;
1960                         hlen = HCI_EVENT_HDR_SIZE;
1961                         break;
1962                 case HCI_SCODATA_PKT:
1963                         len = HCI_MAX_SCO_SIZE;
1964                         hlen = HCI_SCO_HDR_SIZE;
1965                         break;
1966                 }
1967
1968                 skb = bt_skb_alloc(len, GFP_ATOMIC);
1969                 if (!skb)
1970                         return -ENOMEM;
1971
1972                 scb = (void *) skb->cb;
1973                 scb->expect = hlen;
1974                 scb->pkt_type = type;
1975
1976                 skb->dev = (void *) hdev;
1977                 hdev->reassembly[index] = skb;
1978         }
1979
1980         while (count) {
1981                 scb = (void *) skb->cb;
1982                 len = min_t(uint, scb->expect, count);
1983
1984                 memcpy(skb_put(skb, len), data, len);
1985
1986                 count -= len;
1987                 data += len;
1988                 scb->expect -= len;
1989                 remain = count;
1990
1991                 switch (type) {
1992                 case HCI_EVENT_PKT:
1993                         if (skb->len == HCI_EVENT_HDR_SIZE) {
1994                                 struct hci_event_hdr *h = hci_event_hdr(skb);
1995                                 scb->expect = h->plen;
1996
1997                                 if (skb_tailroom(skb) < scb->expect) {
1998                                         kfree_skb(skb);
1999                                         hdev->reassembly[index] = NULL;
2000                                         return -ENOMEM;
2001                                 }
2002                         }
2003                         break;
2004
2005                 case HCI_ACLDATA_PKT:
2006                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2007                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2008                                 scb->expect = __le16_to_cpu(h->dlen);
2009
2010                                 if (skb_tailroom(skb) < scb->expect) {
2011                                         kfree_skb(skb);
2012                                         hdev->reassembly[index] = NULL;
2013                                         return -ENOMEM;
2014                                 }
2015                         }
2016                         break;
2017
2018                 case HCI_SCODATA_PKT:
2019                         if (skb->len == HCI_SCO_HDR_SIZE) {
2020                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2021                                 scb->expect = h->dlen;
2022
2023                                 if (skb_tailroom(skb) < scb->expect) {
2024                                         kfree_skb(skb);
2025                                         hdev->reassembly[index] = NULL;
2026                                         return -ENOMEM;
2027                                 }
2028                         }
2029                         break;
2030                 }
2031
2032                 if (scb->expect == 0) {
2033                         /* Complete frame */
2034
2035                         bt_cb(skb)->pkt_type = type;
2036                         hci_recv_frame(skb);
2037
2038                         hdev->reassembly[index] = NULL;
2039                         return remain;
2040                 }
2041         }
2042
2043         return remain;
2044 }
2045
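/* Reassembly entry point for transports that signal the packet type out
 * of band.  A driver receive path might use it roughly like this
 * (hypothetical fragment, for illustration only):
 *
 *	if (hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len) < 0)
 *		BT_ERR("%s frame reassembly failed", hdev->name);
 */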
2046 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2047 {
2048         int rem = 0;
2049
2050         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2051                 return -EILSEQ;
2052
2053         while (count) {
2054                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2055                 if (rem < 0)
2056                         return rem;
2057
2058                 data += (count - rem);
2059                 count = rem;
2060         }
2061
2062         return rem;
2063 }
2064 EXPORT_SYMBOL(hci_recv_fragment);
2065
2066 #define STREAM_REASSEMBLY 0
2067
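/* Reassembly entry point for pure stream transports (e.g. UART based
 * drivers), where the packet type is carried in-band as the first byte
 * of every frame instead of being signalled separately.
 */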
2068 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2069 {
2070         int type;
2071         int rem = 0;
2072
2073         while (count) {
2074                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2075
2076                 if (!skb) {
2077                         struct { char type; } *pkt;
2078
2079                         /* Start of the frame */
2080                         pkt = data;
2081                         type = pkt->type;
2082
2083                         data++;
2084                         count--;
2085                 } else
2086                         type = bt_cb(skb)->pkt_type;
2087
2088                 rem = hci_reassembly(hdev, type, data, count,
2089                                                         STREAM_REASSEMBLY);
2090                 if (rem < 0)
2091                         return rem;
2092
2093                 data += (count - rem);
2094                 count = rem;
2095         }
2096
2097         return rem;
2098 }
2099 EXPORT_SYMBOL(hci_recv_stream_fragment);
2100
2101 /* ---- Interface to upper protocols ---- */
2102
2103 int hci_register_cb(struct hci_cb *cb)
2104 {
2105         BT_DBG("%p name %s", cb, cb->name);
2106
2107         write_lock(&hci_cb_list_lock);
2108         list_add(&cb->list, &hci_cb_list);
2109         write_unlock(&hci_cb_list_lock);
2110
2111         return 0;
2112 }
2113 EXPORT_SYMBOL(hci_register_cb);
2114
2115 int hci_unregister_cb(struct hci_cb *cb)
2116 {
2117         BT_DBG("%p name %s", cb, cb->name);
2118
2119         write_lock(&hci_cb_list_lock);
2120         list_del(&cb->list);
2121         write_unlock(&hci_cb_list_lock);
2122
2123         return 0;
2124 }
2125 EXPORT_SYMBOL(hci_unregister_cb);
2126
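/* Hand one frame to the driver.  Copies go to the monitor interface
 * and, in promiscuous mode, to the HCI sockets before the skb is
 * orphaned and passed to the driver's send callback.
 */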
2127 static int hci_send_frame(struct sk_buff *skb)
2128 {
2129         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2130
2131         if (!hdev) {
2132                 kfree_skb(skb);
2133                 return -ENODEV;
2134         }
2135
2136         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2137
2138         /* Time stamp */
2139         __net_timestamp(skb);
2140
2141         /* Send copy to monitor */
2142         hci_send_to_monitor(hdev, skb);
2143
2144         if (atomic_read(&hdev->promisc)) {
2145                 /* Send copy to the sockets */
2146                 hci_send_to_sock(hdev, skb);
2147         }
2148
2149         /* Get rid of skb owner, prior to sending to the driver. */
2150         skb_orphan(skb);
2151
2152         return hdev->send(skb);
2153 }
2154
2155 /* Send HCI command */
2156 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2157 {
2158         int len = HCI_COMMAND_HDR_SIZE + plen;
2159         struct hci_command_hdr *hdr;
2160         struct sk_buff *skb;
2161
2162         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2163
2164         skb = bt_skb_alloc(len, GFP_ATOMIC);
2165         if (!skb) {
2166                 BT_ERR("%s no memory for command", hdev->name);
2167                 return -ENOMEM;
2168         }
2169
2170         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2171         hdr->opcode = cpu_to_le16(opcode);
2172         hdr->plen   = plen;
2173
2174         if (plen)
2175                 memcpy(skb_put(skb, plen), param, plen);
2176
2177         BT_DBG("skb len %d", skb->len);
2178
2179         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2180         skb->dev = (void *) hdev;
2181
2182         if (test_bit(HCI_INIT, &hdev->flags))
2183                 hdev->init_last_cmd = opcode;
2184
2185         skb_queue_tail(&hdev->cmd_q, skb);
2186         queue_work(hdev->workqueue, &hdev->cmd_work);
2187
2188         return 0;
2189 }
2190
2191 /* Get data from the previously sent command */
2192 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2193 {
2194         struct hci_command_hdr *hdr;
2195
2196         if (!hdev->sent_cmd)
2197                 return NULL;
2198
2199         hdr = (void *) hdev->sent_cmd->data;
2200
2201         if (hdr->opcode != cpu_to_le16(opcode))
2202                 return NULL;
2203
2204         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2205
2206         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2207 }
2208
2209 /* Send ACL data */
2210 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2211 {
2212         struct hci_acl_hdr *hdr;
2213         int len = skb->len;
2214
2215         skb_push(skb, HCI_ACL_HDR_SIZE);
2216         skb_reset_transport_header(skb);
2217         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2218         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2219         hdr->dlen   = cpu_to_le16(len);
2220 }
2221
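/* Queue an ACL frame for transmission.  If the skb carries a frag_list,
 * the head keeps the caller's boundary flags while every fragment is
 * re-tagged as a continuation (ACL_CONT) and the whole chain is queued
 * atomically under the queue lock.
 */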
2222 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2223                                 struct sk_buff *skb, __u16 flags)
2224 {
2225         struct hci_dev *hdev = conn->hdev;
2226         struct sk_buff *list;
2227
2228         list = skb_shinfo(skb)->frag_list;
2229         if (!list) {
2230                 /* Non-fragmented */
2231                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2232
2233                 skb_queue_tail(queue, skb);
2234         } else {
2235                 /* Fragmented */
2236                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2237
2238                 skb_shinfo(skb)->frag_list = NULL;
2239
2240                 /* Queue all fragments atomically */
2241                 spin_lock(&queue->lock);
2242
2243                 __skb_queue_tail(queue, skb);
2244
2245                 flags &= ~ACL_START;
2246                 flags |= ACL_CONT;
2247                 do {
2248                         skb = list; list = list->next;
2249
2250                         skb->dev = (void *) hdev;
2251                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2252                         hci_add_acl_hdr(skb, conn->handle, flags);
2253
2254                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2255
2256                         __skb_queue_tail(queue, skb);
2257                 } while (list);
2258
2259                 spin_unlock(&queue->lock);
2260         }
2261 }
2262
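/* Send ACL data over the given channel: tag the skb, prepend the ACL
 * header carrying the connection handle and boundary flags, queue it on
 * the channel and kick the TX work.
 */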
2263 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2264 {
2265         struct hci_conn *conn = chan->conn;
2266         struct hci_dev *hdev = conn->hdev;
2267
2268         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2269
2270         skb->dev = (void *) hdev;
2271         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2272         hci_add_acl_hdr(skb, conn->handle, flags);
2273
2274         hci_queue_acl(conn, &chan->data_q, skb, flags);
2275
2276         queue_work(hdev->workqueue, &hdev->tx_work);
2277 }
2278 EXPORT_SYMBOL(hci_send_acl);
2279
2280 /* Send SCO data */
2281 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2282 {
2283         struct hci_dev *hdev = conn->hdev;
2284         struct hci_sco_hdr hdr;
2285
2286         BT_DBG("%s len %d", hdev->name, skb->len);
2287
2288         hdr.handle = cpu_to_le16(conn->handle);
2289         hdr.dlen   = skb->len;
2290
2291         skb_push(skb, HCI_SCO_HDR_SIZE);
2292         skb_reset_transport_header(skb);
2293         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2294
2295         skb->dev = (void *) hdev;
2296         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2297
2298         skb_queue_tail(&conn->data_q, skb);
2299         queue_work(hdev->workqueue, &hdev->tx_work);
2300 }
2301 EXPORT_SYMBOL(hci_send_sco);
2302
2303 /* ---- HCI TX task (outgoing data) ---- */
2304
2305 /* HCI Connection scheduler */
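/* Pick the connection of the given type with the least outstanding
 * data and grant it a fair share of the free controller buffers
 * (*quote = buffers / active connections, at least 1).
 */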
2306 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2307 {
2308         struct hci_conn_hash *h = &hdev->conn_hash;
2309         struct hci_conn *conn = NULL, *c;
2310         int num = 0, min = ~0;
2311
2312         /* We don't have to lock device here. Connections are always
2313          * added and removed with TX task disabled. */
2314
2315         rcu_read_lock();
2316
2317         list_for_each_entry_rcu(c, &h->list, list) {
2318                 if (c->type != type || skb_queue_empty(&c->data_q))
2319                         continue;
2320
2321                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2322                         continue;
2323
2324                 num++;
2325
2326                 if (c->sent < min) {
2327                         min  = c->sent;
2328                         conn = c;
2329                 }
2330
2331                 if (hci_conn_num(hdev, type) == num)
2332                         break;
2333         }
2334
2335         rcu_read_unlock();
2336
2337         if (conn) {
2338                 int cnt, q;
2339
2340                 switch (conn->type) {
2341                 case ACL_LINK:
2342                         cnt = hdev->acl_cnt;
2343                         break;
2344                 case SCO_LINK:
2345                 case ESCO_LINK:
2346                         cnt = hdev->sco_cnt;
2347                         break;
2348                 case LE_LINK:
2349                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2350                         break;
2351                 default:
2352                         cnt = 0;
2353                         BT_ERR("Unknown link type");
2354                 }
2355
2356                 q = cnt / num;
2357                 *quote = q ? q : 1;
2358         } else
2359                 *quote = 0;
2360
2361         BT_DBG("conn %p quote %d", conn, *quote);
2362         return conn;
2363 }
2364
2365 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2366 {
2367         struct hci_conn_hash *h = &hdev->conn_hash;
2368         struct hci_conn *c;
2369
2370         BT_ERR("%s link tx timeout", hdev->name);
2371
2372         rcu_read_lock();
2373
2374         /* Kill stalled connections */
2375         list_for_each_entry_rcu(c, &h->list, list) {
2376                 if (c->type == type && c->sent) {
2377                         BT_ERR("%s killing stalled connection %s",
2378                                 hdev->name, batostr(&c->dst));
2379                         hci_acl_disconn(c, 0x13);
2380                 }
2381         }
2382
2383         rcu_read_unlock();
2384 }
2385
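/* Channel-level variant of the scheduler: among all channels of the
 * given link type, prefer the highest queued skb->priority and, within
 * that priority, the connection with the least outstanding data.
 */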
2386 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2387                                                 int *quote)
2388 {
2389         struct hci_conn_hash *h = &hdev->conn_hash;
2390         struct hci_chan *chan = NULL;
2391         int num = 0, min = ~0, cur_prio = 0;
2392         struct hci_conn *conn;
2393         int cnt, q, conn_num = 0;
2394
2395         BT_DBG("%s", hdev->name);
2396
2397         rcu_read_lock();
2398
2399         list_for_each_entry_rcu(conn, &h->list, list) {
2400                 struct hci_chan *tmp;
2401
2402                 if (conn->type != type)
2403                         continue;
2404
2405                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2406                         continue;
2407
2408                 conn_num++;
2409
2410                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2411                         struct sk_buff *skb;
2412
2413                         if (skb_queue_empty(&tmp->data_q))
2414                                 continue;
2415
2416                         skb = skb_peek(&tmp->data_q);
2417                         if (skb->priority < cur_prio)
2418                                 continue;
2419
2420                         if (skb->priority > cur_prio) {
2421                                 num = 0;
2422                                 min = ~0;
2423                                 cur_prio = skb->priority;
2424                         }
2425
2426                         num++;
2427
2428                         if (conn->sent < min) {
2429                                 min  = conn->sent;
2430                                 chan = tmp;
2431                         }
2432                 }
2433
2434                 if (hci_conn_num(hdev, type) == conn_num)
2435                         break;
2436         }
2437
2438         rcu_read_unlock();
2439
2440         if (!chan)
2441                 return NULL;
2442
2443         switch (chan->conn->type) {
2444         case ACL_LINK:
2445                 cnt = hdev->acl_cnt;
2446                 break;
2447         case SCO_LINK:
2448         case ESCO_LINK:
2449                 cnt = hdev->sco_cnt;
2450                 break;
2451         case LE_LINK:
2452                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2453                 break;
2454         default:
2455                 cnt = 0;
2456                 BT_ERR("Unknown link type");
2457         }
2458
2459         q = cnt / num;
2460         *quote = q ? q : 1;
2461         BT_DBG("chan %p quote %d", chan, *quote);
2462         return chan;
2463 }
2464
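/* Anti-starvation pass run after a scheduling round: channels that got
 * to send have their counter reset, while the head skb of every starved
 * channel is promoted to HCI_PRIO_MAX - 1 so it wins the next round.
 */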
2465 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2466 {
2467         struct hci_conn_hash *h = &hdev->conn_hash;
2468         struct hci_conn *conn;
2469         int num = 0;
2470
2471         BT_DBG("%s", hdev->name);
2472
2473         rcu_read_lock();
2474
2475         list_for_each_entry_rcu(conn, &h->list, list) {
2476                 struct hci_chan *chan;
2477
2478                 if (conn->type != type)
2479                         continue;
2480
2481                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2482                         continue;
2483
2484                 num++;
2485
2486                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2487                         struct sk_buff *skb;
2488
2489                         if (chan->sent) {
2490                                 chan->sent = 0;
2491                                 continue;
2492                         }
2493
2494                         if (skb_queue_empty(&chan->data_q))
2495                                 continue;
2496
2497                         skb = skb_peek(&chan->data_q);
2498                         if (skb->priority >= HCI_PRIO_MAX - 1)
2499                                 continue;
2500
2501                         skb->priority = HCI_PRIO_MAX - 1;
2502
2503                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2504                                                                 skb->priority);
2505                 }
2506
2507                 if (hci_conn_num(hdev, type) == num)
2508                         break;
2509         }
2510
2511         rcu_read_unlock();
2512
2513 }
2514
2515 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2516 {
2517         /* Calculate count of blocks used by this packet */
2518         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2519 }
2520
2521 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2522 {
2523         if (!test_bit(HCI_RAW, &hdev->flags)) {
2524                 /* ACL tx timeout must be longer than maximum
2525                  * link supervision timeout (40.9 seconds) */
2526                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2527                                         msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2528                         hci_link_tx_to(hdev, ACL_LINK);
2529         }
2530 }
2531
2532 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2533 {
2534         unsigned int cnt = hdev->acl_cnt;
2535         struct hci_chan *chan;
2536         struct sk_buff *skb;
2537         int quote;
2538
2539         __check_timeout(hdev, cnt);
2540
2541         while (hdev->acl_cnt &&
2542                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2543                 u32 priority = (skb_peek(&chan->data_q))->priority;
2544                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2545                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2546                                         skb->len, skb->priority);
2547
2548                         /* Stop if priority has changed */
2549                         if (skb->priority < priority)
2550                                 break;
2551
2552                         skb = skb_dequeue(&chan->data_q);
2553
2554                         hci_conn_enter_active_mode(chan->conn,
2555                                                    bt_cb(skb)->force_active);
2556
2557                         hci_send_frame(skb);
2558                         hdev->acl_last_tx = jiffies;
2559
2560                         hdev->acl_cnt--;
2561                         chan->sent++;
2562                         chan->conn->sent++;
2563                 }
2564         }
2565
2566         if (cnt != hdev->acl_cnt)
2567                 hci_prio_recalculate(hdev, ACL_LINK);
2568 }
2569
2570 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2571 {
2572         unsigned int cnt = hdev->block_cnt;
2573         struct hci_chan *chan;
2574         struct sk_buff *skb;
2575         int quote;
2576
2577         __check_timeout(hdev, cnt);
2578
2579         while (hdev->block_cnt > 0 &&
2580                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2581                 u32 priority = (skb_peek(&chan->data_q))->priority;
2582                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2583                         int blocks;
2584
2585                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2586                                                 skb->len, skb->priority);
2587
2588                         /* Stop if priority has changed */
2589                         if (skb->priority < priority)
2590                                 break;
2591
2592                         skb = skb_dequeue(&chan->data_q);
2593
2594                         blocks = __get_blocks(hdev, skb);
2595                         if (blocks > hdev->block_cnt)
2596                                 return;
2597
2598                         hci_conn_enter_active_mode(chan->conn,
2599                                                 bt_cb(skb)->force_active);
2600
2601                         hci_send_frame(skb);
2602                         hdev->acl_last_tx = jiffies;
2603
2604                         hdev->block_cnt -= blocks;
2605                         quote -= blocks;
2606
2607                         chan->sent += blocks;
2608                         chan->conn->sent += blocks;
2609                 }
2610         }
2611
2612         if (cnt != hdev->block_cnt)
2613                 hci_prio_recalculate(hdev, ACL_LINK);
2614 }
2615
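/* Dispatch ACL scheduling according to the controller's flow control
 * mode: per-packet accounting (acl_cnt) or per-block accounting
 * (block_cnt).
 */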
2616 static inline void hci_sched_acl(struct hci_dev *hdev)
2617 {
2618         BT_DBG("%s", hdev->name);
2619
2620         if (!hci_conn_num(hdev, ACL_LINK))
2621                 return;
2622
2623         switch (hdev->flow_ctl_mode) {
2624         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2625                 hci_sched_acl_pkt(hdev);
2626                 break;
2627
2628         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2629                 hci_sched_acl_blk(hdev);
2630                 break;
2631         }
2632 }
2633
2634 /* Schedule SCO */
2635 static inline void hci_sched_sco(struct hci_dev *hdev)
2636 {
2637         struct hci_conn *conn;
2638         struct sk_buff *skb;
2639         int quote;
2640
2641         BT_DBG("%s", hdev->name);
2642
2643         if (!hci_conn_num(hdev, SCO_LINK))
2644                 return;
2645
2646         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2647                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2648                         BT_DBG("skb %p len %d", skb, skb->len);
2649                         hci_send_frame(skb);
2650
2651                         conn->sent++;
2652                         if (conn->sent == ~0)
2653                                 conn->sent = 0;
2654                 }
2655         }
2656 }
2657
2658 static inline void hci_sched_esco(struct hci_dev *hdev)
2659 {
2660         struct hci_conn *conn;
2661         struct sk_buff *skb;
2662         int quote;
2663
2664         BT_DBG("%s", hdev->name);
2665
2666         if (!hci_conn_num(hdev, ESCO_LINK))
2667                 return;
2668
2669         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2670                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2671                         BT_DBG("skb %p len %d", skb, skb->len);
2672                         hci_send_frame(skb);
2673
2674                         conn->sent++;
2675                         if (conn->sent == ~0)
2676                                 conn->sent = 0;
2677                 }
2678         }
2679 }
2680
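/* Schedule LE traffic.  Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL buffers, so the consumed count is
 * written back to either le_cnt or acl_cnt accordingly.
 */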
2681 static inline void hci_sched_le(struct hci_dev *hdev)
2682 {
2683         struct hci_chan *chan;
2684         struct sk_buff *skb;
2685         int quote, cnt, tmp;
2686
2687         BT_DBG("%s", hdev->name);
2688
2689         if (!hci_conn_num(hdev, LE_LINK))
2690                 return;
2691
2692         if (!test_bit(HCI_RAW, &hdev->flags)) {
2693                 /* LE tx timeout must be longer than maximum
2694                  * link supervision timeout (40.9 seconds) */
2695                 if (!hdev->le_cnt && hdev->le_pkts &&
2696                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2697                         hci_link_tx_to(hdev, LE_LINK);
2698         }
2699
2700         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2701         tmp = cnt;
2702         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2703                 u32 priority = (skb_peek(&chan->data_q))->priority;
2704                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2705                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2706                                         skb->len, skb->priority);
2707
2708                         /* Stop if priority has changed */
2709                         if (skb->priority < priority)
2710                                 break;
2711
2712                         skb = skb_dequeue(&chan->data_q);
2713
2714                         hci_send_frame(skb);
2715                         hdev->le_last_tx = jiffies;
2716
2717                         cnt--;
2718                         chan->sent++;
2719                         chan->conn->sent++;
2720                 }
2721         }
2722
2723         if (hdev->le_pkts)
2724                 hdev->le_cnt = cnt;
2725         else
2726                 hdev->acl_cnt = cnt;
2727
2728         if (cnt != tmp)
2729                 hci_prio_recalculate(hdev, LE_LINK);
2730 }
2731
2732 static void hci_tx_work(struct work_struct *work)
2733 {
2734         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2735         struct sk_buff *skb;
2736
2737         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2738                 hdev->sco_cnt, hdev->le_cnt);
2739
2740         /* Schedule queues and send pending data to the HCI driver */
2741
2742         hci_sched_acl(hdev);
2743
2744         hci_sched_sco(hdev);
2745
2746         hci_sched_esco(hdev);
2747
2748         hci_sched_le(hdev);
2749
2750         /* Send next queued raw (unknown type) packet */
2751         while ((skb = skb_dequeue(&hdev->raw_q)))
2752                 hci_send_frame(skb);
2753 }
2754
2755 /* ----- HCI RX task (incoming data processing) ----- */
2756
2757 /* ACL data packet */
2758 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2759 {
2760         struct hci_acl_hdr *hdr = (void *) skb->data;
2761         struct hci_conn *conn;
2762         __u16 handle, flags;
2763
2764         skb_pull(skb, HCI_ACL_HDR_SIZE);
2765
2766         handle = __le16_to_cpu(hdr->handle);
2767         flags  = hci_flags(handle);
2768         handle = hci_handle(handle);
2769
2770         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2771
2772         hdev->stat.acl_rx++;
2773
2774         hci_dev_lock(hdev);
2775         conn = hci_conn_hash_lookup_handle(hdev, handle);
2776         hci_dev_unlock(hdev);
2777
2778         if (conn) {
2779                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2780
2781                 /* Send to upper protocol */
2782                 l2cap_recv_acldata(conn, skb, flags);
2783                 return;
2784         } else {
2785                 BT_ERR("%s ACL packet for unknown connection handle %d",
2786                         hdev->name, handle);
2787         }
2788
2789         kfree_skb(skb);
2790 }
2791
2792 /* SCO data packet */
2793 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2794 {
2795         struct hci_sco_hdr *hdr = (void *) skb->data;
2796         struct hci_conn *conn;
2797         __u16 handle;
2798
2799         skb_pull(skb, HCI_SCO_HDR_SIZE);
2800
2801         handle = __le16_to_cpu(hdr->handle);
2802
2803         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2804
2805         hdev->stat.sco_rx++;
2806
2807         hci_dev_lock(hdev);
2808         conn = hci_conn_hash_lookup_handle(hdev, handle);
2809         hci_dev_unlock(hdev);
2810
2811         if (conn) {
2812                 /* Send to upper protocol */
2813                 sco_recv_scodata(conn, skb);
2814                 return;
2815         } else {
2816                 BT_ERR("%s SCO packet for unknown connection handle %d",
2817                         hdev->name, handle);
2818         }
2819
2820         kfree_skb(skb);
2821 }
2822
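/* RX work: drain rx_q, mirroring each frame to the monitor interface
 * and, in promiscuous mode, to the HCI sockets, then dispatch it to the
 * event, ACL or SCO handler according to its packet type.
 */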
2823 static void hci_rx_work(struct work_struct *work)
2824 {
2825         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2826         struct sk_buff *skb;
2827
2828         BT_DBG("%s", hdev->name);
2829
2830         while ((skb = skb_dequeue(&hdev->rx_q))) {
2831                 /* Send copy to monitor */
2832                 hci_send_to_monitor(hdev, skb);
2833
2834                 if (atomic_read(&hdev->promisc)) {
2835                         /* Send copy to the sockets */
2836                         hci_send_to_sock(hdev, skb);
2837                 }
2838
2839                 if (test_bit(HCI_RAW, &hdev->flags)) {
2840                         kfree_skb(skb);
2841                         continue;
2842                 }
2843
2844                 if (test_bit(HCI_INIT, &hdev->flags)) {
2845                         /* Don't process data packets in these states. */
2846                         switch (bt_cb(skb)->pkt_type) {
2847                         case HCI_ACLDATA_PKT:
2848                         case HCI_SCODATA_PKT:
2849                                 kfree_skb(skb);
2850                                 continue;
2851                         }
2852                 }
2853
2854                 /* Process frame */
2855                 switch (bt_cb(skb)->pkt_type) {
2856                 case HCI_EVENT_PKT:
2857                         BT_DBG("%s Event packet", hdev->name);
2858                         hci_event_packet(hdev, skb);
2859                         break;
2860
2861                 case HCI_ACLDATA_PKT:
2862                         BT_DBG("%s ACL data packet", hdev->name);
2863                         hci_acldata_packet(hdev, skb);
2864                         break;
2865
2866                 case HCI_SCODATA_PKT:
2867                         BT_DBG("%s SCO data packet", hdev->name);
2868                         hci_scodata_packet(hdev, skb);
2869                         break;
2870
2871                 default:
2872                         kfree_skb(skb);
2873                         break;
2874                 }
2875         }
2876 }
2877
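/* Command scheduler.  cmd_cnt reflects how many commands the controller
 * will currently accept; a clone of the frame is kept in sent_cmd so
 * that hci_sent_cmd_data() can retrieve its parameters, and cmd_timer
 * catches controllers that never answer.
 */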
2878 static void hci_cmd_work(struct work_struct *work)
2879 {
2880         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2881         struct sk_buff *skb;
2882
2883         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2884
2885         /* Send queued commands */
2886         if (atomic_read(&hdev->cmd_cnt)) {
2887                 skb = skb_dequeue(&hdev->cmd_q);
2888                 if (!skb)
2889                         return;
2890
2891                 kfree_skb(hdev->sent_cmd);
2892
2893                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2894                 if (hdev->sent_cmd) {
2895                         atomic_dec(&hdev->cmd_cnt);
2896                         hci_send_frame(skb);
2897                         if (test_bit(HCI_RESET, &hdev->flags))
2898                                 del_timer(&hdev->cmd_timer);
2899                         else
2900                                 mod_timer(&hdev->cmd_timer,
2901                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2902                 } else {
2903                         skb_queue_head(&hdev->cmd_q, skb);
2904                         queue_work(hdev->workqueue, &hdev->cmd_work);
2905                 }
2906         }
2907 }
2908
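/* Kick off a classic inquiry.  The length parameter is the inquiry
 * duration as defined by the HCI specification, in units of 1.28
 * seconds.
 */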
2909 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2910 {
2911         /* General inquiry access code (GIAC) */
2912         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2913         struct hci_cp_inquiry cp;
2914
2915         BT_DBG("%s", hdev->name);
2916
2917         if (test_bit(HCI_INQUIRY, &hdev->flags))
2918                 return -EINPROGRESS;
2919
2920         inquiry_cache_flush(hdev);
2921
2922         memset(&cp, 0, sizeof(cp));
2923         memcpy(&cp.lap, lap, sizeof(cp.lap));
2924         cp.length  = length;
2925
2926         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2927 }
2928
2929 int hci_cancel_inquiry(struct hci_dev *hdev)
2930 {
2931         BT_DBG("%s", hdev->name);
2932
2933         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2934                 return -EPERM;
2935
2936         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2937 }