4a0cac774107dd555c6866491cc8c87f728cb915
[linux-2.6-microblaze.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43 #include "leds.h"
44
45 static void hci_rx_work(struct work_struct *work);
46 static void hci_cmd_work(struct work_struct *work);
47 static void hci_tx_work(struct work_struct *work);
48
49 /* HCI device list */
50 LIST_HEAD(hci_dev_list);
51 DEFINE_RWLOCK(hci_dev_list_lock);
52
53 /* HCI callback list */
54 LIST_HEAD(hci_cb_list);
55 DEFINE_MUTEX(hci_cb_list_lock);
56
57 /* HCI ID Numbering */
58 static DEFINE_IDA(hci_index_ida);
59
60 /* ---- HCI debugfs entries ---- */
61
62 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63                              size_t count, loff_t *ppos)
64 {
65         struct hci_dev *hdev = file->private_data;
66         char buf[3];
67
68         buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
69         buf[1] = '\n';
70         buf[2] = '\0';
71         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
72 }
73
/* Write handler for the "dut_mode" debugfs entry.
 *
 * Parses a boolean from userspace and toggles Device Under Test mode by
 * synchronously sending HCI_OP_ENABLE_DUT_MODE (enable) or HCI_OP_RESET
 * (disable, since DUT mode can only be left via a reset) to the
 * controller.
 *
 * Returns the number of bytes consumed on success, or a negative errno
 * (-ENETDOWN if the interface is down, -EFAULT on copy failure,
 * -EINVAL on unparsable input, -EALREADY if already in the requested
 * state, or the error from the HCI command itself).
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	/* DUT mode only makes sense while the interface is up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	/* NUL-terminate before parsing; input was capped to sizeof(buf)-1 */
	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do if the requested state is already active */
	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	/* Issue the state change synchronously under the request lock */
	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* The command complete parameters are not needed */
	kfree_skb(skb);

	/* Toggle the flag only after the controller accepted the command */
	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
114
/* File operations for the "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
121
122 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123                                 size_t count, loff_t *ppos)
124 {
125         struct hci_dev *hdev = file->private_data;
126         char buf[3];
127
128         buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
129         buf[1] = '\n';
130         buf[2] = '\0';
131         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132 }
133
/* Write handler for the "vendor_diag" debugfs entry.
 *
 * Parses a boolean from userspace and enables or disables vendor
 * specific diagnostic reporting via the driver's set_diag callback.
 * The callback is assumed to be valid here because the debugfs entry
 * is only created when hdev->set_diag is set (see
 * hci_debugfs_create_basic).
 *
 * Returns the number of bytes consumed on success or a negative errno.
 */
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	/* NUL-terminate before parsing; input was capped to sizeof(buf)-1 */
	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	/* Program the controller synchronously under the request lock */
	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	/* Record the new state only after the callback succeeded (or was
	 * skipped because the setting will be applied at power on).
	 */
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}
175
/* File operations for the "vendor_diag" debugfs entry */
static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};
182
183 static void hci_debugfs_create_basic(struct hci_dev *hdev)
184 {
185         debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
186                             &dut_mode_fops);
187
188         if (hdev->set_diag)
189                 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
190                                     &vendor_diag_fops);
191 }
192
193 static int hci_reset_req(struct hci_request *req, unsigned long opt)
194 {
195         BT_DBG("%s %ld", req->hdev->name, opt);
196
197         /* Reset device */
198         set_bit(HCI_RESET, &req->hdev->flags);
199         hci_req_add(req, HCI_OP_RESET, 0, NULL);
200         return 0;
201 }
202
203 static void bredr_init(struct hci_request *req)
204 {
205         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
206
207         /* Read Local Supported Features */
208         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209
210         /* Read Local Version */
211         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212
213         /* Read BD Address */
214         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
215 }
216
217 static void amp_init1(struct hci_request *req)
218 {
219         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
220
221         /* Read Local Version */
222         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
223
224         /* Read Local Supported Commands */
225         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
226
227         /* Read Local AMP Info */
228         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
229
230         /* Read Data Blk size */
231         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
232
233         /* Read Flow Control Mode */
234         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
235
236         /* Read Location Data */
237         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
238 }
239
240 static int amp_init2(struct hci_request *req)
241 {
242         /* Read Local Supported Features. Not all AMP controllers
243          * support this so it's placed conditionally in the second
244          * stage init.
245          */
246         if (req->hdev->commands[14] & 0x20)
247                 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
248
249         return 0;
250 }
251
252 static int hci_init1_req(struct hci_request *req, unsigned long opt)
253 {
254         struct hci_dev *hdev = req->hdev;
255
256         BT_DBG("%s %ld", hdev->name, opt);
257
258         /* Reset */
259         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
260                 hci_reset_req(req, 0);
261
262         switch (hdev->dev_type) {
263         case HCI_PRIMARY:
264                 bredr_init(req);
265                 break;
266         case HCI_AMP:
267                 amp_init1(req);
268                 break;
269         default:
270                 BT_ERR("Unknown device type %d", hdev->dev_type);
271                 break;
272         }
273
274         return 0;
275 }
276
277 static void bredr_setup(struct hci_request *req)
278 {
279         __le16 param;
280         __u8 flt_type;
281
282         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
283         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
284
285         /* Read Class of Device */
286         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
287
288         /* Read Local Name */
289         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
290
291         /* Read Voice Setting */
292         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
293
294         /* Read Number of Supported IAC */
295         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
296
297         /* Read Current IAC LAP */
298         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
299
300         /* Clear Event Filters */
301         flt_type = HCI_FLT_CLEAR_ALL;
302         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
303
304         /* Connection accept timeout ~20 secs */
305         param = cpu_to_le16(0x7d00);
306         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
307 }
308
309 static void le_setup(struct hci_request *req)
310 {
311         struct hci_dev *hdev = req->hdev;
312
313         /* Read LE Buffer Size */
314         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
315
316         /* Read LE Local Supported Features */
317         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
318
319         /* Read LE Supported States */
320         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
321
322         /* LE-only controllers have LE implicitly enabled */
323         if (!lmp_bredr_capable(hdev))
324                 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
325 }
326
/* Build and queue the Set Event Mask command (event mask page 1).
 *
 * Starts from a BR/EDR default mask (or a minimal LE-only mask) and
 * unmasks additional events based on the controller's LMP features and
 * supported-commands bitmask, so only events the controller can
 * actually generate are enabled.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
422
/* Second init stage: set up the event mask and issue the transport
 * specific stage-two command sequences (BR/EDR, LE, SSP/EIR, inquiry
 * mode and authentication) based on the features read in stage one.
 */
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* AMP controllers have their own, much shorter second stage */
	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* Without SSP enabled, clear any stored EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		/* Read the host features page (page 1) */
		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}
506
507 static void hci_setup_link_policy(struct hci_request *req)
508 {
509         struct hci_dev *hdev = req->hdev;
510         struct hci_cp_write_def_link_policy cp;
511         u16 link_policy = 0;
512
513         if (lmp_rswitch_capable(hdev))
514                 link_policy |= HCI_LP_RSWITCH;
515         if (lmp_hold_capable(hdev))
516                 link_policy |= HCI_LP_HOLD;
517         if (lmp_sniff_capable(hdev))
518                 link_policy |= HCI_LP_SNIFF;
519         if (lmp_park_capable(hdev))
520                 link_policy |= HCI_LP_PARK;
521
522         cp.policy = cpu_to_le16(link_policy);
523         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
524 }
525
526 static void hci_set_le_support(struct hci_request *req)
527 {
528         struct hci_dev *hdev = req->hdev;
529         struct hci_cp_write_le_host_supported cp;
530
531         /* LE-only devices do not support explicit enablement */
532         if (!lmp_bredr_capable(hdev))
533                 return;
534
535         memset(&cp, 0, sizeof(cp));
536
537         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
538                 cp.le = 0x01;
539                 cp.simul = 0x00;
540         }
541
542         if (cp.le != lmp_host_le_capable(hdev))
543                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
544                             &cp);
545 }
546
/* Build and queue the Set Event Mask Page 2 command, unmasking the
 * Connectionless Slave Broadcast and Authenticated Payload Timeout
 * events when the controller supports the corresponding features.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
578
/* Third init stage (primary controllers only): program the event mask,
 * link policy and page scan settings, build the LE event mask and read
 * the remaining LE controller state, then fetch feature pages beyond
 * page 1.
 */
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Read stored link keys if supported and not quirked as broken */
	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		/* Start from an all-masked LE event mask and unmask only
		 * what the controller can deliver.
		 */
		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}
721
/* Fourth init stage (primary controllers only): clean up stored link
 * keys, program event mask page 2, read codecs and MWS configuration,
 * and enable Secure Connections and LE data length extension when
 * supported.
 */
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = hdev->le_max_tx_len;
		cp.tx_time = hdev->le_max_tx_time;
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	return 0;
}
785
/* Run the full controller initialization sequence.
 *
 * Executes init stages 1-4 synchronously in order; stage results feed
 * into the conditions of later stages, so the order is fixed. AMP
 * controllers stop after stage 2. Debugfs entries are created once,
 * during the setup or config phase.
 *
 * Returns 0 on success or the first stage's negative error code.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
842
843 static int hci_init0_req(struct hci_request *req, unsigned long opt)
844 {
845         struct hci_dev *hdev = req->hdev;
846
847         BT_DBG("%s %ld", hdev->name, opt);
848
849         /* Reset */
850         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
851                 hci_reset_req(req, 0);
852
853         /* Read Local Version */
854         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
855
856         /* Read BD Address */
857         if (hdev->set_bdaddr)
858                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
859
860         return 0;
861 }
862
863 static int __hci_unconf_init(struct hci_dev *hdev)
864 {
865         int err;
866
867         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
868                 return 0;
869
870         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
871         if (err < 0)
872                 return err;
873
874         if (hci_dev_test_flag(hdev, HCI_SETUP))
875                 hci_debugfs_create_basic(hdev);
876
877         return 0;
878 }
879
880 static int hci_scan_req(struct hci_request *req, unsigned long opt)
881 {
882         __u8 scan = opt;
883
884         BT_DBG("%s %x", req->hdev->name, scan);
885
886         /* Inquiry and Page scans */
887         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
888         return 0;
889 }
890
891 static int hci_auth_req(struct hci_request *req, unsigned long opt)
892 {
893         __u8 auth = opt;
894
895         BT_DBG("%s %x", req->hdev->name, auth);
896
897         /* Authentication */
898         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
899         return 0;
900 }
901
902 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
903 {
904         __u8 encrypt = opt;
905
906         BT_DBG("%s %x", req->hdev->name, encrypt);
907
908         /* Encryption */
909         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
910         return 0;
911 }
912
913 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
914 {
915         __le16 policy = cpu_to_le16(opt);
916
917         BT_DBG("%s %x", req->hdev->name, policy);
918
919         /* Default link policy */
920         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
921         return 0;
922 }
923
924 /* Get HCI device by index.
925  * Device is held on return. */
926 struct hci_dev *hci_dev_get(int index)
927 {
928         struct hci_dev *hdev = NULL, *d;
929
930         BT_DBG("%d", index);
931
932         if (index < 0)
933                 return NULL;
934
935         read_lock(&hci_dev_list_lock);
936         list_for_each_entry(d, &hci_dev_list, list) {
937                 if (d->id == index) {
938                         hdev = hci_dev_hold(d);
939                         break;
940                 }
941         }
942         read_unlock(&hci_dev_list_lock);
943         return hdev;
944 }
945
946 /* ---- Inquiry support ---- */
947
948 bool hci_discovery_active(struct hci_dev *hdev)
949 {
950         struct discovery_state *discov = &hdev->discovery;
951
952         switch (discov->state) {
953         case DISCOVERY_FINDING:
954         case DISCOVERY_RESOLVING:
955                 return true;
956
957         default:
958                 return false;
959         }
960 }
961
962 void hci_discovery_set_state(struct hci_dev *hdev, int state)
963 {
964         int old_state = hdev->discovery.state;
965
966         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
967
968         if (old_state == state)
969                 return;
970
971         hdev->discovery.state = state;
972
973         switch (state) {
974         case DISCOVERY_STOPPED:
975                 hci_update_background_scan(hdev);
976
977                 if (old_state != DISCOVERY_STARTING)
978                         mgmt_discovering(hdev, 0);
979                 break;
980         case DISCOVERY_STARTING:
981                 break;
982         case DISCOVERY_FINDING:
983                 mgmt_discovering(hdev, 1);
984                 break;
985         case DISCOVERY_RESOLVING:
986                 break;
987         case DISCOVERY_STOPPING:
988                 break;
989         }
990 }
991
992 void hci_inquiry_cache_flush(struct hci_dev *hdev)
993 {
994         struct discovery_state *cache = &hdev->discovery;
995         struct inquiry_entry *p, *n;
996
997         list_for_each_entry_safe(p, n, &cache->all, all) {
998                 list_del(&p->all);
999                 kfree(p);
1000         }
1001
1002         INIT_LIST_HEAD(&cache->unknown);
1003         INIT_LIST_HEAD(&cache->resolve);
1004 }
1005
1006 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1007                                                bdaddr_t *bdaddr)
1008 {
1009         struct discovery_state *cache = &hdev->discovery;
1010         struct inquiry_entry *e;
1011
1012         BT_DBG("cache %p, %pMR", cache, bdaddr);
1013
1014         list_for_each_entry(e, &cache->all, all) {
1015                 if (!bacmp(&e->data.bdaddr, bdaddr))
1016                         return e;
1017         }
1018
1019         return NULL;
1020 }
1021
1022 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1023                                                        bdaddr_t *bdaddr)
1024 {
1025         struct discovery_state *cache = &hdev->discovery;
1026         struct inquiry_entry *e;
1027
1028         BT_DBG("cache %p, %pMR", cache, bdaddr);
1029
1030         list_for_each_entry(e, &cache->unknown, list) {
1031                 if (!bacmp(&e->data.bdaddr, bdaddr))
1032                         return e;
1033         }
1034
1035         return NULL;
1036 }
1037
1038 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1039                                                        bdaddr_t *bdaddr,
1040                                                        int state)
1041 {
1042         struct discovery_state *cache = &hdev->discovery;
1043         struct inquiry_entry *e;
1044
1045         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1046
1047         list_for_each_entry(e, &cache->resolve, list) {
1048                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1049                         return e;
1050                 if (!bacmp(&e->data.bdaddr, bdaddr))
1051                         return e;
1052         }
1053
1054         return NULL;
1055 }
1056
/* Re-insert @ie into the resolve list at its correct sorted position.
 *
 * The list is kept ordered by ascending RSSI magnitude (presumably RSSI
 * is a negative dBm value, so smaller magnitude = stronger signal first
 * — confirm against the event handlers that fill in data.rssi), with
 * NAME_PENDING entries never chosen as the insertion boundary.
 *
 * NOTE(review): callers appear to hold hdev->lock while manipulating the
 * discovery lists — verify at the call sites.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; the entry is re-added at its new position below */
	list_del(&ie->list);

	/* Stop at the first non-pending entry whose RSSI magnitude is at
	 * least as large as ie's; pos trails one node behind the cursor.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	/* list_add() inserts after pos, i.e. just before the entry that
	 * terminated the walk (or at the head if the list was empty).
	 */
	list_add(&ie->list, pos);
}
1075
/* Insert or refresh the inquiry cache entry for a discovered device.
 *
 * @data:       inquiry result data for the device
 * @name_known: true if the remote name is already known to the caller
 *
 * Returns a bitmask of MGMT_DEV_FOUND_* flags telling userspace how to
 * treat the result (legacy pairing in use, name confirmation needed).
 *
 * NOTE(review): the discovery lists are modified without locking here,
 * so callers presumably hold hdev->lock — confirm at the call sites.
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Any stored remote OOB data for this address is dropped once the
	 * device shows up in an inquiry again.
	 */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	/* A device not advertising SSP support uses legacy pairing */
	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* If name resolution is still due and the RSSI changed,
		 * re-sort the entry within the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		/* Allocation failed: the device cannot be tracked, so ask
		 * userspace to confirm the name instead.
		 */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* The name just became known: take the entry off whichever
	 * (unknown/resolve) secondary list it was sitting on. Entries
	 * with a resolution already in flight (NAME_PENDING) are left.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	/* Refresh the stored inquiry data and timestamps */
	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1137
1138 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1139 {
1140         struct discovery_state *cache = &hdev->discovery;
1141         struct inquiry_info *info = (struct inquiry_info *) buf;
1142         struct inquiry_entry *e;
1143         int copied = 0;
1144
1145         list_for_each_entry(e, &cache->all, all) {
1146                 struct inquiry_data *data = &e->data;
1147
1148                 if (copied >= num)
1149                         break;
1150
1151                 bacpy(&info->bdaddr, &data->bdaddr);
1152                 info->pscan_rep_mode    = data->pscan_rep_mode;
1153                 info->pscan_period_mode = data->pscan_period_mode;
1154                 info->pscan_mode        = data->pscan_mode;
1155                 memcpy(info->dev_class, data->dev_class, 3);
1156                 info->clock_offset      = data->clock_offset;
1157
1158                 info++;
1159                 copied++;
1160         }
1161
1162         BT_DBG("cache %p, copied %d", cache, copied);
1163         return copied;
1164 }
1165
1166 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1167 {
1168         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1169         struct hci_dev *hdev = req->hdev;
1170         struct hci_cp_inquiry cp;
1171
1172         BT_DBG("%s", hdev->name);
1173
1174         if (test_bit(HCI_INQUIRY, &hdev->flags))
1175                 return 0;
1176
1177         /* Start Inquiry */
1178         memcpy(&cp.lap, &ir->lap, 3);
1179         cp.length  = ir->length;
1180         cp.num_rsp = ir->num_rsp;
1181         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1182
1183         return 0;
1184 }
1185
1186 int hci_inquiry(void __user *arg)
1187 {
1188         __u8 __user *ptr = arg;
1189         struct hci_inquiry_req ir;
1190         struct hci_dev *hdev;
1191         int err = 0, do_inquiry = 0, max_rsp;
1192         long timeo;
1193         __u8 *buf;
1194
1195         if (copy_from_user(&ir, ptr, sizeof(ir)))
1196                 return -EFAULT;
1197
1198         hdev = hci_dev_get(ir.dev_id);
1199         if (!hdev)
1200                 return -ENODEV;
1201
1202         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1203                 err = -EBUSY;
1204                 goto done;
1205         }
1206
1207         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1208                 err = -EOPNOTSUPP;
1209                 goto done;
1210         }
1211
1212         if (hdev->dev_type != HCI_PRIMARY) {
1213                 err = -EOPNOTSUPP;
1214                 goto done;
1215         }
1216
1217         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1218                 err = -EOPNOTSUPP;
1219                 goto done;
1220         }
1221
1222         hci_dev_lock(hdev);
1223         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1224             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1225                 hci_inquiry_cache_flush(hdev);
1226                 do_inquiry = 1;
1227         }
1228         hci_dev_unlock(hdev);
1229
1230         timeo = ir.length * msecs_to_jiffies(2000);
1231
1232         if (do_inquiry) {
1233                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1234                                    timeo, NULL);
1235                 if (err < 0)
1236                         goto done;
1237
1238                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1239                  * cleared). If it is interrupted by a signal, return -EINTR.
1240                  */
1241                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1242                                 TASK_INTERRUPTIBLE))
1243                         return -EINTR;
1244         }
1245
1246         /* for unlimited number of responses we will use buffer with
1247          * 255 entries
1248          */
1249         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1250
1251         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1252          * copy it to the user space.
1253          */
1254         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1255         if (!buf) {
1256                 err = -ENOMEM;
1257                 goto done;
1258         }
1259
1260         hci_dev_lock(hdev);
1261         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1262         hci_dev_unlock(hdev);
1263
1264         BT_DBG("num_rsp %d", ir.num_rsp);
1265
1266         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1267                 ptr += sizeof(ir);
1268                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1269                                  ir.num_rsp))
1270                         err = -EFAULT;
1271         } else
1272                 err = -EFAULT;
1273
1274         kfree(buf);
1275
1276 done:
1277         hci_dev_put(hdev);
1278         return err;
1279 }
1280
/* Bring an HCI device up: sanity checks, driver open callback, the
 * setup/config/init command sequences, and either the HCI_UP transition
 * or a full cleanup on failure.
 *
 * Holds the request sync lock for the whole procedure. On success a
 * device reference is taken (dropped again in hci_dev_do_close()).
 * Returns 0 or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	/* RFKILL and address checks are skipped while the device is still
	 * in its setup or config phase.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Driver-level open (transport bring-up) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	/* Enter the init phase: one command credit, HCI_INIT set for all
	 * commands issued until init completes below.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	/* Full init only for configured controllers not driven through
	 * the user channel.
	 */
	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		/* Keep only the HCI_RAW bit across the failed open */
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}
1444
1445 /* ---- HCI ioctl helpers ---- */
1446
1447 int hci_dev_open(__u16 dev)
1448 {
1449         struct hci_dev *hdev;
1450         int err;
1451
1452         hdev = hci_dev_get(dev);
1453         if (!hdev)
1454                 return -ENODEV;
1455
1456         /* Devices that are marked as unconfigured can only be powered
1457          * up as user channel. Trying to bring them up as normal devices
1458          * will result into a failure. Only user channel operation is
1459          * possible.
1460          *
1461          * When this function is called for a user channel, the flag
1462          * HCI_USER_CHANNEL will be set first before attempting to
1463          * open the device.
1464          */
1465         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1466             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1467                 err = -EOPNOTSUPP;
1468                 goto done;
1469         }
1470
1471         /* We need to ensure that no other power on/off work is pending
1472          * before proceeding to call hci_dev_do_open. This is
1473          * particularly important if the setup procedure has not yet
1474          * completed.
1475          */
1476         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1477                 cancel_delayed_work(&hdev->power_off);
1478
1479         /* After this call it is guaranteed that the setup procedure
1480          * has finished. This means that error conditions like RFKILL
1481          * or no valid public or static random address apply.
1482          */
1483         flush_workqueue(hdev->req_workqueue);
1484
1485         /* For controllers not using the management interface and that
1486          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1487          * so that pairing works for them. Once the management interface
1488          * is in use this bit will be cleared again and userspace has
1489          * to explicitly enable it.
1490          */
1491         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1492             !hci_dev_test_flag(hdev, HCI_MGMT))
1493                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1494
1495         err = hci_dev_do_open(hdev);
1496
1497 done:
1498         hci_dev_put(hdev);
1499         return err;
1500 }
1501
1502 /* This function requires the caller holds hdev->lock */
1503 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1504 {
1505         struct hci_conn_params *p;
1506
1507         list_for_each_entry(p, &hdev->le_conn_params, list) {
1508                 if (p->conn) {
1509                         hci_conn_drop(p->conn);
1510                         hci_conn_put(p->conn);
1511                         p->conn = NULL;
1512                 }
1513                 list_del_init(&p->action);
1514         }
1515
1516         BT_DBG("All LE pending actions cleared");
1517 }
1518
/* Power an HCI device down: stop pending work, flush queues and caches,
 * optionally issue a final HCI Reset, and call the driver close callback.
 *
 * Always returns 0. Drops the device reference that hci_dev_do_open()
 * took when HCI_UP was set; safe to call on an already-down device.
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	/* Already down: only the command timer needs stopping */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A device going down can no longer be discoverable */
	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	/* Tell mgmt about the power-off unless it was an automatic one */
	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	/* Drop the reference taken on open (HCI_UP path) */
	hci_dev_put(hdev);
	return 0;
}
1638
1639 int hci_dev_close(__u16 dev)
1640 {
1641         struct hci_dev *hdev;
1642         int err;
1643
1644         hdev = hci_dev_get(dev);
1645         if (!hdev)
1646                 return -ENODEV;
1647
1648         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1649                 err = -EBUSY;
1650                 goto done;
1651         }
1652
1653         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1654                 cancel_delayed_work(&hdev->power_off);
1655
1656         err = hci_dev_do_close(hdev);
1657
1658 done:
1659         hci_dev_put(hdev);
1660         return err;
1661 }
1662
/* Perform the actual controller reset for HCIDEVRESET: drop all queued
 * traffic, flush the inquiry cache and connection hash, then issue a
 * synchronous HCI Reset command.
 *
 * Callers must have validated the device state first (see hci_dev_reset).
 * Returns the result of the HCI Reset request.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Command credits back to 1; clear all per-link packet counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}
1696
1697 int hci_dev_reset(__u16 dev)
1698 {
1699         struct hci_dev *hdev;
1700         int err;
1701
1702         hdev = hci_dev_get(dev);
1703         if (!hdev)
1704                 return -ENODEV;
1705
1706         if (!test_bit(HCI_UP, &hdev->flags)) {
1707                 err = -ENETDOWN;
1708                 goto done;
1709         }
1710
1711         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1712                 err = -EBUSY;
1713                 goto done;
1714         }
1715
1716         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1717                 err = -EOPNOTSUPP;
1718                 goto done;
1719         }
1720
1721         err = hci_dev_do_reset(hdev);
1722
1723 done:
1724         hci_dev_put(hdev);
1725         return err;
1726 }
1727
1728 int hci_dev_reset_stat(__u16 dev)
1729 {
1730         struct hci_dev *hdev;
1731         int ret = 0;
1732
1733         hdev = hci_dev_get(dev);
1734         if (!hdev)
1735                 return -ENODEV;
1736
1737         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1738                 ret = -EBUSY;
1739                 goto done;
1740         }
1741
1742         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1743                 ret = -EOPNOTSUPP;
1744                 goto done;
1745         }
1746
1747         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1748
1749 done:
1750         hci_dev_put(hdev);
1751         return ret;
1752 }
1753
/* Sync the CONNECTABLE/DISCOVERABLE device flags with a scan mode that
 * was changed through the legacy ioctl interface (HCISETSCAN) rather
 * than through mgmt, and notify mgmt listeners when a setting actually
 * changed.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* test_and_set/test_and_clear so a change is only reported when
	 * the flag actually flipped.
	 */
	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		/* Limited discoverable is cleared together with the
		 * main discoverable flag when inquiry scan goes away.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	/* Without mgmt support there is nobody to notify */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		/* Keep the LE advertising data in sync with the new
		 * discoverable state.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
1789
/* Handle the legacy HCISET* ioctls that change controller settings.
 * arg points to a struct hci_dev_req in user space; dr.dev_opt carries
 * the command-specific value.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV, -EBUSY,
 * -EOPNOTSUPP, -EINVAL, or the error from the synchronous request).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A user channel owns the device exclusively */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* These ioctls only make sense for BR/EDR-capable primary
	 * controllers, not AMP controllers.
	 */
	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit halves: index 1 is the MTU,
		 * index 0 the packet count. NOTE(review): this pointer
		 * punning makes the layout host-endian dependent;
		 * presumably matched by the hcitool/userspace side.
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packed-halves layout as HCISETACLMTU above */
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1891
/* Handle the HCIGETDEVLIST ioctl: copy up to dev_num (id, flags) pairs
 * for the registered controllers into the user-supplied buffer.
 *
 * arg points to a struct hci_dev_list_req whose first __u16 holds the
 * requested number of entries. Returns 0 on success, -EFAULT on copy
 * failure, -EINVAL for a zero or oversized request, -ENOMEM if the
 * temporary buffer cannot be allocated.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation; the limit also guards the size
	 * multiplication below against overflow.
	 */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1941
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the
 * controller whose id is given in the user-supplied structure.
 *
 * Returns 0 on success, -EFAULT on copy failure, -ENODEV for an
 * unknown index.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	/* NOTE(review): plain strcpy relies on di.name and hdev->name
	 * having the same fixed size -- confirm against the uapi header.
	 */
	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Encode bus type in the low nibble and device type above it */
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields since hci_dev_info has no dedicated LE members.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1994
1995 /* ---- Interface to HCI drivers ---- */
1996
1997 static int hci_rfkill_set_block(void *data, bool blocked)
1998 {
1999         struct hci_dev *hdev = data;
2000
2001         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2002
2003         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2004                 return -EBUSY;
2005
2006         if (blocked) {
2007                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2008                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2009                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2010                         hci_dev_do_close(hdev);
2011         } else {
2012                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2013         }
2014
2015         return 0;
2016 }
2017
/* rfkill operations for Bluetooth controllers; only soft-block
 * handling is needed.
 */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2021
/* Deferred power-on work: bring the controller up and complete the
 * HCI_SETUP/HCI_CONFIG state transitions, notifying mgmt as needed.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* If the transport is already up with auto-off armed, this is a
	 * mgmt-triggered power-on: just cancel the pending power-off and
	 * run the power-on request instead of reopening the device.
	 */
	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		hci_req_sync_lock(hdev);
		err = __hci_req_hci_power_on(hdev);
		hci_req_sync_unlock(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		/* Arm the automatic power-off if nobody claims the
		 * device within the timeout.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigued Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be send.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2093
2094 static void hci_power_off(struct work_struct *work)
2095 {
2096         struct hci_dev *hdev = container_of(work, struct hci_dev,
2097                                             power_off.work);
2098
2099         BT_DBG("%s", hdev->name);
2100
2101         hci_dev_do_close(hdev);
2102 }
2103
2104 static void hci_error_reset(struct work_struct *work)
2105 {
2106         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2107
2108         BT_DBG("%s", hdev->name);
2109
2110         if (hdev->hw_error)
2111                 hdev->hw_error(hdev, hdev->hw_error_code);
2112         else
2113                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2114                        hdev->hw_error_code);
2115
2116         if (hci_dev_do_close(hdev))
2117                 return;
2118
2119         hci_dev_do_open(hdev);
2120 }
2121
2122 void hci_uuids_clear(struct hci_dev *hdev)
2123 {
2124         struct bt_uuid *uuid, *tmp;
2125
2126         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2127                 list_del(&uuid->list);
2128                 kfree(uuid);
2129         }
2130 }
2131
2132 void hci_link_keys_clear(struct hci_dev *hdev)
2133 {
2134         struct link_key *key;
2135
2136         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2137                 list_del_rcu(&key->list);
2138                 kfree_rcu(key, rcu);
2139         }
2140 }
2141
2142 void hci_smp_ltks_clear(struct hci_dev *hdev)
2143 {
2144         struct smp_ltk *k;
2145
2146         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2147                 list_del_rcu(&k->list);
2148                 kfree_rcu(k, rcu);
2149         }
2150 }
2151
2152 void hci_smp_irks_clear(struct hci_dev *hdev)
2153 {
2154         struct smp_irk *k;
2155
2156         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2157                 list_del_rcu(&k->list);
2158                 kfree_rcu(k, rcu);
2159         }
2160 }
2161
2162 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2163 {
2164         struct link_key *k;
2165
2166         rcu_read_lock();
2167         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2168                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2169                         rcu_read_unlock();
2170                         return k;
2171                 }
2172         }
2173         rcu_read_unlock();
2174
2175         return NULL;
2176 }
2177
2178 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2179                                u8 key_type, u8 old_key_type)
2180 {
2181         /* Legacy key */
2182         if (key_type < 0x03)
2183                 return true;
2184
2185         /* Debug keys are insecure so don't store them persistently */
2186         if (key_type == HCI_LK_DEBUG_COMBINATION)
2187                 return false;
2188
2189         /* Changed combination key and there's no previous one */
2190         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2191                 return false;
2192
2193         /* Security mode 3 case */
2194         if (!conn)
2195                 return true;
2196
2197         /* BR/EDR key derived using SC from an LE link */
2198         if (conn->type == LE_LINK)
2199                 return true;
2200
2201         /* Neither local nor remote side had no-bonding as requirement */
2202         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2203                 return true;
2204
2205         /* Local side had dedicated bonding as requirement */
2206         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2207                 return true;
2208
2209         /* Remote side had dedicated bonding as requirement */
2210         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2211                 return true;
2212
2213         /* If none of the above criteria match, then don't store the key
2214          * persistently */
2215         return false;
2216 }
2217
2218 static u8 ltk_role(u8 type)
2219 {
2220         if (type == SMP_LTK)
2221                 return HCI_ROLE_MASTER;
2222
2223         return HCI_ROLE_SLAVE;
2224 }
2225
2226 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2227                              u8 addr_type, u8 role)
2228 {
2229         struct smp_ltk *k;
2230
2231         rcu_read_lock();
2232         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2233                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2234                         continue;
2235
2236                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2237                         rcu_read_unlock();
2238                         return k;
2239                 }
2240         }
2241         rcu_read_unlock();
2242
2243         return NULL;
2244 }
2245
/* Find the IRK whose owner generated the given Resolvable Private
 * Address. First try the cheap comparison against the last RPA cached
 * in each entry; only on a miss fall back to cryptographic resolution
 * and cache the result for next time. Returns the IRK or NULL.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	/* No cached match; resolve the RPA against each stored IRK */
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the RPA for the fast path above */
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2269
2270 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2271                                      u8 addr_type)
2272 {
2273         struct smp_irk *irk;
2274
2275         /* Identity Address must be public or static random */
2276         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2277                 return NULL;
2278
2279         rcu_read_lock();
2280         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2281                 if (addr_type == irk->addr_type &&
2282                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2283                         rcu_read_unlock();
2284                         return irk;
2285                 }
2286         }
2287         rcu_read_unlock();
2288
2289         return NULL;
2290 }
2291
/* Store (or update) a BR/EDR link key for the given address.
 *
 * conn may be NULL (security mode 3). If persistent is non-NULL it is
 * set to whether the key should be stored persistently, as decided by
 * hci_persistent_key(). Returns the stored key or NULL on allocation
 * failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the persistence and
		 * buggy-controller checks below.
		 */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2338
2339 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2340                             u8 addr_type, u8 type, u8 authenticated,
2341                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2342 {
2343         struct smp_ltk *key, *old_key;
2344         u8 role = ltk_role(type);
2345
2346         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2347         if (old_key)
2348                 key = old_key;
2349         else {
2350                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2351                 if (!key)
2352                         return NULL;
2353                 list_add_rcu(&key->list, &hdev->long_term_keys);
2354         }
2355
2356         bacpy(&key->bdaddr, bdaddr);
2357         key->bdaddr_type = addr_type;
2358         memcpy(key->val, tk, sizeof(key->val));
2359         key->authenticated = authenticated;
2360         key->ediv = ediv;
2361         key->rand = rand;
2362         key->enc_size = enc_size;
2363         key->type = type;
2364
2365         return key;
2366 }
2367
2368 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2369                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2370 {
2371         struct smp_irk *irk;
2372
2373         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2374         if (!irk) {
2375                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2376                 if (!irk)
2377                         return NULL;
2378
2379                 bacpy(&irk->bdaddr, bdaddr);
2380                 irk->addr_type = addr_type;
2381
2382                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2383         }
2384
2385         memcpy(irk->val, val, 16);
2386         bacpy(&irk->rpa, rpa);
2387
2388         return irk;
2389 }
2390
2391 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2392 {
2393         struct link_key *key;
2394
2395         key = hci_find_link_key(hdev, bdaddr);
2396         if (!key)
2397                 return -ENOENT;
2398
2399         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2400
2401         list_del_rcu(&key->list);
2402         kfree_rcu(key, rcu);
2403
2404         return 0;
2405 }
2406
2407 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2408 {
2409         struct smp_ltk *k;
2410         int removed = 0;
2411
2412         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2413                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2414                         continue;
2415
2416                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2417
2418                 list_del_rcu(&k->list);
2419                 kfree_rcu(k, rcu);
2420                 removed++;
2421         }
2422
2423         return removed ? 0 : -ENOENT;
2424 }
2425
2426 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2427 {
2428         struct smp_irk *k;
2429
2430         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2431                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2432                         continue;
2433
2434                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2435
2436                 list_del_rcu(&k->list);
2437                 kfree_rcu(k, rcu);
2438         }
2439 }
2440
/* Check whether the given address belongs to a paired device: a stored
 * link key for BR/EDR addresses, or a stored LTK (after IRK identity
 * resolution) for LE addresses.
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	/* If an IRK is known for this address, keys are stored under
	 * the identity address instead of the given one.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
2476
2477 /* HCI command timer function */
2478 static void hci_cmd_timeout(struct work_struct *work)
2479 {
2480         struct hci_dev *hdev = container_of(work, struct hci_dev,
2481                                             cmd_timer.work);
2482
2483         if (hdev->sent_cmd) {
2484                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2485                 u16 opcode = __le16_to_cpu(sent->opcode);
2486
2487                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2488         } else {
2489                 BT_ERR("%s command tx timeout", hdev->name);
2490         }
2491
2492         atomic_set(&hdev->cmd_cnt, 1);
2493         queue_work(hdev->workqueue, &hdev->cmd_work);
2494 }
2495
2496 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2497                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2498 {
2499         struct oob_data *data;
2500
2501         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2502                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2503                         continue;
2504                 if (data->bdaddr_type != bdaddr_type)
2505                         continue;
2506                 return data;
2507         }
2508
2509         return NULL;
2510 }
2511
2512 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2513                                u8 bdaddr_type)
2514 {
2515         struct oob_data *data;
2516
2517         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2518         if (!data)
2519                 return -ENOENT;
2520
2521         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2522
2523         list_del(&data->list);
2524         kfree(data);
2525
2526         return 0;
2527 }
2528
2529 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2530 {
2531         struct oob_data *data, *n;
2532
2533         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2534                 list_del(&data->list);
2535                 kfree(data);
2536         }
2537 }
2538
/* Store (or update) remote OOB pairing data for the given address.
 *
 * hash192/rand192 are the P-192 values, hash256/rand256 the P-256
 * values; either pair may be NULL. data->present encodes which pairs
 * are valid: 0x00 none, 0x01 P-192 only, 0x02 P-256 only, 0x03 both.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	/* Reuse an existing entry for this address if there is one */
	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		/* No P-192 values: zero them out and record whether
		 * only P-256 values are present.
		 */
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		/* No P-256 values: zero them out and, if P-192 values
		 * were supplied, mark only those as present.
		 */
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2584
2585 /* This function requires the caller holds hdev->lock */
2586 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2587 {
2588         struct adv_info *adv_instance;
2589
2590         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2591                 if (adv_instance->instance == instance)
2592                         return adv_instance;
2593         }
2594
2595         return NULL;
2596 }
2597
2598 /* This function requires the caller holds hdev->lock */
2599 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2600 {
2601         struct adv_info *cur_instance;
2602
2603         cur_instance = hci_find_adv_instance(hdev, instance);
2604         if (!cur_instance)
2605                 return NULL;
2606
2607         if (cur_instance == list_last_entry(&hdev->adv_instances,
2608                                             struct adv_info, list))
2609                 return list_first_entry(&hdev->adv_instances,
2610                                                  struct adv_info, list);
2611         else
2612                 return list_next_entry(cur_instance, list);
2613 }
2614
2615 /* This function requires the caller holds hdev->lock */
2616 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2617 {
2618         struct adv_info *adv_instance;
2619
2620         adv_instance = hci_find_adv_instance(hdev, instance);
2621         if (!adv_instance)
2622                 return -ENOENT;
2623
2624         BT_DBG("%s removing %dMR", hdev->name, instance);
2625
2626         if (hdev->cur_adv_instance == instance) {
2627                 if (hdev->adv_instance_timeout) {
2628                         cancel_delayed_work(&hdev->adv_instance_expire);
2629                         hdev->adv_instance_timeout = 0;
2630                 }
2631                 hdev->cur_adv_instance = 0x00;
2632         }
2633
2634         list_del(&adv_instance->list);
2635         kfree(adv_instance);
2636
2637         hdev->adv_instance_cnt--;
2638
2639         return 0;
2640 }
2641
2642 /* This function requires the caller holds hdev->lock */
2643 void hci_adv_instances_clear(struct hci_dev *hdev)
2644 {
2645         struct adv_info *adv_instance, *n;
2646
2647         if (hdev->adv_instance_timeout) {
2648                 cancel_delayed_work(&hdev->adv_instance_expire);
2649                 hdev->adv_instance_timeout = 0;
2650         }
2651
2652         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2653                 list_del(&adv_instance->list);
2654                 kfree(adv_instance);
2655         }
2656
2657         hdev->adv_instance_cnt = 0;
2658         hdev->cur_adv_instance = 0x00;
2659 }
2660
2661 /* This function requires the caller holds hdev->lock */
2662 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2663                          u16 adv_data_len, u8 *adv_data,
2664                          u16 scan_rsp_len, u8 *scan_rsp_data,
2665                          u16 timeout, u16 duration)
2666 {
2667         struct adv_info *adv_instance;
2668
2669         adv_instance = hci_find_adv_instance(hdev, instance);
2670         if (adv_instance) {
2671                 memset(adv_instance->adv_data, 0,
2672                        sizeof(adv_instance->adv_data));
2673                 memset(adv_instance->scan_rsp_data, 0,
2674                        sizeof(adv_instance->scan_rsp_data));
2675         } else {
2676                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2677                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2678                         return -EOVERFLOW;
2679
2680                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2681                 if (!adv_instance)
2682                         return -ENOMEM;
2683
2684                 adv_instance->pending = true;
2685                 adv_instance->instance = instance;
2686                 list_add(&adv_instance->list, &hdev->adv_instances);
2687                 hdev->adv_instance_cnt++;
2688         }
2689
2690         adv_instance->flags = flags;
2691         adv_instance->adv_data_len = adv_data_len;
2692         adv_instance->scan_rsp_len = scan_rsp_len;
2693
2694         if (adv_data_len)
2695                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2696
2697         if (scan_rsp_len)
2698                 memcpy(adv_instance->scan_rsp_data,
2699                        scan_rsp_data, scan_rsp_len);
2700
2701         adv_instance->timeout = timeout;
2702         adv_instance->remaining_time = timeout;
2703
2704         if (duration == 0)
2705                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2706         else
2707                 adv_instance->duration = duration;
2708
2709         BT_DBG("%s for %dMR", hdev->name, instance);
2710
2711         return 0;
2712 }
2713
2714 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2715                                          bdaddr_t *bdaddr, u8 type)
2716 {
2717         struct bdaddr_list *b;
2718
2719         list_for_each_entry(b, bdaddr_list, list) {
2720                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2721                         return b;
2722         }
2723
2724         return NULL;
2725 }
2726
2727 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2728 {
2729         struct bdaddr_list *b, *n;
2730
2731         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2732                 list_del(&b->list);
2733                 kfree(b);
2734         }
2735 }
2736
2737 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2738 {
2739         struct bdaddr_list *entry;
2740
2741         if (!bacmp(bdaddr, BDADDR_ANY))
2742                 return -EBADF;
2743
2744         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2745                 return -EEXIST;
2746
2747         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2748         if (!entry)
2749                 return -ENOMEM;
2750
2751         bacpy(&entry->bdaddr, bdaddr);
2752         entry->bdaddr_type = type;
2753
2754         list_add(&entry->list, list);
2755
2756         return 0;
2757 }
2758
2759 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2760 {
2761         struct bdaddr_list *entry;
2762
2763         if (!bacmp(bdaddr, BDADDR_ANY)) {
2764                 hci_bdaddr_list_clear(list);
2765                 return 0;
2766         }
2767
2768         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2769         if (!entry)
2770                 return -ENOENT;
2771
2772         list_del(&entry->list);
2773         kfree(entry);
2774
2775         return 0;
2776 }
2777
2778 /* This function requires the caller holds hdev->lock */
2779 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2780                                                bdaddr_t *addr, u8 addr_type)
2781 {
2782         struct hci_conn_params *params;
2783
2784         list_for_each_entry(params, &hdev->le_conn_params, list) {
2785                 if (bacmp(&params->addr, addr) == 0 &&
2786                     params->addr_type == addr_type) {
2787                         return params;
2788                 }
2789         }
2790
2791         return NULL;
2792 }
2793
2794 /* This function requires the caller holds hdev->lock */
2795 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2796                                                   bdaddr_t *addr, u8 addr_type)
2797 {
2798         struct hci_conn_params *param;
2799
2800         list_for_each_entry(param, list, action) {
2801                 if (bacmp(&param->addr, addr) == 0 &&
2802                     param->addr_type == addr_type)
2803                         return param;
2804         }
2805
2806         return NULL;
2807 }
2808
2809 /* This function requires the caller holds hdev->lock */
2810 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2811                                             bdaddr_t *addr, u8 addr_type)
2812 {
2813         struct hci_conn_params *params;
2814
2815         params = hci_conn_params_lookup(hdev, addr, addr_type);
2816         if (params)
2817                 return params;
2818
2819         params = kzalloc(sizeof(*params), GFP_KERNEL);
2820         if (!params) {
2821                 BT_ERR("Out of memory");
2822                 return NULL;
2823         }
2824
2825         bacpy(&params->addr, addr);
2826         params->addr_type = addr_type;
2827
2828         list_add(&params->list, &hdev->le_conn_params);
2829         INIT_LIST_HEAD(&params->action);
2830
2831         params->conn_min_interval = hdev->le_conn_min_interval;
2832         params->conn_max_interval = hdev->le_conn_max_interval;
2833         params->conn_latency = hdev->le_conn_latency;
2834         params->supervision_timeout = hdev->le_supv_timeout;
2835         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2836
2837         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2838
2839         return params;
2840 }
2841
2842 static void hci_conn_params_free(struct hci_conn_params *params)
2843 {
2844         if (params->conn) {
2845                 hci_conn_drop(params->conn);
2846                 hci_conn_put(params->conn);
2847         }
2848
2849         list_del(&params->action);
2850         list_del(&params->list);
2851         kfree(params);
2852 }
2853
2854 /* This function requires the caller holds hdev->lock */
2855 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2856 {
2857         struct hci_conn_params *params;
2858
2859         params = hci_conn_params_lookup(hdev, addr, addr_type);
2860         if (!params)
2861                 return;
2862
2863         hci_conn_params_free(params);
2864
2865         hci_update_background_scan(hdev);
2866
2867         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2868 }
2869
2870 /* This function requires the caller holds hdev->lock */
2871 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2872 {
2873         struct hci_conn_params *params, *tmp;
2874
2875         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2876                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2877                         continue;
2878
2879                 /* If trying to estabilish one time connection to disabled
2880                  * device, leave the params, but mark them as just once.
2881                  */
2882                 if (params->explicit_connect) {
2883                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2884                         continue;
2885                 }
2886
2887                 list_del(&params->list);
2888                 kfree(params);
2889         }
2890
2891         BT_DBG("All LE disabled connection parameters were removed");
2892 }
2893
2894 /* This function requires the caller holds hdev->lock */
2895 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2896 {
2897         struct hci_conn_params *params, *tmp;
2898
2899         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2900                 hci_conn_params_free(params);
2901
2902         BT_DBG("All LE connection parameters were removed");
2903 }
2904
2905 /* Copy the Identity Address of the controller.
2906  *
2907  * If the controller has a public BD_ADDR, then by default use that one.
2908  * If this is a LE only controller without a public address, default to
2909  * the static random address.
2910  *
2911  * For debugging purposes it is possible to force controllers with a
2912  * public address to use the static random address instead.
2913  *
2914  * In case BR/EDR has been disabled on a dual-mode controller and
2915  * userspace has configured a static address, then that address
2916  * becomes the identity address instead of the public BR/EDR address.
2917  */
2918 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2919                                u8 *bdaddr_type)
2920 {
2921         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2922             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2923             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2924              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2925                 bacpy(bdaddr, &hdev->static_addr);
2926                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2927         } else {
2928                 bacpy(bdaddr, &hdev->bdaddr);
2929                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2930         }
2931 }
2932
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	/* kzalloc: every field not assigned below starts out zeroed. */
	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* BR/EDR packet-type and link-policy defaults. */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	/* No advertising instances registered yet. */
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults: advertising, scanning, connection and data
	 * length parameters (controller-time units, see Core spec).
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* All per-device lists start out empty. */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	/* Deferred-work handlers for RX, command and TX processing as
	 * well as power management and error recovery.
	 */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3020
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release; dropping the last device
	 * reference triggers the release callback which frees hdev.
	 */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3028
3029 /* Register HCI device */
3030 int hci_register_dev(struct hci_dev *hdev)
3031 {
3032         int id, error;
3033
3034         if (!hdev->open || !hdev->close || !hdev->send)
3035                 return -EINVAL;
3036
3037         /* Do not allow HCI_AMP devices to register at index 0,
3038          * so the index can be used as the AMP controller ID.
3039          */
3040         switch (hdev->dev_type) {
3041         case HCI_PRIMARY:
3042                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3043                 break;
3044         case HCI_AMP:
3045                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3046                 break;
3047         default:
3048                 return -EINVAL;
3049         }
3050
3051         if (id < 0)
3052                 return id;
3053
3054         sprintf(hdev->name, "hci%d", id);
3055         hdev->id = id;
3056
3057         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3058
3059         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3060                                           WQ_MEM_RECLAIM, 1, hdev->name);
3061         if (!hdev->workqueue) {
3062                 error = -ENOMEM;
3063                 goto err;
3064         }
3065
3066         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3067                                               WQ_MEM_RECLAIM, 1, hdev->name);
3068         if (!hdev->req_workqueue) {
3069                 destroy_workqueue(hdev->workqueue);
3070                 error = -ENOMEM;
3071                 goto err;
3072         }
3073
3074         if (!IS_ERR_OR_NULL(bt_debugfs))
3075                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3076
3077         dev_set_name(&hdev->dev, "%s", hdev->name);
3078
3079         error = device_add(&hdev->dev);
3080         if (error < 0)
3081                 goto err_wqueue;
3082
3083         hci_leds_init(hdev);
3084
3085         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3086                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3087                                     hdev);
3088         if (hdev->rfkill) {
3089                 if (rfkill_register(hdev->rfkill) < 0) {
3090                         rfkill_destroy(hdev->rfkill);
3091                         hdev->rfkill = NULL;
3092                 }
3093         }
3094
3095         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3096                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3097
3098         hci_dev_set_flag(hdev, HCI_SETUP);
3099         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3100
3101         if (hdev->dev_type == HCI_PRIMARY) {
3102                 /* Assume BR/EDR support until proven otherwise (such as
3103                  * through reading supported features during init.
3104                  */
3105                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3106         }
3107
3108         write_lock(&hci_dev_list_lock);
3109         list_add(&hdev->list, &hci_dev_list);
3110         write_unlock(&hci_dev_list_lock);
3111
3112         /* Devices that are marked for raw-only usage are unconfigured
3113          * and should not be included in normal operation.
3114          */
3115         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3116                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3117
3118         hci_sock_dev_event(hdev, HCI_DEV_REG);
3119         hci_dev_hold(hdev);
3120
3121         queue_work(hdev->req_workqueue, &hdev->power_on);
3122
3123         return id;
3124
3125 err_wqueue:
3126         destroy_workqueue(hdev->workqueue);
3127         destroy_workqueue(hdev->req_workqueue);
3128 err:
3129         ida_simple_remove(&hci_index_ida, hdev->id);
3130
3131         return error;
3132 }
3133 EXPORT_SYMBOL(hci_register_dev);
3134
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away so concurrent operations can
	 * bail out early.
	 */
	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	/* Remember the index; hdev may be freed by hci_dev_put() below
	 * before the IDA entry is released.
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	/* Make sure no power-on work is still running before closing. */
	cancel_work_sync(&hdev->power_on);

	hci_dev_do_close(hdev);

	/* Only notify mgmt if the controller was fully set up (not
	 * still in init/setup/config).
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Release all per-device state under hdev->lock. */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev(); hdev must
	 * not be touched after this point.
	 */
	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3201
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies HCI sockets of the suspend; always succeeds. */
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3209
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies HCI sockets of the resume; always succeeds. */
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3217
3218 /* Reset HCI device */
3219 int hci_reset_dev(struct hci_dev *hdev)
3220 {
3221         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3222         struct sk_buff *skb;
3223
3224         skb = bt_skb_alloc(3, GFP_ATOMIC);
3225         if (!skb)
3226                 return -ENOMEM;
3227
3228         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3229         memcpy(skb_put(skb, 3), hw_err, 3);
3230
3231         /* Send Hardware Error to upper stack */
3232         return hci_recv_frame(hdev, skb);
3233 }
3234 EXPORT_SYMBOL(hci_reset_dev);
3235
3236 /* Receive frame from HCI drivers */
3237 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3238 {
3239         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3240                       && !test_bit(HCI_INIT, &hdev->flags))) {
3241                 kfree_skb(skb);
3242                 return -ENXIO;
3243         }
3244
3245         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3246             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3247             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3248                 kfree_skb(skb);
3249                 return -EINVAL;
3250         }
3251
3252         /* Incoming skb */
3253         bt_cb(skb)->incoming = 1;
3254
3255         /* Time stamp */
3256         __net_timestamp(skb);
3257
3258         skb_queue_tail(&hdev->rx_q, skb);
3259         queue_work(hdev->workqueue, &hdev->rx_work);
3260
3261         return 0;
3262 }
3263 EXPORT_SYMBOL(hci_recv_frame);
3264
3265 /* Receive diagnostic message from HCI drivers */
3266 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3267 {
3268         /* Mark as diagnostic packet */
3269         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3270
3271         /* Time stamp */
3272         __net_timestamp(skb);
3273
3274         skb_queue_tail(&hdev->rx_q, skb);
3275         queue_work(hdev->workqueue, &hdev->rx_work);
3276
3277         return 0;
3278 }
3279 EXPORT_SYMBOL(hci_recv_diag);
3280
/* Record a printf-formatted hardware-info string on the device,
 * replacing any previous value.
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	/* kvasprintf_const may return the format string itself when no
	 * formatting is required, hence the matching kfree_const().
	 */
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
3291
/* Record a printf-formatted firmware-info string on the device,
 * replacing any previous value.
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	/* kvasprintf_const may return the format string itself when no
	 * formatting is required, hence the matching kfree_const().
	 */
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
3302
3303 /* ---- Interface to upper protocols ---- */
3304
/* Register an upper-protocol callback structure; always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* Append under the callback-list mutex so concurrent
	 * register/unregister calls cannot corrupt the list.
	 */
	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3316
/* Unregister an upper-protocol callback structure; always succeeds. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* Unlink under the callback-list mutex, mirroring
	 * hci_register_cb().
	 */
	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3328
/* Hand one outgoing packet to the transport driver, mirroring it to
 * the monitor and (in promiscuous mode) to HCI sockets first. Consumes
 * the skb in all cases.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Drop the packet if the transport is no longer running. */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
3361
3362 /* Send HCI command */
3363 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3364                  const void *param)
3365 {
3366         struct sk_buff *skb;
3367
3368         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3369
3370         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3371         if (!skb) {
3372                 BT_ERR("%s no memory for command", hdev->name);
3373                 return -ENOMEM;
3374         }
3375
3376         /* Stand-alone HCI commands must be flagged as
3377          * single-command requests.
3378          */
3379         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3380
3381         skb_queue_tail(&hdev->cmd_q, skb);
3382         queue_work(hdev->workqueue, &hdev->cmd_work);
3383
3384         return 0;
3385 }
3386
3387 /* Get data from the previously sent command */
3388 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3389 {
3390         struct hci_command_hdr *hdr;
3391
3392         if (!hdev->sent_cmd)
3393                 return NULL;
3394
3395         hdr = (void *) hdev->sent_cmd->data;
3396
3397         if (hdr->opcode != cpu_to_le16(opcode))
3398                 return NULL;
3399
3400         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3401
3402         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3403 }
3404
/* Send HCI command and wait for command commplete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	/* Synchronous commands are only valid while the device is up. */
	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* Serialize against other synchronous requests via req_lock. */
	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
3423
3424 /* Send ACL data */
3425 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3426 {
3427         struct hci_acl_hdr *hdr;
3428         int len = skb->len;
3429
3430         skb_push(skb, HCI_ACL_HDR_SIZE);
3431         skb_reset_transport_header(skb);
3432         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3433         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3434         hdr->dlen   = cpu_to_le16(len);
3435 }
3436
/* Add ACL headers to skb (and any frag_list fragments) and queue the
 * result on the channel's data queue.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict skb to its linear head; fragments are handled
	 * separately via the frag_list below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	/* Primary controllers address by connection handle; AMP
	 * controllers address by logical-channel handle.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of
		 * ACL_START.
		 */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			/* NOTE(review): continuation fragments always use
			 * conn->handle, even on HCI_AMP where the first
			 * fragment used chan->handle above — confirm this
			 * asymmetry is intentional.
			 */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3498
/* Queue outgoing ACL data on the channel and kick the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3509
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the SCO header (connection handle + payload length)
	 * and prepend it to the packet.
	 */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	/* Queue on the connection and kick the TX worker. */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3530
3531 /* ---- HCI TX task (outgoing data) ---- */
3532
/* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest in-flight packets, and compute its fair-share packet quote
 * from the available controller buffer count.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip connections of other types or with nothing
		 * queued to send.
		 */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest packets in
		 * flight (c->sent).
		 */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type has
		 * been considered.
		 */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer budget that matches the link type. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE shares ACL buffers when the controller
			 * reports no dedicated LE buffers (le_mtu == 0).
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share across contenders, at least one packet. */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3593
3594 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3595 {
3596         struct hci_conn_hash *h = &hdev->conn_hash;
3597         struct hci_conn *c;
3598
3599         BT_ERR("%s link tx timeout", hdev->name);
3600
3601         rcu_read_lock();
3602
3603         /* Kill stalled connections */
3604         list_for_each_entry_rcu(c, &h->list, list) {
3605                 if (c->type == type && c->sent) {
3606                         BT_ERR("%s killing stalled connection %pMR",
3607                                hdev->name, &c->dst);
3608                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3609                 }
3610         }
3611
3612         rcu_read_unlock();
3613 }
3614
/* Channel scheduler: among channels of link @type with queued data,
 * pick one whose head packet has the highest priority seen, breaking
 * ties in favour of the connection with the fewest packets in flight.
 * *quote receives the winner's fair share of free controller buffers
 * (at least 1). Returns NULL when no channel is ready.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the queue head competes; lower-priority
			 * heads are ignored this round.
			 */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* New highest priority: restart arbitration so
			 * num/min only count channels at this level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break on least-sent connection */
			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool that flow-controls this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE buffers: share the ACL pool */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Even share among same-priority channels, minimum one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3696
/* Anti-starvation pass, run after a TX round that sent something:
 * a channel that sent nothing this round (chan->sent == 0) gets its
 * head packet promoted to HCI_PRIO_MAX - 1 so it wins arbitration in
 * hci_chan_sent() next time; channels that did send merely have their
 * per-round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: just reset its
			 * counter, no promotion needed.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) promotion level */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3746
3747 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3748 {
3749         /* Calculate count of blocks used by this packet */
3750         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3751 }
3752
3753 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3754 {
3755         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3756                 /* ACL tx timeout must be longer than maximum
3757                  * link supervision timeout (40.9 seconds) */
3758                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3759                                        HCI_ACL_TX_TIMEOUT))
3760                         hci_link_tx_to(hdev, ACL_LINK);
3761         }
3762 }
3763
/* Packet-based ACL scheduling: repeatedly pick the best channel via
 * hci_chan_sent() and drain up to its quote of packets while the
 * controller still has free ACL buffers (hdev->acl_cnt).
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Kill stalled links if buffers stopped coming back */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority the channel won arbitration with; used below
		 * to re-arbitrate as soon as a lower-priority packet
		 * reaches the head of the queue.
		 */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: promote starved channels for next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3801
3802 static void hci_sched_acl_blk(struct hci_dev *hdev)
3803 {
3804         unsigned int cnt = hdev->block_cnt;
3805         struct hci_chan *chan;
3806         struct sk_buff *skb;
3807         int quote;
3808         u8 type;
3809
3810         __check_timeout(hdev, cnt);
3811
3812         BT_DBG("%s", hdev->name);
3813
3814         if (hdev->dev_type == HCI_AMP)
3815                 type = AMP_LINK;
3816         else
3817                 type = ACL_LINK;
3818
3819         while (hdev->block_cnt > 0 &&
3820                (chan = hci_chan_sent(hdev, type, &quote))) {
3821                 u32 priority = (skb_peek(&chan->data_q))->priority;
3822                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3823                         int blocks;
3824
3825                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3826                                skb->len, skb->priority);
3827
3828                         /* Stop if priority has changed */
3829                         if (skb->priority < priority)
3830                                 break;
3831
3832                         skb = skb_dequeue(&chan->data_q);
3833
3834                         blocks = __get_blocks(hdev, skb);
3835                         if (blocks > hdev->block_cnt)
3836                                 return;
3837
3838                         hci_conn_enter_active_mode(chan->conn,
3839                                                    bt_cb(skb)->force_active);
3840
3841                         hci_send_frame(hdev, skb);
3842                         hdev->acl_last_tx = jiffies;
3843
3844                         hdev->block_cnt -= blocks;
3845                         quote -= blocks;
3846
3847                         chan->sent += blocks;
3848                         chan->conn->sent += blocks;
3849                 }
3850         }
3851
3852         if (cnt != hdev->block_cnt)
3853                 hci_prio_recalculate(hdev, type);
3854 }
3855
3856 static void hci_sched_acl(struct hci_dev *hdev)
3857 {
3858         BT_DBG("%s", hdev->name);
3859
3860         /* No ACL link over BR/EDR controller */
3861         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3862                 return;
3863
3864         /* No AMP link over AMP controller */
3865         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3866                 return;
3867
3868         switch (hdev->flow_ctl_mode) {
3869         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3870                 hci_sched_acl_pkt(hdev);
3871                 break;
3872
3873         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3874                 hci_sched_acl_blk(hdev);
3875                 break;
3876         }
3877 }
3878
3879 /* Schedule SCO */
3880 static void hci_sched_sco(struct hci_dev *hdev)
3881 {
3882         struct hci_conn *conn;
3883         struct sk_buff *skb;
3884         int quote;
3885
3886         BT_DBG("%s", hdev->name);
3887
3888         if (!hci_conn_num(hdev, SCO_LINK))
3889                 return;
3890
3891         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3892                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3893                         BT_DBG("skb %p len %d", skb, skb->len);
3894                         hci_send_frame(hdev, skb);
3895
3896                         conn->sent++;
3897                         if (conn->sent == ~0)
3898                                 conn->sent = 0;
3899                 }
3900         }
3901 }
3902
3903 static void hci_sched_esco(struct hci_dev *hdev)
3904 {
3905         struct hci_conn *conn;
3906         struct sk_buff *skb;
3907         int quote;
3908
3909         BT_DBG("%s", hdev->name);
3910
3911         if (!hci_conn_num(hdev, ESCO_LINK))
3912                 return;
3913
3914         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3915                                                      &quote))) {
3916                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3917                         BT_DBG("skb %p len %d", skb, skb->len);
3918                         hci_send_frame(hdev, skb);
3919
3920                         conn->sent++;
3921                         if (conn->sent == ~0)
3922                                 conn->sent = 0;
3923                 }
3924         }
3925 }
3926
/* LE scheduling, analogous to hci_sched_acl_pkt(): drains channel
 * queues against the LE buffer pool when the controller has one
 * (le_pkts != 0), otherwise against the shared ACL pool.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* NOTE(review): counter selection keys off le_pkts here, while
	 * hci_chan_sent()/hci_low_sent() key off le_mtu for LE_LINK --
	 * presumably the two are set together, but worth confirming
	 * they cannot disagree.
	 */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority the channel won arbitration with */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: promote starved channels for next round */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3977
3978 static void hci_tx_work(struct work_struct *work)
3979 {
3980         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3981         struct sk_buff *skb;
3982
3983         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3984                hdev->sco_cnt, hdev->le_cnt);
3985
3986         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3987                 /* Schedule queues and send stuff to HCI driver */
3988                 hci_sched_acl(hdev);
3989                 hci_sched_sco(hdev);
3990                 hci_sched_esco(hdev);
3991                 hci_sched_le(hdev);
3992         }
3993
3994         /* Send next queued raw (unknown type) packet */
3995         while ((skb = skb_dequeue(&hdev->raw_q)))
3996                 hci_send_frame(hdev, skb);
3997 }
3998
3999 /* ----- HCI RX task (incoming data processing) ----- */
4000
4001 /* ACL data packet */
4002 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4003 {
4004         struct hci_acl_hdr *hdr = (void *) skb->data;
4005         struct hci_conn *conn;
4006         __u16 handle, flags;
4007
4008         skb_pull(skb, HCI_ACL_HDR_SIZE);
4009
4010         handle = __le16_to_cpu(hdr->handle);
4011         flags  = hci_flags(handle);
4012         handle = hci_handle(handle);
4013
4014         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4015                handle, flags);
4016
4017         hdev->stat.acl_rx++;
4018
4019         hci_dev_lock(hdev);
4020         conn = hci_conn_hash_lookup_handle(hdev, handle);
4021         hci_dev_unlock(hdev);
4022
4023         if (conn) {
4024                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4025
4026                 /* Send to upper protocol */
4027                 l2cap_recv_acldata(conn, skb, flags);
4028                 return;
4029         } else {
4030                 BT_ERR("%s ACL packet for unknown connection handle %d",
4031                        hdev->name, handle);
4032         }
4033
4034         kfree_skb(skb);
4035 }
4036
4037 /* SCO data packet */
4038 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4039 {
4040         struct hci_sco_hdr *hdr = (void *) skb->data;
4041         struct hci_conn *conn;
4042         __u16 handle;
4043
4044         skb_pull(skb, HCI_SCO_HDR_SIZE);
4045
4046         handle = __le16_to_cpu(hdr->handle);
4047
4048         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4049
4050         hdev->stat.sco_rx++;
4051
4052         hci_dev_lock(hdev);
4053         conn = hci_conn_hash_lookup_handle(hdev, handle);
4054         hci_dev_unlock(hdev);
4055
4056         if (conn) {
4057                 /* Send to upper protocol */
4058                 sco_recv_scodata(conn, skb);
4059                 return;
4060         } else {
4061                 BT_ERR("%s SCO packet for unknown connection handle %d",
4062                        hdev->name, handle);
4063         }
4064
4065         kfree_skb(skb);
4066 }
4067
4068 static bool hci_req_is_complete(struct hci_dev *hdev)
4069 {
4070         struct sk_buff *skb;
4071
4072         skb = skb_peek(&hdev->cmd_q);
4073         if (!skb)
4074                 return true;
4075
4076         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4077 }
4078
4079 static void hci_resend_last(struct hci_dev *hdev)
4080 {
4081         struct hci_command_hdr *sent;
4082         struct sk_buff *skb;
4083         u16 opcode;
4084
4085         if (!hdev->sent_cmd)
4086                 return;
4087
4088         sent = (void *) hdev->sent_cmd->data;
4089         opcode = __le16_to_cpu(sent->opcode);
4090         if (opcode == HCI_OP_RESET)
4091                 return;
4092
4093         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4094         if (!skb)
4095                 return;
4096
4097         skb_queue_head(&hdev->cmd_q, skb);
4098         queue_work(hdev->workqueue, &hdev->cmd_work);
4099 }
4100
/* Resolve which completion callback should fire for the command that
 * just completed and, when the whole request has finished, flush its
 * remaining queued commands. Callbacks are returned through
 * @req_complete / @req_complete_skb rather than invoked here.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Head of the next request reached: put it back and stop */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* The callback, if any, travels on one of the flushed
		 * commands; pick it up before freeing.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4162
/* RX worker: drain hdev->rx_q, mirror each frame to the monitor (and
 * promiscuous sockets), then dispatch it by HCI packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In user channel mode a userspace process owns the
		 * device; the kernel stack must not consume frames.
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
4217
4218 static void hci_cmd_work(struct work_struct *work)
4219 {
4220         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4221         struct sk_buff *skb;
4222
4223         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4224                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4225
4226         /* Send queued commands */
4227         if (atomic_read(&hdev->cmd_cnt)) {
4228                 skb = skb_dequeue(&hdev->cmd_q);
4229                 if (!skb)
4230                         return;
4231
4232                 kfree_skb(hdev->sent_cmd);
4233
4234                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4235                 if (hdev->sent_cmd) {
4236                         atomic_dec(&hdev->cmd_cnt);
4237                         hci_send_frame(hdev, skb);
4238                         if (test_bit(HCI_RESET, &hdev->flags))
4239                                 cancel_delayed_work(&hdev->cmd_timer);
4240                         else
4241                                 schedule_delayed_work(&hdev->cmd_timer,
4242                                                       HCI_CMD_TIMEOUT);
4243                 } else {
4244                         skb_queue_head(&hdev->cmd_q, skb);
4245                         queue_work(hdev->workqueue, &hdev->cmd_work);
4246                 }
4247         }
4248 }