/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

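/* "dut_mode" exposes the Device Under Test mode flag: reading returns
 * "Y\n" or "N\n", writing a boolean toggles the mode. DUT mode lets an
 * external Bluetooth tester drive the controller directly over the air
 * during qualification testing.
 */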
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
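	/* There is no HCI command to leave Device Under Test mode again;
	 * disabling is done by resetting the controller instead.
	 */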
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no need
	 * for the vendor callback. Instead just store the desired value and
	 * the setting will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

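/* hdev->commands[] caches the 64-octet bitmask returned by the Read
 * Local Supported Commands command: octet n, bit m flags support for
 * one specific HCI command, which is why the support checks below
 * index into it (e.g. commands[14] & 0x20 for Read Local Supported
 * Features).
 */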
static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout: 0x7d00 * 0.625 ms = ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

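/* The mask sent with Set Event Mask is a 64-bit bitfield in
 * little-endian octet order: bit n of the mask enables the event with
 * event code n + 1, so events[0] covers event codes 0x01-0x08,
 * events[1] covers 0x09-0x10, and so on.
 */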
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page
		 * should also be available. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}

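/* Program the default link policy: the bits below control which link
 * mode changes (role switch, hold, sniff, park) are allowed on newly
 * established ACL connections.
 */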
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* If the Connectionless Slave Broadcast master role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
		changed = true;
	}

	/* If the Connectionless Slave Broadcast slave role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			events[2] |= 0x08;	/* LE Channel Selection
						 * Algorithm
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08;	/* LE PHY Update Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = hdev->le_max_tx_len;
		cp.tx_time = hdev->le_max_tx_time;
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		/* No transmitter PHY or receiver PHY preferences */
		cp.all_phys = 0x03;
		cp.tx_phys = 0;
		cp.rx_phys = 0;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}

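/* Controller bring-up runs as up to four synchronous request stages:
 * init1 resets the controller and reads basic identity, init2 programs
 * the event mask and core settings, and init3/init4 configure the
 * optional BR/EDR and LE features probed during the earlier stages.
 */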
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

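	/* Keep the resolve list sorted by ascending |RSSI| so that names
	 * of the strongest (and most likely nearest) devices get resolved
	 * first.
	 */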
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

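/* Copy up to @num entries from the inquiry cache into @buf as an array
 * of struct inquiry_info records and return how many were copied. The
 * caller must hold hdev->lock.
 */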
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

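	/* ir.length is in units of 1.28 seconds, so budgeting 2 seconds
	 * of jiffies per unit leaves the controller enough headroom to
	 * finish the inquiry before the request times out.
	 */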
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so allocate a temporary buffer and
	 * copy it to user space afterwards.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command clears all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * has completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
1490          * possible.
1491          *
1492          * When this function is called for a user channel, the flag
1493          * HCI_USER_CHANNEL will be set first before attempting to
1494          * open the device.
1495          */
1496         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1497             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1498                 err = -EOPNOTSUPP;
1499                 goto done;
1500         }
1501
1502         /* We need to ensure that no other power on/off work is pending
1503          * before proceeding to call hci_dev_do_open. This is
1504          * particularly important if the setup procedure has not yet
1505          * completed.
1506          */
1507         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1508                 cancel_delayed_work(&hdev->power_off);
1509
1510         /* After this call it is guaranteed that the setup procedure
1511          * has finished. This means that error conditions like RFKILL
1512          * or the lack of a valid public or static random address apply.
1513          */
1514         flush_workqueue(hdev->req_workqueue);
1515
1516         /* For controllers not using the management interface and that
1517          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1518          * so that pairing works for them. Once the management interface
1519          * is in use this bit will be cleared again and userspace has
1520          * to explicitly enable it.
1521          */
1522         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1523             !hci_dev_test_flag(hdev, HCI_MGMT))
1524                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1525
1526         err = hci_dev_do_open(hdev);
1527
1528 done:
1529         hci_dev_put(hdev);
1530         return err;
1531 }
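
/* Example (illustrative, not part of this file): hci_dev_open() is
 * reached from user space through the HCIDEVUP ioctl on a raw HCI
 * socket, which is what "hciconfig hci0 up" ends up doing. A minimal
 * sketch, assuming the usual BlueZ headers; error handling trimmed:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <bluetooth/bluetooth.h>
 *	#include <bluetooth/hci.h>
 *
 *	int main(void)
 *	{
 *		int dev_id = 0;		// hci0
 *		int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *		if (fd < 0)
 *			return 1;
 *
 *		// Fails with EOPNOTSUPP for unconfigured controllers
 *		// that are not in user channel mode, as enforced above.
 *		if (ioctl(fd, HCIDEVUP, dev_id) < 0)
 *			perror("HCIDEVUP");
 *
 *		close(fd);
 *		return 0;
 *	}
 */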
1532
1533 /* This function requires the caller holds hdev->lock */
1534 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1535 {
1536         struct hci_conn_params *p;
1537
1538         list_for_each_entry(p, &hdev->le_conn_params, list) {
1539                 if (p->conn) {
1540                         hci_conn_drop(p->conn);
1541                         hci_conn_put(p->conn);
1542                         p->conn = NULL;
1543                 }
1544                 list_del_init(&p->action);
1545         }
1546
1547         BT_DBG("All LE pending actions cleared");
1548 }
1549
1550 int hci_dev_do_close(struct hci_dev *hdev)
1551 {
1552         bool auto_off;
1553
1554         BT_DBG("%s %p", hdev->name, hdev);
1555
1556         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1557             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1558             test_bit(HCI_UP, &hdev->flags)) {
1559                 /* Execute vendor specific shutdown routine */
1560                 if (hdev->shutdown)
1561                         hdev->shutdown(hdev);
1562         }
1563
1564         cancel_delayed_work(&hdev->power_off);
1565
1566         hci_request_cancel_all(hdev);
1567         hci_req_sync_lock(hdev);
1568
1569         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1570                 cancel_delayed_work_sync(&hdev->cmd_timer);
1571                 hci_req_sync_unlock(hdev);
1572                 return 0;
1573         }
1574
1575         hci_leds_update_powered(hdev, false);
1576
1577         /* Flush RX and TX works */
1578         flush_work(&hdev->tx_work);
1579         flush_work(&hdev->rx_work);
1580
1581         if (hdev->discov_timeout > 0) {
1582                 hdev->discov_timeout = 0;
1583                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1584                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1585         }
1586
1587         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1588                 cancel_delayed_work(&hdev->service_cache);
1589
1590         if (hci_dev_test_flag(hdev, HCI_MGMT))
1591                 cancel_delayed_work_sync(&hdev->rpa_expired);
1592
1593         /* Avoid potential lockdep warnings from the *_flush() calls by
1594          * ensuring the workqueue is empty up front.
1595          */
1596         drain_workqueue(hdev->workqueue);
1597
1598         hci_dev_lock(hdev);
1599
1600         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1601
1602         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1603
1604         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1605             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1606             hci_dev_test_flag(hdev, HCI_MGMT))
1607                 __mgmt_power_off(hdev);
1608
1609         hci_inquiry_cache_flush(hdev);
1610         hci_pend_le_actions_clear(hdev);
1611         hci_conn_hash_flush(hdev);
1612         hci_dev_unlock(hdev);
1613
1614         smp_unregister(hdev);
1615
1616         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1617
1618         if (hdev->flush)
1619                 hdev->flush(hdev);
1620
1621         /* Reset device */
1622         skb_queue_purge(&hdev->cmd_q);
1623         atomic_set(&hdev->cmd_cnt, 1);
1624         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1625             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1626                 set_bit(HCI_INIT, &hdev->flags);
1627                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1628                 clear_bit(HCI_INIT, &hdev->flags);
1629         }
1630
1631         /* Flush cmd work */
1632         flush_work(&hdev->cmd_work);
1633
1634         /* Drop queues */
1635         skb_queue_purge(&hdev->rx_q);
1636         skb_queue_purge(&hdev->cmd_q);
1637         skb_queue_purge(&hdev->raw_q);
1638
1639         /* Drop last sent command */
1640         if (hdev->sent_cmd) {
1641                 cancel_delayed_work_sync(&hdev->cmd_timer);
1642                 kfree_skb(hdev->sent_cmd);
1643                 hdev->sent_cmd = NULL;
1644         }
1645
1646         clear_bit(HCI_RUNNING, &hdev->flags);
1647         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1648
1649         /* After this point our queues are empty
1650          * and no tasks are scheduled. */
1651         hdev->close(hdev);
1652
1653         /* Clear flags */
1654         hdev->flags &= BIT(HCI_RAW);
1655         hci_dev_clear_volatile_flags(hdev);
1656
1657         /* Controller radio is available but is currently powered down */
1658         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1659
1660         memset(hdev->eir, 0, sizeof(hdev->eir));
1661         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1662         bacpy(&hdev->random_addr, BDADDR_ANY);
1663
1664         hci_req_sync_unlock(hdev);
1665
1666         hci_dev_put(hdev);
1667         return 0;
1668 }
1669
1670 int hci_dev_close(__u16 dev)
1671 {
1672         struct hci_dev *hdev;
1673         int err;
1674
1675         hdev = hci_dev_get(dev);
1676         if (!hdev)
1677                 return -ENODEV;
1678
1679         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1680                 err = -EBUSY;
1681                 goto done;
1682         }
1683
1684         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1685                 cancel_delayed_work(&hdev->power_off);
1686
1687         err = hci_dev_do_close(hdev);
1688
1689 done:
1690         hci_dev_put(hdev);
1691         return err;
1692 }
1693
1694 static int hci_dev_do_reset(struct hci_dev *hdev)
1695 {
1696         int ret;
1697
1698         BT_DBG("%s %p", hdev->name, hdev);
1699
1700         hci_req_sync_lock(hdev);
1701
1702         /* Drop queues */
1703         skb_queue_purge(&hdev->rx_q);
1704         skb_queue_purge(&hdev->cmd_q);
1705
1706         /* Avoid potential lockdep warnings from the *_flush() calls by
1707          * ensuring the workqueue is empty up front.
1708          */
1709         drain_workqueue(hdev->workqueue);
1710
1711         hci_dev_lock(hdev);
1712         hci_inquiry_cache_flush(hdev);
1713         hci_conn_hash_flush(hdev);
1714         hci_dev_unlock(hdev);
1715
1716         if (hdev->flush)
1717                 hdev->flush(hdev);
1718
1719         atomic_set(&hdev->cmd_cnt, 1);
1720         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1721
1722         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1723
1724         hci_req_sync_unlock(hdev);
1725         return ret;
1726 }
1727
1728 int hci_dev_reset(__u16 dev)
1729 {
1730         struct hci_dev *hdev;
1731         int err;
1732
1733         hdev = hci_dev_get(dev);
1734         if (!hdev)
1735                 return -ENODEV;
1736
1737         if (!test_bit(HCI_UP, &hdev->flags)) {
1738                 err = -ENETDOWN;
1739                 goto done;
1740         }
1741
1742         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1743                 err = -EBUSY;
1744                 goto done;
1745         }
1746
1747         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1748                 err = -EOPNOTSUPP;
1749                 goto done;
1750         }
1751
1752         err = hci_dev_do_reset(hdev);
1753
1754 done:
1755         hci_dev_put(hdev);
1756         return err;
1757 }
1758
1759 int hci_dev_reset_stat(__u16 dev)
1760 {
1761         struct hci_dev *hdev;
1762         int ret = 0;
1763
1764         hdev = hci_dev_get(dev);
1765         if (!hdev)
1766                 return -ENODEV;
1767
1768         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1769                 ret = -EBUSY;
1770                 goto done;
1771         }
1772
1773         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1774                 ret = -EOPNOTSUPP;
1775                 goto done;
1776         }
1777
1778         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1779
1780 done:
1781         hci_dev_put(hdev);
1782         return ret;
1783 }
1784
1785 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1786 {
1787         bool conn_changed, discov_changed;
1788
1789         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1790
1791         if ((scan & SCAN_PAGE))
1792                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1793                                                           HCI_CONNECTABLE);
1794         else
1795                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1796                                                            HCI_CONNECTABLE);
1797
1798         if ((scan & SCAN_INQUIRY)) {
1799                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1800                                                             HCI_DISCOVERABLE);
1801         } else {
1802                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1803                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1804                                                              HCI_DISCOVERABLE);
1805         }
1806
1807         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1808                 return;
1809
1810         if (conn_changed || discov_changed) {
1811                 /* In case this was disabled through mgmt */
1812                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1813
1814                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1815                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1816
1817                 mgmt_new_settings(hdev);
1818         }
1819 }
1820
1821 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1822 {
1823         struct hci_dev *hdev;
1824         struct hci_dev_req dr;
1825         int err = 0;
1826
1827         if (copy_from_user(&dr, arg, sizeof(dr)))
1828                 return -EFAULT;
1829
1830         hdev = hci_dev_get(dr.dev_id);
1831         if (!hdev)
1832                 return -ENODEV;
1833
1834         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1835                 err = -EBUSY;
1836                 goto done;
1837         }
1838
1839         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1840                 err = -EOPNOTSUPP;
1841                 goto done;
1842         }
1843
1844         if (hdev->dev_type != HCI_PRIMARY) {
1845                 err = -EOPNOTSUPP;
1846                 goto done;
1847         }
1848
1849         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1850                 err = -EOPNOTSUPP;
1851                 goto done;
1852         }
1853
1854         switch (cmd) {
1855         case HCISETAUTH:
1856                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1857                                    HCI_INIT_TIMEOUT, NULL);
1858                 break;
1859
1860         case HCISETENCRYPT:
1861                 if (!lmp_encrypt_capable(hdev)) {
1862                         err = -EOPNOTSUPP;
1863                         break;
1864                 }
1865
1866                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1867                         /* Auth must be enabled first */
1868                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1869                                            HCI_INIT_TIMEOUT, NULL);
1870                         if (err)
1871                                 break;
1872                 }
1873
1874                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1875                                    HCI_INIT_TIMEOUT, NULL);
1876                 break;
1877
1878         case HCISETSCAN:
1879                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1880                                    HCI_INIT_TIMEOUT, NULL);
1881
1882                 /* Ensure that the connectable and discoverable states
1883                  * get correctly modified as this was a non-mgmt change.
1884                  */
1885                 if (!err)
1886                         hci_update_scan_state(hdev, dr.dev_opt);
1887                 break;
1888
1889         case HCISETLINKPOL:
1890                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1891                                    HCI_INIT_TIMEOUT, NULL);
1892                 break;
1893
1894         case HCISETLINKMODE:
1895                 hdev->link_mode = ((__u16) dr.dev_opt) &
1896                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1897                 break;
1898
1899         case HCISETPTYPE:
1900                 hdev->pkt_type = (__u16) dr.dev_opt;
1901                 break;
1902
1903         case HCISETACLMTU:
1904                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1905                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1906                 break;
1907
1908         case HCISETSCOMTU:
1909                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1910                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1911                 break;
1912
1913         default:
1914                 err = -EINVAL;
1915                 break;
1916         }
1917
1918 done:
1919         hci_dev_put(hdev);
1920         return err;
1921 }
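
/* Example (illustrative, not part of this file): the HCISET* commands
 * above take a struct hci_dev_req. A hedged user-space sketch that
 * enables page and inquiry scan on hci0, equivalent to
 * "hciconfig hci0 piscan"; fd is a raw HCI socket as in the earlier
 * example:
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;
 *	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;
 *	if (ioctl(fd, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 *
 * For HCISETACLMTU and HCISETSCOMTU, dev_opt packs two 16-bit values:
 * the packet count in the first __u16 and the MTU in the second,
 * matching the *((__u16 *) &dr.dev_opt + 0/1) unpacking above. On a
 * little-endian host that amounts to:
 *
 *	dr.dev_opt = (mtu << 16) | pkts;
 */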
1922
1923 int hci_get_dev_list(void __user *arg)
1924 {
1925         struct hci_dev *hdev;
1926         struct hci_dev_list_req *dl;
1927         struct hci_dev_req *dr;
1928         int n = 0, size, err;
1929         __u16 dev_num;
1930
1931         if (get_user(dev_num, (__u16 __user *) arg))
1932                 return -EFAULT;
1933
1934         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1935                 return -EINVAL;
1936
1937         size = sizeof(*dl) + dev_num * sizeof(*dr);
1938
1939         dl = kzalloc(size, GFP_KERNEL);
1940         if (!dl)
1941                 return -ENOMEM;
1942
1943         dr = dl->dev_req;
1944
1945         read_lock(&hci_dev_list_lock);
1946         list_for_each_entry(hdev, &hci_dev_list, list) {
1947                 unsigned long flags = hdev->flags;
1948
1949                 /* When auto-off is configured the transport is actually
1950                  * running, but in that case still report the device as
1951                  * down.
1952                  */
1953                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1954                         flags &= ~BIT(HCI_UP);
1955
1956                 (dr + n)->dev_id  = hdev->id;
1957                 (dr + n)->dev_opt = flags;
1958
1959                 if (++n >= dev_num)
1960                         break;
1961         }
1962         read_unlock(&hci_dev_list_lock);
1963
1964         dl->dev_num = n;
1965         size = sizeof(*dl) + n * sizeof(*dr);
1966
1967         err = copy_to_user(arg, dl, size);
1968         kfree(dl);
1969
1970         return err ? -EFAULT : 0;
1971 }
1972
1973 int hci_get_dev_info(void __user *arg)
1974 {
1975         struct hci_dev *hdev;
1976         struct hci_dev_info di;
1977         unsigned long flags;
1978         int err = 0;
1979
1980         if (copy_from_user(&di, arg, sizeof(di)))
1981                 return -EFAULT;
1982
1983         hdev = hci_dev_get(di.dev_id);
1984         if (!hdev)
1985                 return -ENODEV;
1986
1987         /* When auto-off is configured the transport is actually
1988          * running, but in that case still report the device as
1989          * down.
1990          */
1991         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1992                 flags = hdev->flags & ~BIT(HCI_UP);
1993         else
1994                 flags = hdev->flags;
1995
1996         strcpy(di.name, hdev->name);
1997         di.bdaddr   = hdev->bdaddr;
1998         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1999         di.flags    = flags;
2000         di.pkt_type = hdev->pkt_type;
2001         if (lmp_bredr_capable(hdev)) {
2002                 di.acl_mtu  = hdev->acl_mtu;
2003                 di.acl_pkts = hdev->acl_pkts;
2004                 di.sco_mtu  = hdev->sco_mtu;
2005                 di.sco_pkts = hdev->sco_pkts;
2006         } else {
2007                 di.acl_mtu  = hdev->le_mtu;
2008                 di.acl_pkts = hdev->le_pkts;
2009                 di.sco_mtu  = 0;
2010                 di.sco_pkts = 0;
2011         }
2012         di.link_policy = hdev->link_policy;
2013         di.link_mode   = hdev->link_mode;
2014
2015         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2016         memcpy(&di.features, &hdev->features, sizeof(di.features));
2017
2018         if (copy_to_user(arg, &di, sizeof(di)))
2019                 err = -EFAULT;
2020
2021         hci_dev_put(hdev);
2022
2023         return err;
2024 }
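
/* Example (illustrative, not part of this file): reading back the
 * state exported by the two helpers above. HCIGETDEVLIST takes a
 * variable-length struct hci_dev_list_req whose leading __u16 holds
 * the number of slots the caller allocated, mirroring the get_user()
 * read in hci_get_dev_list(); HCIGETDEVINFO fills a single
 * struct hci_dev_info. Sketch with error handling trimmed:
 *
 *	struct hci_dev_info di = { .dev_id = 0 };
 *
 *	if (ioctl(fd, HCIGETDEVINFO, (void *) &di) == 0) {
 *		// Auto-off devices are reported as down, see above
 *		int up = !!(di.flags & (1 << HCI_UP));
 *
 *		printf("%s is %s\n", di.name, up ? "up" : "down");
 *	}
 */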
2025
2026 /* ---- Interface to HCI drivers ---- */
2027
2028 static int hci_rfkill_set_block(void *data, bool blocked)
2029 {
2030         struct hci_dev *hdev = data;
2031
2032         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2033
2034         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2035                 return -EBUSY;
2036
2037         if (blocked) {
2038                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2039                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2040                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2041                         hci_dev_do_close(hdev);
2042         } else {
2043                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2044         }
2045
2046         return 0;
2047 }
2048
2049 static const struct rfkill_ops hci_rfkill_ops = {
2050         .set_block = hci_rfkill_set_block,
2051 };
2052
2053 static void hci_power_on(struct work_struct *work)
2054 {
2055         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2056         int err;
2057
2058         BT_DBG("%s", hdev->name);
2059
2060         if (test_bit(HCI_UP, &hdev->flags) &&
2061             hci_dev_test_flag(hdev, HCI_MGMT) &&
2062             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2063                 cancel_delayed_work(&hdev->power_off);
2064                 hci_req_sync_lock(hdev);
2065                 err = __hci_req_hci_power_on(hdev);
2066                 hci_req_sync_unlock(hdev);
2067                 mgmt_power_on(hdev, err);
2068                 return;
2069         }
2070
2071         err = hci_dev_do_open(hdev);
2072         if (err < 0) {
2073                 hci_dev_lock(hdev);
2074                 mgmt_set_powered_failed(hdev, err);
2075                 hci_dev_unlock(hdev);
2076                 return;
2077         }
2078
2079         /* During the HCI setup phase, a few error conditions are
2080          * ignored and they need to be checked now. If they are still
2081          * valid, it is important to turn the device back off.
2082          */
2083         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2084             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2085             (hdev->dev_type == HCI_PRIMARY &&
2086              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2087              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2088                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2089                 hci_dev_do_close(hdev);
2090         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2091                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2092                                    HCI_AUTO_OFF_TIMEOUT);
2093         }
2094
2095         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2096                 /* For unconfigured devices, set the HCI_RAW flag
2097                  * so that userspace can easily identify them.
2098                  */
2099                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2100                         set_bit(HCI_RAW, &hdev->flags);
2101
2102                 /* For fully configured devices, this will send
2103                  * the Index Added event. For unconfigured devices,
2104                  * it will send the Unconfigured Index Added event.
2105                  *
2106                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2107                  * and no event will be sent.
2108                  */
2109                 mgmt_index_added(hdev);
2110         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2111                 /* Now that the controller is configured, it is
2112                  * important to clear the HCI_RAW flag.
2113                  */
2114                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2115                         clear_bit(HCI_RAW, &hdev->flags);
2116
2117                 /* Powering on the controller with HCI_CONFIG set only
2118                  * happens with the transition from unconfigured to
2119                  * configured. This will send the Index Added event.
2120                  */
2121                 mgmt_index_added(hdev);
2122         }
2123 }
2124
2125 static void hci_power_off(struct work_struct *work)
2126 {
2127         struct hci_dev *hdev = container_of(work, struct hci_dev,
2128                                             power_off.work);
2129
2130         BT_DBG("%s", hdev->name);
2131
2132         hci_dev_do_close(hdev);
2133 }
2134
2135 static void hci_error_reset(struct work_struct *work)
2136 {
2137         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2138
2139         BT_DBG("%s", hdev->name);
2140
2141         if (hdev->hw_error)
2142                 hdev->hw_error(hdev, hdev->hw_error_code);
2143         else
2144                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2145
2146         if (hci_dev_do_close(hdev))
2147                 return;
2148
2149         hci_dev_do_open(hdev);
2150 }
2151
2152 void hci_uuids_clear(struct hci_dev *hdev)
2153 {
2154         struct bt_uuid *uuid, *tmp;
2155
2156         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2157                 list_del(&uuid->list);
2158                 kfree(uuid);
2159         }
2160 }
2161
2162 void hci_link_keys_clear(struct hci_dev *hdev)
2163 {
2164         struct link_key *key;
2165
2166         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2167                 list_del_rcu(&key->list);
2168                 kfree_rcu(key, rcu);
2169         }
2170 }
2171
2172 void hci_smp_ltks_clear(struct hci_dev *hdev)
2173 {
2174         struct smp_ltk *k;
2175
2176         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2177                 list_del_rcu(&k->list);
2178                 kfree_rcu(k, rcu);
2179         }
2180 }
2181
2182 void hci_smp_irks_clear(struct hci_dev *hdev)
2183 {
2184         struct smp_irk *k;
2185
2186         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2187                 list_del_rcu(&k->list);
2188                 kfree_rcu(k, rcu);
2189         }
2190 }
2191
2192 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2193 {
2194         struct link_key *k;
2195
2196         rcu_read_lock();
2197         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2198                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2199                         rcu_read_unlock();
2200                         return k;
2201                 }
2202         }
2203         rcu_read_unlock();
2204
2205         return NULL;
2206 }
2207
2208 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2209                                u8 key_type, u8 old_key_type)
2210 {
2211         /* Legacy key */
2212         if (key_type < 0x03)
2213                 return true;
2214
2215         /* Debug keys are insecure so don't store them persistently */
2216         if (key_type == HCI_LK_DEBUG_COMBINATION)
2217                 return false;
2218
2219         /* Changed combination key and there's no previous one */
2220         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2221                 return false;
2222
2223         /* Security mode 3 case */
2224         if (!conn)
2225                 return true;
2226
2227         /* BR/EDR key derived using SC from an LE link */
2228         if (conn->type == LE_LINK)
2229                 return true;
2230
2231         /* Neither local nor remote side requested no-bonding */
2232         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2233                 return true;
2234
2235         /* Local side had dedicated bonding as requirement */
2236         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2237                 return true;
2238
2239         /* Remote side had dedicated bonding as requirement */
2240         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2241                 return true;
2242
2243         /* If none of the above criteria match, then don't store the key
2244          * persistently */
2245         return false;
2246 }
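
/* Rough summary of the persistence rules implemented above:
 *
 *	key / pairing situation			stored persistently?
 *	-----------------------------------------------------------
 *	legacy key (type < 0x03)		yes
 *	debug combination key			no
 *	changed combination, no previous key	no
 *	no connection (security mode 3)		yes
 *	BR/EDR key derived from LE via SC	yes
 *	neither side requested no-bonding	yes
 *	either side used dedicated bonding	yes
 *	anything else				no
 */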
2247
2248 static u8 ltk_role(u8 type)
2249 {
2250         if (type == SMP_LTK)
2251                 return HCI_ROLE_MASTER;
2252
2253         return HCI_ROLE_SLAVE;
2254 }
2255
2256 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2257                              u8 addr_type, u8 role)
2258 {
2259         struct smp_ltk *k;
2260
2261         rcu_read_lock();
2262         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2263                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2264                         continue;
2265
2266                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2267                         rcu_read_unlock();
2268                         return k;
2269                 }
2270         }
2271         rcu_read_unlock();
2272
2273         return NULL;
2274 }
2275
2276 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2277 {
2278         struct smp_irk *irk;
2279
2280         rcu_read_lock();
2281         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2282                 if (!bacmp(&irk->rpa, rpa)) {
2283                         rcu_read_unlock();
2284                         return irk;
2285                 }
2286         }
2287
2288         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2289                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2290                         bacpy(&irk->rpa, rpa);
2291                         rcu_read_unlock();
2292                         return irk;
2293                 }
2294         }
2295         rcu_read_unlock();
2296
2297         return NULL;
2298 }
2299
2300 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2301                                      u8 addr_type)
2302 {
2303         struct smp_irk *irk;
2304
2305         /* Identity Address must be public or static random (top two bits set) */
2306         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2307                 return NULL;
2308
2309         rcu_read_lock();
2310         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2311                 if (addr_type == irk->addr_type &&
2312                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2313                         rcu_read_unlock();
2314                         return irk;
2315                 }
2316         }
2317         rcu_read_unlock();
2318
2319         return NULL;
2320 }
2321
2322 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2323                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2324                                   u8 pin_len, bool *persistent)
2325 {
2326         struct link_key *key, *old_key;
2327         u8 old_key_type;
2328
2329         old_key = hci_find_link_key(hdev, bdaddr);
2330         if (old_key) {
2331                 old_key_type = old_key->type;
2332                 key = old_key;
2333         } else {
2334                 old_key_type = conn ? conn->key_type : 0xff;
2335                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2336                 if (!key)
2337                         return NULL;
2338                 list_add_rcu(&key->list, &hdev->link_keys);
2339         }
2340
2341         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2342
2343         /* Some buggy controller combinations generate a changed
2344          * combination key for legacy pairing even when there's no
2345          * previous key */
2346         if (type == HCI_LK_CHANGED_COMBINATION &&
2347             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2348                 type = HCI_LK_COMBINATION;
2349                 if (conn)
2350                         conn->key_type = type;
2351         }
2352
2353         bacpy(&key->bdaddr, bdaddr);
2354         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2355         key->pin_len = pin_len;
2356
2357         if (type == HCI_LK_CHANGED_COMBINATION)
2358                 key->type = old_key_type;
2359         else
2360                 key->type = type;
2361
2362         if (persistent)
2363                 *persistent = hci_persistent_key(hdev, conn, type,
2364                                                  old_key_type);
2365
2366         return key;
2367 }
2368
2369 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2370                             u8 addr_type, u8 type, u8 authenticated,
2371                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2372 {
2373         struct smp_ltk *key, *old_key;
2374         u8 role = ltk_role(type);
2375
2376         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2377         if (old_key)
2378                 key = old_key;
2379         else {
2380                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2381                 if (!key)
2382                         return NULL;
2383                 list_add_rcu(&key->list, &hdev->long_term_keys);
2384         }
2385
2386         bacpy(&key->bdaddr, bdaddr);
2387         key->bdaddr_type = addr_type;
2388         memcpy(key->val, tk, sizeof(key->val));
2389         key->authenticated = authenticated;
2390         key->ediv = ediv;
2391         key->rand = rand;
2392         key->enc_size = enc_size;
2393         key->type = type;
2394
2395         return key;
2396 }
2397
2398 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2399                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2400 {
2401         struct smp_irk *irk;
2402
2403         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2404         if (!irk) {
2405                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2406                 if (!irk)
2407                         return NULL;
2408
2409                 bacpy(&irk->bdaddr, bdaddr);
2410                 irk->addr_type = addr_type;
2411
2412                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2413         }
2414
2415         memcpy(irk->val, val, 16);
2416         bacpy(&irk->rpa, rpa);
2417
2418         return irk;
2419 }
2420
2421 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2422 {
2423         struct link_key *key;
2424
2425         key = hci_find_link_key(hdev, bdaddr);
2426         if (!key)
2427                 return -ENOENT;
2428
2429         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2430
2431         list_del_rcu(&key->list);
2432         kfree_rcu(key, rcu);
2433
2434         return 0;
2435 }
2436
2437 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2438 {
2439         struct smp_ltk *k;
2440         int removed = 0;
2441
2442         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2443                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2444                         continue;
2445
2446                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2447
2448                 list_del_rcu(&k->list);
2449                 kfree_rcu(k, rcu);
2450                 removed++;
2451         }
2452
2453         return removed ? 0 : -ENOENT;
2454 }
2455
2456 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2457 {
2458         struct smp_irk *k;
2459
2460         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2461                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2462                         continue;
2463
2464                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2465
2466                 list_del_rcu(&k->list);
2467                 kfree_rcu(k, rcu);
2468         }
2469 }
2470
2471 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2472 {
2473         struct smp_ltk *k;
2474         struct smp_irk *irk;
2475         u8 addr_type;
2476
2477         if (type == BDADDR_BREDR) {
2478                 if (hci_find_link_key(hdev, bdaddr))
2479                         return true;
2480                 return false;
2481         }
2482
2483         /* Convert to the HCI address type that struct smp_ltk uses */
2484         if (type == BDADDR_LE_PUBLIC)
2485                 addr_type = ADDR_LE_DEV_PUBLIC;
2486         else
2487                 addr_type = ADDR_LE_DEV_RANDOM;
2488
2489         irk = hci_get_irk(hdev, bdaddr, addr_type);
2490         if (irk) {
2491                 bdaddr = &irk->bdaddr;
2492                 addr_type = irk->addr_type;
2493         }
2494
2495         rcu_read_lock();
2496         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2497                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2498                         rcu_read_unlock();
2499                         return true;
2500                 }
2501         }
2502         rcu_read_unlock();
2503
2504         return false;
2505 }
2506
2507 /* HCI command timer function */
2508 static void hci_cmd_timeout(struct work_struct *work)
2509 {
2510         struct hci_dev *hdev = container_of(work, struct hci_dev,
2511                                             cmd_timer.work);
2512
2513         if (hdev->sent_cmd) {
2514                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2515                 u16 opcode = __le16_to_cpu(sent->opcode);
2516
2517                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2518         } else {
2519                 bt_dev_err(hdev, "command tx timeout");
2520         }
2521
2522         atomic_set(&hdev->cmd_cnt, 1);
2523         queue_work(hdev->workqueue, &hdev->cmd_work);
2524 }
2525
2526 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2527                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2528 {
2529         struct oob_data *data;
2530
2531         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2532                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2533                         continue;
2534                 if (data->bdaddr_type != bdaddr_type)
2535                         continue;
2536                 return data;
2537         }
2538
2539         return NULL;
2540 }
2541
2542 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2543                                u8 bdaddr_type)
2544 {
2545         struct oob_data *data;
2546
2547         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2548         if (!data)
2549                 return -ENOENT;
2550
2551         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2552
2553         list_del(&data->list);
2554         kfree(data);
2555
2556         return 0;
2557 }
2558
2559 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2560 {
2561         struct oob_data *data, *n;
2562
2563         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2564                 list_del(&data->list);
2565                 kfree(data);
2566         }
2567 }
2568
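/* The data->present bitmask maintained below records which OOB values
 * are valid: bit 0 (0x01) for the P-192 hash/randomizer pair, bit 1
 * (0x02) for the P-256 pair, so 0x03 means both pairs were provided
 * and 0x00 means neither.
 */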
2569 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2570                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2571                             u8 *hash256, u8 *rand256)
2572 {
2573         struct oob_data *data;
2574
2575         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2576         if (!data) {
2577                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2578                 if (!data)
2579                         return -ENOMEM;
2580
2581                 bacpy(&data->bdaddr, bdaddr);
2582                 data->bdaddr_type = bdaddr_type;
2583                 list_add(&data->list, &hdev->remote_oob_data);
2584         }
2585
2586         if (hash192 && rand192) {
2587                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2588                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2589                 if (hash256 && rand256)
2590                         data->present = 0x03;
2591         } else {
2592                 memset(data->hash192, 0, sizeof(data->hash192));
2593                 memset(data->rand192, 0, sizeof(data->rand192));
2594                 if (hash256 && rand256)
2595                         data->present = 0x02;
2596                 else
2597                         data->present = 0x00;
2598         }
2599
2600         if (hash256 && rand256) {
2601                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2602                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2603         } else {
2604                 memset(data->hash256, 0, sizeof(data->hash256));
2605                 memset(data->rand256, 0, sizeof(data->rand256));
2606                 if (hash192 && rand192)
2607                         data->present = 0x01;
2608         }
2609
2610         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2611
2612         return 0;
2613 }
2614
2615 /* This function requires the caller holds hdev->lock */
2616 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2617 {
2618         struct adv_info *adv_instance;
2619
2620         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2621                 if (adv_instance->instance == instance)
2622                         return adv_instance;
2623         }
2624
2625         return NULL;
2626 }
2627
2628 /* This function requires the caller holds hdev->lock */
2629 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2630 {
2631         struct adv_info *cur_instance;
2632
2633         cur_instance = hci_find_adv_instance(hdev, instance);
2634         if (!cur_instance)
2635                 return NULL;
2636
2637         if (cur_instance == list_last_entry(&hdev->adv_instances,
2638                                             struct adv_info, list))
2639                 return list_first_entry(&hdev->adv_instances,
2640                                         struct adv_info, list);
2641         else
2642                 return list_next_entry(cur_instance, list);
2643 }
2644
2645 /* This function requires the caller holds hdev->lock */
2646 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2647 {
2648         struct adv_info *adv_instance;
2649
2650         adv_instance = hci_find_adv_instance(hdev, instance);
2651         if (!adv_instance)
2652                 return -ENOENT;
2653
2654         BT_DBG("%s removing instance %d", hdev->name, instance);
2655
2656         if (hdev->cur_adv_instance == instance) {
2657                 if (hdev->adv_instance_timeout) {
2658                         cancel_delayed_work(&hdev->adv_instance_expire);
2659                         hdev->adv_instance_timeout = 0;
2660                 }
2661                 hdev->cur_adv_instance = 0x00;
2662         }
2663
2664         list_del(&adv_instance->list);
2665         kfree(adv_instance);
2666
2667         hdev->adv_instance_cnt--;
2668
2669         return 0;
2670 }
2671
2672 /* This function requires the caller holds hdev->lock */
2673 void hci_adv_instances_clear(struct hci_dev *hdev)
2674 {
2675         struct adv_info *adv_instance, *n;
2676
2677         if (hdev->adv_instance_timeout) {
2678                 cancel_delayed_work(&hdev->adv_instance_expire);
2679                 hdev->adv_instance_timeout = 0;
2680         }
2681
2682         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2683                 list_del(&adv_instance->list);
2684                 kfree(adv_instance);
2685         }
2686
2687         hdev->adv_instance_cnt = 0;
2688         hdev->cur_adv_instance = 0x00;
2689 }
2690
2691 /* This function requires the caller holds hdev->lock */
2692 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2693                          u16 adv_data_len, u8 *adv_data,
2694                          u16 scan_rsp_len, u8 *scan_rsp_data,
2695                          u16 timeout, u16 duration)
2696 {
2697         struct adv_info *adv_instance;
2698
2699         adv_instance = hci_find_adv_instance(hdev, instance);
2700         if (adv_instance) {
2701                 memset(adv_instance->adv_data, 0,
2702                        sizeof(adv_instance->adv_data));
2703                 memset(adv_instance->scan_rsp_data, 0,
2704                        sizeof(adv_instance->scan_rsp_data));
2705         } else {
2706                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2707                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2708                         return -EOVERFLOW;
2709
2710                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2711                 if (!adv_instance)
2712                         return -ENOMEM;
2713
2714                 adv_instance->pending = true;
2715                 adv_instance->instance = instance;
2716                 list_add(&adv_instance->list, &hdev->adv_instances);
2717                 hdev->adv_instance_cnt++;
2718         }
2719
2720         adv_instance->flags = flags;
2721         adv_instance->adv_data_len = adv_data_len;
2722         adv_instance->scan_rsp_len = scan_rsp_len;
2723
2724         if (adv_data_len)
2725                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2726
2727         if (scan_rsp_len)
2728                 memcpy(adv_instance->scan_rsp_data,
2729                        scan_rsp_data, scan_rsp_len);
2730
2731         adv_instance->timeout = timeout;
2732         adv_instance->remaining_time = timeout;
2733
2734         if (duration == 0)
2735                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2736         else
2737                 adv_instance->duration = duration;
2738
2739         BT_DBG("%s for instance %d", hdev->name, instance);
2740
2741         return 0;
2742 }
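
/* Example (illustrative, not part of this file): registering a simple
 * advertising instance through the helper above, with hdev->lock held
 * as required. The payload is raw AD structures; here a single Flags
 * field (length 0x02, type 0x01, value 0x06 = LE General Discoverable,
 * BR/EDR not supported). The values are assumptions for the sketch:
 *
 *	u8 adv_data[] = { 0x02, 0x01, 0x06 };
 *	int err;
 *
 *	hci_dev_lock(hdev);
 *	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data),
 *				   adv_data, 0, NULL, 0, 0);
 *	hci_dev_unlock(hdev);
 *
 * With duration 0 the instance advertises for HCI_DEFAULT_ADV_DURATION
 * per rotation, as the fallback above shows.
 */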
2743
2744 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2745                                            bdaddr_t *bdaddr, u8 type)
2746 {
2747         struct bdaddr_list *b;
2748
2749         list_for_each_entry(b, bdaddr_list, list) {
2750                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2751                         return b;
2752         }
2753
2754         return NULL;
2755 }
2756
2757 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2758 {
2759         struct bdaddr_list *b, *n;
2760
2761         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2762                 list_del(&b->list);
2763                 kfree(b);
2764         }
2765 }
2766
2767 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2768 {
2769         struct bdaddr_list *entry;
2770
2771         if (!bacmp(bdaddr, BDADDR_ANY))
2772                 return -EBADF;
2773
2774         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2775                 return -EEXIST;
2776
2777         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2778         if (!entry)
2779                 return -ENOMEM;
2780
2781         bacpy(&entry->bdaddr, bdaddr);
2782         entry->bdaddr_type = type;
2783
2784         list_add(&entry->list, list);
2785
2786         return 0;
2787 }
2788
2789 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2790 {
2791         struct bdaddr_list *entry;
2792
2793         if (!bacmp(bdaddr, BDADDR_ANY)) {
2794                 hci_bdaddr_list_clear(list);
2795                 return 0;
2796         }
2797
2798         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2799         if (!entry)
2800                 return -ENOENT;
2801
2802         list_del(&entry->list);
2803         kfree(entry);
2804
2805         return 0;
2806 }
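
/* Example (illustrative, not part of this file): these helpers back
 * lists such as hdev->blacklist and hdev->le_white_list. A hedged
 * kernel-side sketch, with hdev->lock held and a made-up peer address:
 *
 *	bdaddr_t peer = { .b = { 0x55, 0x44, 0x33, 0x22, 0x11, 0x00 } };
 *	int err;
 *
 *	err = hci_bdaddr_list_add(&hdev->blacklist, &peer, BDADDR_BREDR);
 *	if (err == -EEXIST)
 *		BT_DBG("peer already blacklisted");
 */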
2807
2808 /* This function requires the caller holds hdev->lock */
2809 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2810                                                bdaddr_t *addr, u8 addr_type)
2811 {
2812         struct hci_conn_params *params;
2813
2814         list_for_each_entry(params, &hdev->le_conn_params, list) {
2815                 if (bacmp(&params->addr, addr) == 0 &&
2816                     params->addr_type == addr_type) {
2817                         return params;
2818                 }
2819         }
2820
2821         return NULL;
2822 }
2823
2824 /* This function requires the caller holds hdev->lock */
2825 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2826                                                   bdaddr_t *addr, u8 addr_type)
2827 {
2828         struct hci_conn_params *param;
2829
2830         list_for_each_entry(param, list, action) {
2831                 if (bacmp(&param->addr, addr) == 0 &&
2832                     param->addr_type == addr_type)
2833                         return param;
2834         }
2835
2836         return NULL;
2837 }
2838
2839 /* This function requires the caller holds hdev->lock */
2840 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2841                                             bdaddr_t *addr, u8 addr_type)
2842 {
2843         struct hci_conn_params *params;
2844
2845         params = hci_conn_params_lookup(hdev, addr, addr_type);
2846         if (params)
2847                 return params;
2848
2849         params = kzalloc(sizeof(*params), GFP_KERNEL);
2850         if (!params) {
2851                 bt_dev_err(hdev, "out of memory");
2852                 return NULL;
2853         }
2854
2855         bacpy(&params->addr, addr);
2856         params->addr_type = addr_type;
2857
2858         list_add(&params->list, &hdev->le_conn_params);
2859         INIT_LIST_HEAD(&params->action);
2860
2861         params->conn_min_interval = hdev->le_conn_min_interval;
2862         params->conn_max_interval = hdev->le_conn_max_interval;
2863         params->conn_latency = hdev->le_conn_latency;
2864         params->supervision_timeout = hdev->le_supv_timeout;
2865         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2866
2867         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2868
2869         return params;
2870 }
2871
2872 static void hci_conn_params_free(struct hci_conn_params *params)
2873 {
2874         if (params->conn) {
2875                 hci_conn_drop(params->conn);
2876                 hci_conn_put(params->conn);
2877         }
2878
2879         list_del(&params->action);
2880         list_del(&params->list);
2881         kfree(params);
2882 }
2883
2884 /* This function requires the caller holds hdev->lock */
2885 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2886 {
2887         struct hci_conn_params *params;
2888
2889         params = hci_conn_params_lookup(hdev, addr, addr_type);
2890         if (!params)
2891                 return;
2892
2893         hci_conn_params_free(params);
2894
2895         hci_update_background_scan(hdev);
2896
2897         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2898 }
2899
2900 /* This function requires the caller holds hdev->lock */
2901 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2902 {
2903         struct hci_conn_params *params, *tmp;
2904
2905         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2906                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2907                         continue;
2908
2909                 /* If trying to establish a one-time connection to a
2910                  * disabled device, leave the params but mark them as explicit.
2911                  */
2912                 if (params->explicit_connect) {
2913                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2914                         continue;
2915                 }
2916
2917                 list_del(&params->list);
2918                 kfree(params);
2919         }
2920
2921         BT_DBG("All LE disabled connection parameters were removed");
2922 }
2923
2924 /* This function requires the caller holds hdev->lock */
2925 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2926 {
2927         struct hci_conn_params *params, *tmp;
2928
2929         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2930                 hci_conn_params_free(params);
2931
2932         BT_DBG("All LE connection parameters were removed");
2933 }
2934
2935 /* Copy the Identity Address of the controller.
2936  *
2937  * If the controller has a public BD_ADDR, then by default use that one.
2938  * If this is an LE-only controller without a public address, default to
2939  * the static random address.
2940  *
2941  * For debugging purposes it is possible to force controllers with a
2942  * public address to use the static random address instead.
2943  *
2944  * In case BR/EDR has been disabled on a dual-mode controller and
2945  * userspace has configured a static address, then that address
2946  * becomes the identity address instead of the public BR/EDR address.
2947  */
2948 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2949                                u8 *bdaddr_type)
2950 {
2951         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2952             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2953             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2954              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2955                 bacpy(bdaddr, &hdev->static_addr);
2956                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2957         } else {
2958                 bacpy(bdaddr, &hdev->bdaddr);
2959                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2960         }
2961 }
2962
2963 /* Alloc HCI device */
2964 struct hci_dev *hci_alloc_dev(void)
2965 {
2966         struct hci_dev *hdev;
2967
2968         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2969         if (!hdev)
2970                 return NULL;
2971
2972         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2973         hdev->esco_type = (ESCO_HV1);
2974         hdev->link_mode = (HCI_LM_ACCEPT);
2975         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2976         hdev->io_capability = 0x03;     /* No Input No Output */
2977         hdev->manufacturer = 0xffff;    /* Default to internal use */
2978         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2979         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2980         hdev->adv_instance_cnt = 0;
2981         hdev->cur_adv_instance = 0x00;
2982         hdev->adv_instance_timeout = 0;
2983
2984         hdev->sniff_max_interval = 800;     /* 500 ms; 0.625 ms units */
2985         hdev->sniff_min_interval = 80;      /* 50 ms */
2986
2987         hdev->le_adv_channel_map = 0x07;        /* all three channels */
2988         hdev->le_adv_min_interval = 0x0800;     /* 1.28 s; 0.625 ms units */
2989         hdev->le_adv_max_interval = 0x0800;     /* 1.28 s */
2990         hdev->le_scan_interval = 0x0060;        /* 60 ms */
2991         hdev->le_scan_window = 0x0030;          /* 30 ms */
2992         hdev->le_conn_min_interval = 0x0018;    /* 30 ms; 1.25 ms units */
2993         hdev->le_conn_max_interval = 0x0028;    /* 50 ms */
2994         hdev->le_conn_latency = 0x0000;
2995         hdev->le_supv_timeout = 0x002a;         /* 420 ms; 10 ms units */
2996         hdev->le_def_tx_len = 0x001b;           /* 27 bytes */
2997         hdev->le_def_tx_time = 0x0148;          /* 328 us */
2998         hdev->le_max_tx_len = 0x001b;
2999         hdev->le_max_tx_time = 0x0148;
3000         hdev->le_max_rx_len = 0x001b;
3001         hdev->le_max_rx_time = 0x0148;
3002
3003         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3004         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3005         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3006         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3007
3008         mutex_init(&hdev->lock);
3009         mutex_init(&hdev->req_lock);
3010
3011         INIT_LIST_HEAD(&hdev->mgmt_pending);
3012         INIT_LIST_HEAD(&hdev->blacklist);
3013         INIT_LIST_HEAD(&hdev->whitelist);
3014         INIT_LIST_HEAD(&hdev->uuids);
3015         INIT_LIST_HEAD(&hdev->link_keys);
3016         INIT_LIST_HEAD(&hdev->long_term_keys);
3017         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3018         INIT_LIST_HEAD(&hdev->remote_oob_data);
3019         INIT_LIST_HEAD(&hdev->le_white_list);
3020         INIT_LIST_HEAD(&hdev->le_conn_params);
3021         INIT_LIST_HEAD(&hdev->pend_le_conns);
3022         INIT_LIST_HEAD(&hdev->pend_le_reports);
3023         INIT_LIST_HEAD(&hdev->conn_hash.list);
3024         INIT_LIST_HEAD(&hdev->adv_instances);
3025
3026         INIT_WORK(&hdev->rx_work, hci_rx_work);
3027         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3028         INIT_WORK(&hdev->tx_work, hci_tx_work);
3029         INIT_WORK(&hdev->power_on, hci_power_on);
3030         INIT_WORK(&hdev->error_reset, hci_error_reset);
3031
3032         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3033
3034         skb_queue_head_init(&hdev->rx_q);
3035         skb_queue_head_init(&hdev->cmd_q);
3036         skb_queue_head_init(&hdev->raw_q);
3037
3038         init_waitqueue_head(&hdev->req_wait_q);
3039
3040         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3041
3042         hci_request_setup(hdev);
3043
3044         hci_init_sysfs(hdev);
3045         discovery_init(hdev);
3046
3047         return hdev;
3048 }
3049 EXPORT_SYMBOL(hci_alloc_dev);
3050
3051 /* Free HCI device */
3052 void hci_free_dev(struct hci_dev *hdev)
3053 {
3054         /* will free via device release */
3055         put_device(&hdev->dev);
3056 }
3057 EXPORT_SYMBOL(hci_free_dev);
3058
3059 /* Register HCI device */
3060 int hci_register_dev(struct hci_dev *hdev)
3061 {
3062         int id, error;
3063
3064         if (!hdev->open || !hdev->close || !hdev->send)
3065                 return -EINVAL;
3066
3067         /* Do not allow HCI_AMP devices to register at index 0,
3068          * so the index can be used as the AMP controller ID.
3069          */
3070         switch (hdev->dev_type) {
3071         case HCI_PRIMARY:
3072                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3073                 break;
3074         case HCI_AMP:
3075                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3076                 break;
3077         default:
3078                 return -EINVAL;
3079         }
3080
3081         if (id < 0)
3082                 return id;
3083
3084         sprintf(hdev->name, "hci%d", id);
3085         hdev->id = id;
3086
3087         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3088
3089         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3090         if (!hdev->workqueue) {
3091                 error = -ENOMEM;
3092                 goto err;
3093         }
3094
3095         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3096                                                       hdev->name);
3097         if (!hdev->req_workqueue) {
3098                 destroy_workqueue(hdev->workqueue);
3099                 error = -ENOMEM;
3100                 goto err;
3101         }
3102
3103         if (!IS_ERR_OR_NULL(bt_debugfs))
3104                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3105
3106         dev_set_name(&hdev->dev, "%s", hdev->name);
3107
3108         error = device_add(&hdev->dev);
3109         if (error < 0)
3110                 goto err_wqueue;
3111
3112         hci_leds_init(hdev);
3113
3114         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3115                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3116                                     hdev);
3117         if (hdev->rfkill) {
3118                 if (rfkill_register(hdev->rfkill) < 0) {
3119                         rfkill_destroy(hdev->rfkill);
3120                         hdev->rfkill = NULL;
3121                 }
3122         }
3123
3124         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3125                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3126
3127         hci_dev_set_flag(hdev, HCI_SETUP);
3128         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3129
3130         if (hdev->dev_type == HCI_PRIMARY) {
3131                 /* Assume BR/EDR support until proven otherwise (such as
3132          * through reading supported features during init).
3133                  */
3134                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3135         }
3136
3137         write_lock(&hci_dev_list_lock);
3138         list_add(&hdev->list, &hci_dev_list);
3139         write_unlock(&hci_dev_list_lock);
3140
3141         /* Devices that are marked for raw-only usage are unconfigured
3142          * and should not be included in normal operation.
3143          */
3144         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3145                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3146
3147         hci_sock_dev_event(hdev, HCI_DEV_REG);
3148         hci_dev_hold(hdev);
3149
3150         queue_work(hdev->req_workqueue, &hdev->power_on);
3151
3152         return id;
3153
3154 err_wqueue:
3155         destroy_workqueue(hdev->workqueue);
3156         destroy_workqueue(hdev->req_workqueue);
3157 err:
3158         ida_simple_remove(&hci_index_ida, hdev->id);
3159
3160         return error;
3161 }
3162 EXPORT_SYMBOL(hci_register_dev);
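
/* Illustrative sketch of how a transport driver is expected to use the
 * allocation/registration pair above (callback names are hypothetical,
 * error handling trimmed):
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->open  = my_open;
 *      hdev->close = my_close;
 *      hdev->send  = my_send;
 *
 *      if (hci_register_dev(hdev) < 0)
 *              hci_free_dev(hdev);
 */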
3163
3164 /* Unregister HCI device */
3165 void hci_unregister_dev(struct hci_dev *hdev)
3166 {
3167         int id;
3168
3169         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3170
3171         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3172
3173         id = hdev->id;
3174
3175         write_lock(&hci_dev_list_lock);
3176         list_del(&hdev->list);
3177         write_unlock(&hci_dev_list_lock);
3178
3179         cancel_work_sync(&hdev->power_on);
3180
3181         hci_dev_do_close(hdev);
3182
3183         if (!test_bit(HCI_INIT, &hdev->flags) &&
3184             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3185             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3186                 hci_dev_lock(hdev);
3187                 mgmt_index_removed(hdev);
3188                 hci_dev_unlock(hdev);
3189         }
3190
3191         /* mgmt_index_removed should take care of emptying the
3192          * pending list */
3193         BUG_ON(!list_empty(&hdev->mgmt_pending));
3194
3195         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3196
3197         if (hdev->rfkill) {
3198                 rfkill_unregister(hdev->rfkill);
3199                 rfkill_destroy(hdev->rfkill);
3200         }
3201
3202         device_del(&hdev->dev);
3203
3204         debugfs_remove_recursive(hdev->debugfs);
3205         kfree_const(hdev->hw_info);
3206         kfree_const(hdev->fw_info);
3207
3208         destroy_workqueue(hdev->workqueue);
3209         destroy_workqueue(hdev->req_workqueue);
3210
3211         hci_dev_lock(hdev);
3212         hci_bdaddr_list_clear(&hdev->blacklist);
3213         hci_bdaddr_list_clear(&hdev->whitelist);
3214         hci_uuids_clear(hdev);
3215         hci_link_keys_clear(hdev);
3216         hci_smp_ltks_clear(hdev);
3217         hci_smp_irks_clear(hdev);
3218         hci_remote_oob_data_clear(hdev);
3219         hci_adv_instances_clear(hdev);
3220         hci_bdaddr_list_clear(&hdev->le_white_list);
3221         hci_conn_params_clear_all(hdev);
3222         hci_discovery_filter_clear(hdev);
3223         hci_dev_unlock(hdev);
3224
3225         hci_dev_put(hdev);
3226
3227         ida_simple_remove(&hci_index_ida, id);
3228 }
3229 EXPORT_SYMBOL(hci_unregister_dev);
3230
3231 /* Suspend HCI device */
3232 int hci_suspend_dev(struct hci_dev *hdev)
3233 {
3234         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3235         return 0;
3236 }
3237 EXPORT_SYMBOL(hci_suspend_dev);
3238
3239 /* Resume HCI device */
3240 int hci_resume_dev(struct hci_dev *hdev)
3241 {
3242         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3243         return 0;
3244 }
3245 EXPORT_SYMBOL(hci_resume_dev);
3246
3247 /* Reset HCI device */
3248 int hci_reset_dev(struct hci_dev *hdev)
3249 {
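        /* The three bytes below form a complete HCI event packet: event
         * code HCI_EV_HARDWARE_ERROR (0x10), parameter length 1, and a
         * hardware code of 0x00.
         */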
3250         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3251         struct sk_buff *skb;
3252
3253         skb = bt_skb_alloc(3, GFP_ATOMIC);
3254         if (!skb)
3255                 return -ENOMEM;
3256
3257         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3258         skb_put_data(skb, hw_err, 3);
3259
3260         /* Send Hardware Error to upper stack */
3261         return hci_recv_frame(hdev, skb);
3262 }
3263 EXPORT_SYMBOL(hci_reset_dev);
3264
3265 /* Receive frame from HCI drivers */
3266 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3267 {
3268         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3269                       !test_bit(HCI_INIT, &hdev->flags))) {
3270                 kfree_skb(skb);
3271                 return -ENXIO;
3272         }
3273
3274         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3275             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3276             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3277                 kfree_skb(skb);
3278                 return -EINVAL;
3279         }
3280
3281         /* Incoming skb */
3282         bt_cb(skb)->incoming = 1;
3283
3284         /* Time stamp */
3285         __net_timestamp(skb);
3286
3287         skb_queue_tail(&hdev->rx_q, skb);
3288         queue_work(hdev->workqueue, &hdev->rx_work);
3289
3290         return 0;
3291 }
3292 EXPORT_SYMBOL(hci_recv_frame);
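
/* Driver-side usage is roughly as follows (a sketch only, not code from
 * this file; buf/len stand for one fully reassembled packet):
 *
 *      skb = bt_skb_alloc(len, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *      skb_put_data(skb, buf, len);
 *      hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *      err = hci_recv_frame(hdev, skb);
 *
 * On failure the skb has already been consumed, so the driver must not
 * free it again.
 */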
3293
3294 /* Receive diagnostic message from HCI drivers */
3295 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3296 {
3297         /* Mark as diagnostic packet */
3298         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3299
3300         /* Time stamp */
3301         __net_timestamp(skb);
3302
3303         skb_queue_tail(&hdev->rx_q, skb);
3304         queue_work(hdev->workqueue, &hdev->rx_work);
3305
3306         return 0;
3307 }
3308 EXPORT_SYMBOL(hci_recv_diag);
3309
3310 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3311 {
3312         va_list vargs;
3313
3314         va_start(vargs, fmt);
3315         kfree_const(hdev->hw_info);
3316         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3317         va_end(vargs);
3318 }
3319 EXPORT_SYMBOL(hci_set_hw_info);
3320
3321 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3322 {
3323         va_list vargs;
3324
3325         va_start(vargs, fmt);
3326         kfree_const(hdev->fw_info);
3327         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3328         va_end(vargs);
3329 }
3330 EXPORT_SYMBOL(hci_set_fw_info);
3331
3332 /* ---- Interface to upper protocols ---- */
3333
3334 int hci_register_cb(struct hci_cb *cb)
3335 {
3336         BT_DBG("%p name %s", cb, cb->name);
3337
3338         mutex_lock(&hci_cb_list_lock);
3339         list_add_tail(&cb->list, &hci_cb_list);
3340         mutex_unlock(&hci_cb_list_lock);
3341
3342         return 0;
3343 }
3344 EXPORT_SYMBOL(hci_register_cb);
3345
3346 int hci_unregister_cb(struct hci_cb *cb)
3347 {
3348         BT_DBG("%p name %s", cb, cb->name);
3349
3350         mutex_lock(&hci_cb_list_lock);
3351         list_del(&cb->list);
3352         mutex_unlock(&hci_cb_list_lock);
3353
3354         return 0;
3355 }
3356 EXPORT_SYMBOL(hci_unregister_cb);
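
/* A minimal sketch of an hci_cb user (names hypothetical; the real
 * registrations live in the L2CAP and SCO layers):
 *
 *      static struct hci_cb my_cb = {
 *              .name        = "my_proto",
 *              .connect_cfm = my_connect_cfm,
 *      };
 *
 *      hci_register_cb(&my_cb);
 *      ...
 *      hci_unregister_cb(&my_cb);
 */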
3357
3358 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3359 {
3360         int err;
3361
3362         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3363                skb->len);
3364
3365         /* Time stamp */
3366         __net_timestamp(skb);
3367
3368         /* Send copy to monitor */
3369         hci_send_to_monitor(hdev, skb);
3370
3371         if (atomic_read(&hdev->promisc)) {
3372                 /* Send copy to the sockets */
3373                 hci_send_to_sock(hdev, skb);
3374         }
3375
3376         /* Get rid of the skb owner prior to sending it to the driver. */
3377         skb_orphan(skb);
3378
3379         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3380                 kfree_skb(skb);
3381                 return;
3382         }
3383
3384         err = hdev->send(hdev, skb);
3385         if (err < 0) {
3386                 bt_dev_err(hdev, "sending frame failed (%d)", err);
3387                 kfree_skb(skb);
3388         }
3389 }
3390
3391 /* Send HCI command */
3392 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3393                  const void *param)
3394 {
3395         struct sk_buff *skb;
3396
3397         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3398
3399         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3400         if (!skb) {
3401                 bt_dev_err(hdev, "no memory for command");
3402                 return -ENOMEM;
3403         }
3404
3405         /* Stand-alone HCI commands must be flagged as
3406          * single-command requests.
3407          */
3408         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3409
3410         skb_queue_tail(&hdev->cmd_q, skb);
3411         queue_work(hdev->workqueue, &hdev->cmd_work);
3412
3413         return 0;
3414 }
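
/* For example, hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL) queues an
 * HCI_Reset; the result arrives asynchronously as an event handled by
 * hci_event_packet().
 */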
3415
3416 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3417                    const void *param)
3418 {
3419         struct sk_buff *skb;
3420
3421         if (hci_opcode_ogf(opcode) != 0x3f) {
3422                 /* A controller receiving a command shall respond with either
3423                  * a Command Status Event or a Command Complete Event.
3424                  * Therefore, all standard HCI commands must be sent via the
3425                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3426                  * Some vendors do not comply with this rule for vendor-specific
3427                  * commands and do not return any event. We want to support
3428                  * unresponded commands for such cases only.
3429                  */
3430                 bt_dev_err(hdev, "unresponded command not supported");
3431                 return -EINVAL;
3432         }
3433
3434         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3435         if (!skb) {
3436                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3437                            opcode);
3438                 return -ENOMEM;
3439         }
3440
3441         hci_send_frame(hdev, skb);
3442
3443         return 0;
3444 }
3445 EXPORT_SYMBOL(__hci_cmd_send);
3446
3447 /* Get data from the previously sent command */
3448 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3449 {
3450         struct hci_command_hdr *hdr;
3451
3452         if (!hdev->sent_cmd)
3453                 return NULL;
3454
3455         hdr = (void *) hdev->sent_cmd->data;
3456
3457         if (hdr->opcode != cpu_to_le16(opcode))
3458                 return NULL;
3459
3460         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3461
3462         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3463 }
3464
3465 /* Send HCI command and wait for command complete event */
3466 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3467                              const void *param, u32 timeout)
3468 {
3469         struct sk_buff *skb;
3470
3471         if (!test_bit(HCI_UP, &hdev->flags))
3472                 return ERR_PTR(-ENETDOWN);
3473
3474         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3475
3476         hci_req_sync_lock(hdev);
3477         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3478         hci_req_sync_unlock(hdev);
3479
3480         return skb;
3481 }
3482 EXPORT_SYMBOL(hci_cmd_sync);
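
/* A sketch of synchronous usage (the caller must not already hold
 * hdev->req_lock, since hci_cmd_sync takes it itself):
 *
 *      skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                         HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      (skb->data now holds struct hci_rp_read_local_version)
 *      kfree_skb(skb);
 */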
3483
3484 /* Send ACL data */
3485 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3486 {
3487         struct hci_acl_hdr *hdr;
3488         int len = skb->len;
3489
3490         skb_push(skb, HCI_ACL_HDR_SIZE);
3491         skb_reset_transport_header(skb);
3492         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3493         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3494         hdr->dlen   = cpu_to_le16(len);
3495 }
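
/* The handle field packs the 12-bit connection handle together with the
 * packet boundary and broadcast flags in the top four bits:
 * hci_handle_pack(h, f) == (h & 0x0fff) | (f << 12). For example,
 * handle 0x0042 with ACL_START (0x02) encodes as 0x2042.
 */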
3496
3497 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3498                           struct sk_buff *skb, __u16 flags)
3499 {
3500         struct hci_conn *conn = chan->conn;
3501         struct hci_dev *hdev = conn->hdev;
3502         struct sk_buff *list;
3503
3504         skb->len = skb_headlen(skb);
3505         skb->data_len = 0;
3506
3507         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3508
3509         switch (hdev->dev_type) {
3510         case HCI_PRIMARY:
3511                 hci_add_acl_hdr(skb, conn->handle, flags);
3512                 break;
3513         case HCI_AMP:
3514                 hci_add_acl_hdr(skb, chan->handle, flags);
3515                 break;
3516         default:
3517                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3518                 return;
3519         }
3520
3521         list = skb_shinfo(skb)->frag_list;
3522         if (!list) {
3523                 /* Non-fragmented */
3524                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3525
3526                 skb_queue_tail(queue, skb);
3527         } else {
3528                 /* Fragmented */
3529                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3530
3531                 skb_shinfo(skb)->frag_list = NULL;
3532
3533                 /* Queue all fragments atomically. We need to use spin_lock_bh
3534                  * here because of 6LoWPAN links, as there this function is
3535                  * called from softirq and using normal spin lock could cause
3536                  * deadlocks.
3537                  */
3538                 spin_lock_bh(&queue->lock);
3539
3540                 __skb_queue_tail(queue, skb);
3541
3542                 flags &= ~ACL_START;
3543                 flags |= ACL_CONT;
3544                 do {
3545                         skb = list; list = list->next;
3546
3547                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3548                         hci_add_acl_hdr(skb, conn->handle, flags);
3549
3550                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3551
3552                         __skb_queue_tail(queue, skb);
3553                 } while (list);
3554
3555                 spin_unlock_bh(&queue->lock);
3556         }
3557 }
3558
3559 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3560 {
3561         struct hci_dev *hdev = chan->conn->hdev;
3562
3563         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3564
3565         hci_queue_acl(chan, &chan->data_q, skb, flags);
3566
3567         queue_work(hdev->workqueue, &hdev->tx_work);
3568 }
3569
3570 /* Send SCO data */
3571 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3572 {
3573         struct hci_dev *hdev = conn->hdev;
3574         struct hci_sco_hdr hdr;
3575
3576         BT_DBG("%s len %d", hdev->name, skb->len);
3577
3578         hdr.handle = cpu_to_le16(conn->handle);
3579         hdr.dlen   = skb->len;
3580
3581         skb_push(skb, HCI_SCO_HDR_SIZE);
3582         skb_reset_transport_header(skb);
3583         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3584
3585         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3586
3587         skb_queue_tail(&conn->data_q, skb);
3588         queue_work(hdev->workqueue, &hdev->tx_work);
3589 }
3590
3591 /* ---- HCI TX task (outgoing data) ---- */
3592
3593 /* HCI Connection scheduler */
3594 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3595                                      int *quote)
3596 {
3597         struct hci_conn_hash *h = &hdev->conn_hash;
3598         struct hci_conn *conn = NULL, *c;
3599         unsigned int num = 0, min = ~0;
3600
3601         /* We don't have to lock device here. Connections are always
3602          * added and removed with TX task disabled. */
3603
3604         rcu_read_lock();
3605
3606         list_for_each_entry_rcu(c, &h->list, list) {
3607                 if (c->type != type || skb_queue_empty(&c->data_q))
3608                         continue;
3609
3610                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3611                         continue;
3612
3613                 num++;
3614
3615                 if (c->sent < min) {
3616                         min  = c->sent;
3617                         conn = c;
3618                 }
3619
3620                 if (hci_conn_num(hdev, type) == num)
3621                         break;
3622         }
3623
3624         rcu_read_unlock();
3625
3626         if (conn) {
3627                 int cnt, q;
3628
3629                 switch (conn->type) {
3630                 case ACL_LINK:
3631                         cnt = hdev->acl_cnt;
3632                         break;
3633                 case SCO_LINK:
3634                 case ESCO_LINK:
3635                         cnt = hdev->sco_cnt;
3636                         break;
3637                 case LE_LINK:
3638                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3639                         break;
3640                 default:
3641                         cnt = 0;
3642                         bt_dev_err(hdev, "unknown link type %d", conn->type);
3643                 }
3644
3645                 q = cnt / num;
3646                 *quote = q ? q : 1;
3647         } else
3648                 *quote = 0;
3649
3650         BT_DBG("conn %p quote %d", conn, *quote);
3651         return conn;
3652 }
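
/* The quota above splits the free controller buffers evenly among the
 * busy connections of the given type: e.g. with hdev->acl_cnt == 8 and
 * three ACL connections queueing data, each round may send 8 / 3 == 2
 * packets per connection, and no connection is starved below one packet.
 */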
3653
3654 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3655 {
3656         struct hci_conn_hash *h = &hdev->conn_hash;
3657         struct hci_conn *c;
3658
3659         bt_dev_err(hdev, "link tx timeout");
3660
3661         rcu_read_lock();
3662
3663         /* Kill stalled connections */
3664         list_for_each_entry_rcu(c, &h->list, list) {
3665                 if (c->type == type && c->sent) {
3666                         bt_dev_err(hdev, "killing stalled connection %pMR",
3667                                    &c->dst);
3668                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3669                 }
3670         }
3671
3672         rcu_read_unlock();
3673 }
3674
3675 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3676                                       int *quote)
3677 {
3678         struct hci_conn_hash *h = &hdev->conn_hash;
3679         struct hci_chan *chan = NULL;
3680         unsigned int num = 0, min = ~0, cur_prio = 0;
3681         struct hci_conn *conn;
3682         int cnt, q, conn_num = 0;
3683
3684         BT_DBG("%s", hdev->name);
3685
3686         rcu_read_lock();
3687
3688         list_for_each_entry_rcu(conn, &h->list, list) {
3689                 struct hci_chan *tmp;
3690
3691                 if (conn->type != type)
3692                         continue;
3693
3694                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3695                         continue;
3696
3697                 conn_num++;
3698
3699                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3700                         struct sk_buff *skb;
3701
3702                         if (skb_queue_empty(&tmp->data_q))
3703                                 continue;
3704
3705                         skb = skb_peek(&tmp->data_q);
3706                         if (skb->priority < cur_prio)
3707                                 continue;
3708
3709                         if (skb->priority > cur_prio) {
3710                                 num = 0;
3711                                 min = ~0;
3712                                 cur_prio = skb->priority;
3713                         }
3714
3715                         num++;
3716
3717                         if (conn->sent < min) {
3718                                 min  = conn->sent;
3719                                 chan = tmp;
3720                         }
3721                 }
3722
3723                 if (hci_conn_num(hdev, type) == conn_num)
3724                         break;
3725         }
3726
3727         rcu_read_unlock();
3728
3729         if (!chan)
3730                 return NULL;
3731
3732         switch (chan->conn->type) {
3733         case ACL_LINK:
3734                 cnt = hdev->acl_cnt;
3735                 break;
3736         case AMP_LINK:
3737                 cnt = hdev->block_cnt;
3738                 break;
3739         case SCO_LINK:
3740         case ESCO_LINK:
3741                 cnt = hdev->sco_cnt;
3742                 break;
3743         case LE_LINK:
3744                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3745                 break;
3746         default:
3747                 cnt = 0;
3748                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3749         }
3750
3751         q = cnt / num;
3752         *quote = q ? q : 1;
3753         BT_DBG("chan %p quote %d", chan, *quote);
3754         return chan;
3755 }
3756
3757 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3758 {
3759         struct hci_conn_hash *h = &hdev->conn_hash;
3760         struct hci_conn *conn;
3761         int num = 0;
3762
3763         BT_DBG("%s", hdev->name);
3764
3765         rcu_read_lock();
3766
3767         list_for_each_entry_rcu(conn, &h->list, list) {
3768                 struct hci_chan *chan;
3769
3770                 if (conn->type != type)
3771                         continue;
3772
3773                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3774                         continue;
3775
3776                 num++;
3777
3778                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3779                         struct sk_buff *skb;
3780
3781                         if (chan->sent) {
3782                                 chan->sent = 0;
3783                                 continue;
3784                         }
3785
3786                         if (skb_queue_empty(&chan->data_q))
3787                                 continue;
3788
3789                         skb = skb_peek(&chan->data_q);
3790                         if (skb->priority >= HCI_PRIO_MAX - 1)
3791                                 continue;
3792
3793                         skb->priority = HCI_PRIO_MAX - 1;
3794
3795                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3796                                skb->priority);
3797                 }
3798
3799                 if (hci_conn_num(hdev, type) == num)
3800                         break;
3801         }
3802
3803         rcu_read_unlock();
3804
3805 }
3806
3807 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3808 {
3809         /* Calculate count of blocks used by this packet */
3810         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3811 }
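
/* For example, with a 64-byte block size a 300-byte ACL frame (296 bytes
 * of payload after the 4-byte ACL header) occupies
 * DIV_ROUND_UP(296, 64) == 5 controller blocks.
 */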
3812
3813 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3814 {
3815         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3816                 /* ACL tx timeout must be longer than maximum
3817                  * link supervision timeout (40.9 seconds) */
3818                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3819                                        HCI_ACL_TX_TIMEOUT))
3820                         hci_link_tx_to(hdev, ACL_LINK);
3821         }
3822 }
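
/* HCI_ACL_TX_TIMEOUT is 45 seconds, which stays above the worst-case
 * link supervision timeout of 0xffff slots * 0.625 ms (about 40.9 s).
 */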
3823
3824 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3825 {
3826         unsigned int cnt = hdev->acl_cnt;
3827         struct hci_chan *chan;
3828         struct sk_buff *skb;
3829         int quote;
3830
3831         __check_timeout(hdev, cnt);
3832
3833         while (hdev->acl_cnt &&
3834                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3835                 u32 priority = (skb_peek(&chan->data_q))->priority;
3836                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3837                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3838                                skb->len, skb->priority);
3839
3840                         /* Stop if priority has changed */
3841                         if (skb->priority < priority)
3842                                 break;
3843
3844                         skb = skb_dequeue(&chan->data_q);
3845
3846                         hci_conn_enter_active_mode(chan->conn,
3847                                                    bt_cb(skb)->force_active);
3848
3849                         hci_send_frame(hdev, skb);
3850                         hdev->acl_last_tx = jiffies;
3851
3852                         hdev->acl_cnt--;
3853                         chan->sent++;
3854                         chan->conn->sent++;
3855                 }
3856         }
3857
3858         if (cnt != hdev->acl_cnt)
3859                 hci_prio_recalculate(hdev, ACL_LINK);
3860 }
3861
3862 static void hci_sched_acl_blk(struct hci_dev *hdev)
3863 {
3864         unsigned int cnt = hdev->block_cnt;
3865         struct hci_chan *chan;
3866         struct sk_buff *skb;
3867         int quote;
3868         u8 type;
3869
3870         __check_timeout(hdev, cnt);
3871
3872         BT_DBG("%s", hdev->name);
3873
3874         if (hdev->dev_type == HCI_AMP)
3875                 type = AMP_LINK;
3876         else
3877                 type = ACL_LINK;
3878
3879         while (hdev->block_cnt > 0 &&
3880                (chan = hci_chan_sent(hdev, type, &quote))) {
3881                 u32 priority = (skb_peek(&chan->data_q))->priority;
3882                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3883                         int blocks;
3884
3885                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3886                                skb->len, skb->priority);
3887
3888                         /* Stop if priority has changed */
3889                         if (skb->priority < priority)
3890                                 break;
3891
3892                         skb = skb_dequeue(&chan->data_q);
3893
3894                         blocks = __get_blocks(hdev, skb);
3895                         if (blocks > hdev->block_cnt)
3896                                 return;
3897
3898                         hci_conn_enter_active_mode(chan->conn,
3899                                                    bt_cb(skb)->force_active);
3900
3901                         hci_send_frame(hdev, skb);
3902                         hdev->acl_last_tx = jiffies;
3903
3904                         hdev->block_cnt -= blocks;
3905                         quote -= blocks;
3906
3907                         chan->sent += blocks;
3908                         chan->conn->sent += blocks;
3909                 }
3910         }
3911
3912         if (cnt != hdev->block_cnt)
3913                 hci_prio_recalculate(hdev, type);
3914 }
3915
3916 static void hci_sched_acl(struct hci_dev *hdev)
3917 {
3918         BT_DBG("%s", hdev->name);
3919
3920         /* A primary controller with no ACL links has nothing to schedule */
3921         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3922                 return;
3923
3924         /* An AMP controller with no AMP links has nothing to schedule */
3925         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3926                 return;
3927
3928         switch (hdev->flow_ctl_mode) {
3929         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3930                 hci_sched_acl_pkt(hdev);
3931                 break;
3932
3933         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3934                 hci_sched_acl_blk(hdev);
3935                 break;
3936         }
3937 }
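
/* Packet-based flow control (the common case for primary controllers)
 * accounts for whole ACL packets, while block-based flow control (used
 * with AMP controllers) accounts for fixed-size data blocks as reported
 * by the controller via Read Data Block Size.
 */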
3938
3939 /* Schedule SCO */
3940 static void hci_sched_sco(struct hci_dev *hdev)
3941 {
3942         struct hci_conn *conn;
3943         struct sk_buff *skb;
3944         int quote;
3945
3946         BT_DBG("%s", hdev->name);
3947
3948         if (!hci_conn_num(hdev, SCO_LINK))
3949                 return;
3950
3951         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3952                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3953                         BT_DBG("skb %p len %d", skb, skb->len);
3954                         hci_send_frame(hdev, skb);
3955
3956                         conn->sent++;
3957                         if (conn->sent == ~0)
3958                                 conn->sent = 0;
3959                 }
3960         }
3961 }
3962
3963 static void hci_sched_esco(struct hci_dev *hdev)
3964 {
3965         struct hci_conn *conn;
3966         struct sk_buff *skb;
3967         int quote;
3968
3969         BT_DBG("%s", hdev->name);
3970
3971         if (!hci_conn_num(hdev, ESCO_LINK))
3972                 return;
3973
3974         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3975                                                      &quote))) {
3976                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3977                         BT_DBG("skb %p len %d", skb, skb->len);
3978                         hci_send_frame(hdev, skb);
3979
3980                         conn->sent++;
3981                         if (conn->sent == ~0)
3982                                 conn->sent = 0;
3983                 }
3984         }
3985 }
3986
3987 static void hci_sched_le(struct hci_dev *hdev)
3988 {
3989         struct hci_chan *chan;
3990         struct sk_buff *skb;
3991         int quote, cnt, tmp;
3992
3993         BT_DBG("%s", hdev->name);
3994
3995         if (!hci_conn_num(hdev, LE_LINK))
3996                 return;
3997
3998         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3999                 /* LE tx timeout must be longer than maximum
4000                  * link supervision timeout (40.9 seconds) */
4001                 if (!hdev->le_cnt && hdev->le_pkts &&
4002                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4003                         hci_link_tx_to(hdev, LE_LINK);
4004         }
4005
4006         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4007         tmp = cnt;
4008         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4009                 u32 priority = (skb_peek(&chan->data_q))->priority;
4010                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4011                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4012                                skb->len, skb->priority);
4013
4014                         /* Stop if priority has changed */
4015                         if (skb->priority < priority)
4016                                 break;
4017
4018                         skb = skb_dequeue(&chan->data_q);
4019
4020                         hci_send_frame(hdev, skb);
4021                         hdev->le_last_tx = jiffies;
4022
4023                         cnt--;
4024                         chan->sent++;
4025                         chan->conn->sent++;
4026                 }
4027         }
4028
4029         if (hdev->le_pkts)
4030                 hdev->le_cnt = cnt;
4031         else
4032                 hdev->acl_cnt = cnt;
4033
4034         if (cnt != tmp)
4035                 hci_prio_recalculate(hdev, LE_LINK);
4036 }
4037
4038 static void hci_tx_work(struct work_struct *work)
4039 {
4040         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4041         struct sk_buff *skb;
4042
4043         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4044                hdev->sco_cnt, hdev->le_cnt);
4045
4046         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4047                 /* Schedule queues and send stuff to HCI driver */
4048                 hci_sched_acl(hdev);
4049                 hci_sched_sco(hdev);
4050                 hci_sched_esco(hdev);
4051                 hci_sched_le(hdev);
4052         }
4053
4054         /* Send next queued raw (unknown type) packet */
4055         while ((skb = skb_dequeue(&hdev->raw_q)))
4056                 hci_send_frame(hdev, skb);
4057 }
4058
4059 /* ----- HCI RX task (incoming data processing) ----- */
4060
4061 /* ACL data packet */
4062 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4063 {
4064         struct hci_acl_hdr *hdr = (void *) skb->data;
4065         struct hci_conn *conn;
4066         __u16 handle, flags;
4067
4068         skb_pull(skb, HCI_ACL_HDR_SIZE);
4069
4070         handle = __le16_to_cpu(hdr->handle);
4071         flags  = hci_flags(handle);
4072         handle = hci_handle(handle);
4073
4074         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4075                handle, flags);
4076
4077         hdev->stat.acl_rx++;
4078
4079         hci_dev_lock(hdev);
4080         conn = hci_conn_hash_lookup_handle(hdev, handle);
4081         hci_dev_unlock(hdev);
4082
4083         if (conn) {
4084                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4085
4086                 /* Send to upper protocol */
4087                 l2cap_recv_acldata(conn, skb, flags);
4088                 return;
4089         } else {
4090                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4091                            handle);
4092         }
4093
4094         kfree_skb(skb);
4095 }
4096
4097 /* SCO data packet */
4098 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4099 {
4100         struct hci_sco_hdr *hdr = (void *) skb->data;
4101         struct hci_conn *conn;
4102         __u16 handle;
4103
4104         skb_pull(skb, HCI_SCO_HDR_SIZE);
4105
4106         handle = __le16_to_cpu(hdr->handle);
4107
4108         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4109
4110         hdev->stat.sco_rx++;
4111
4112         hci_dev_lock(hdev);
4113         conn = hci_conn_hash_lookup_handle(hdev, handle);
4114         hci_dev_unlock(hdev);
4115
4116         if (conn) {
4117                 /* Send to upper protocol */
4118                 sco_recv_scodata(conn, skb);
4119                 return;
4120         } else {
4121                 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4122                            handle);
4123         }
4124
4125         kfree_skb(skb);
4126 }
4127
4128 static bool hci_req_is_complete(struct hci_dev *hdev)
4129 {
4130         struct sk_buff *skb;
4131
4132         skb = skb_peek(&hdev->cmd_q);
4133         if (!skb)
4134                 return true;
4135
4136         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4137 }
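
/* Commands are queued in batches ("requests"), and only the first
 * command of a batch carries HCI_REQ_START. So if the head of cmd_q is
 * a start marker, or the queue is empty, the previous request has fully
 * drained.
 */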
4138
4139 static void hci_resend_last(struct hci_dev *hdev)
4140 {
4141         struct hci_command_hdr *sent;
4142         struct sk_buff *skb;
4143         u16 opcode;
4144
4145         if (!hdev->sent_cmd)
4146                 return;
4147
4148         sent = (void *) hdev->sent_cmd->data;
4149         opcode = __le16_to_cpu(sent->opcode);
4150         if (opcode == HCI_OP_RESET)
4151                 return;
4152
4153         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4154         if (!skb)
4155                 return;
4156
4157         skb_queue_head(&hdev->cmd_q, skb);
4158         queue_work(hdev->workqueue, &hdev->cmd_work);
4159 }
4160
4161 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4162                           hci_req_complete_t *req_complete,
4163                           hci_req_complete_skb_t *req_complete_skb)
4164 {
4165         struct sk_buff *skb;
4166         unsigned long flags;
4167
4168         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4169
4170         /* If the completed command doesn't match the last one that was
4171          * sent we need to do special handling of it.
4172          */
4173         if (!hci_sent_cmd_data(hdev, opcode)) {
4174                 /* Some CSR based controllers generate a spontaneous
4175                  * reset complete event during init and any pending
4176                  * command will never be completed. In such a case we
4177                  * need to resend whatever was the last sent
4178                  * command.
4179                  */
4180                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4181                         hci_resend_last(hdev);
4182
4183                 return;
4184         }
4185
4186         /* If the command succeeded and there are still more commands in
4187          * this request, the request is not yet complete.
4188          */
4189         if (!status && !hci_req_is_complete(hdev))
4190                 return;
4191
4192         /* If this was the last command in a request the complete
4193          * callback would be found in hdev->sent_cmd instead of the
4194          * command queue (hdev->cmd_q).
4195          */
4196         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4197                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4198                 return;
4199         }
4200
4201         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4202                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4203                 return;
4204         }
4205
4206         /* Remove all pending commands belonging to this request */
4207         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4208         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4209                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4210                         __skb_queue_head(&hdev->cmd_q, skb);
4211                         break;
4212                 }
4213
4214                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4215                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4216                 else
4217                         *req_complete = bt_cb(skb)->hci.req_complete;
4218                 kfree_skb(skb);
4219         }
4220         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4221 }
4222
4223 static void hci_rx_work(struct work_struct *work)
4224 {
4225         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4226         struct sk_buff *skb;
4227
4228         BT_DBG("%s", hdev->name);
4229
4230         while ((skb = skb_dequeue(&hdev->rx_q))) {
4231                 /* Send copy to monitor */
4232                 hci_send_to_monitor(hdev, skb);
4233
4234                 if (atomic_read(&hdev->promisc)) {
4235                         /* Send copy to the sockets */
4236                         hci_send_to_sock(hdev, skb);
4237                 }
4238
4239                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4240                         kfree_skb(skb);
4241                         continue;
4242                 }
4243
4244                 if (test_bit(HCI_INIT, &hdev->flags)) {
4245                         /* Don't process data packets in these states. */
4246                         switch (hci_skb_pkt_type(skb)) {
4247                         case HCI_ACLDATA_PKT:
4248                         case HCI_SCODATA_PKT:
4249                                 kfree_skb(skb);
4250                                 continue;
4251                         }
4252                 }
4253
4254                 /* Process frame */
4255                 switch (hci_skb_pkt_type(skb)) {
4256                 case HCI_EVENT_PKT:
4257                         BT_DBG("%s Event packet", hdev->name);
4258                         hci_event_packet(hdev, skb);
4259                         break;
4260
4261                 case HCI_ACLDATA_PKT:
4262                         BT_DBG("%s ACL data packet", hdev->name);
4263                         hci_acldata_packet(hdev, skb);
4264                         break;
4265
4266                 case HCI_SCODATA_PKT:
4267                         BT_DBG("%s SCO data packet", hdev->name);
4268                         hci_scodata_packet(hdev, skb);
4269                         break;
4270
4271                 default:
4272                         kfree_skb(skb);
4273                         break;
4274                 }
4275         }
4276 }
4277
4278 static void hci_cmd_work(struct work_struct *work)
4279 {
4280         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4281         struct sk_buff *skb;
4282
4283         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4284                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4285
4286         /* Send queued commands */
4287         if (atomic_read(&hdev->cmd_cnt)) {
4288                 skb = skb_dequeue(&hdev->cmd_q);
4289                 if (!skb)
4290                         return;
4291
4292                 kfree_skb(hdev->sent_cmd);
4293
4294                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4295                 if (hdev->sent_cmd) {
4296                         atomic_dec(&hdev->cmd_cnt);
4297                         hci_send_frame(hdev, skb);
4298                         if (test_bit(HCI_RESET, &hdev->flags))
4299                                 cancel_delayed_work(&hdev->cmd_timer);
4300                         else
4301                                 schedule_delayed_work(&hdev->cmd_timer,
4302                                                       HCI_CMD_TIMEOUT);
4303                 } else {
4304                         skb_queue_head(&hdev->cmd_q, skb);
4305                         queue_work(hdev->workqueue, &hdev->cmd_work);
4306                 }
4307         }
4308 }