/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

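/* Expose the Device Under Test (DUT) mode flag via debugfs. Reading the
 * attribute reports 'Y' or 'N' depending on HCI_DUT_MODE; with debugfs
 * mounted in the usual place, the attribute typically appears as
 * /sys/kernel/debug/bluetooth/hci0/dut_mode.
 */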
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

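/* Writing a boolean enables DUT mode via the Enable Device Under Test Mode
 * command; leaving DUT mode is only possible by resetting the controller,
 * hence the HCI_OP_RESET in the disable path below.
 */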
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, then there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

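/* Register the basic debugfs entries: dut_mode is always available, while
 * vendor_diag is only created when the driver provides a set_diag callback.
 */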
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

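/* Queue an HCI_Reset command; the HCI_RESET flag marks the reset as being
 * in progress until the command completes.
 */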
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

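/* Stage 1 of the controller init: optionally reset the device, then run
 * the type specific setup for primary (BR/EDR/LE) or AMP controllers.
 */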
static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

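/* Build the page 1 event mask from the controller's capabilities: start
 * from a conservative default and only unmask events whose corresponding
 * feature or command is actually supported.
 */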
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

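/* Stage 2 of the controller init: transport setup (BR/EDR and/or LE),
 * reading the supported commands, and SSP/EIR/inquiry mode configuration.
 */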
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

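/* Derive the default link policy from the supported LMP features and
 * program it into the controller.
 */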
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

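/* Stage 3 of the controller init: stored link keys, page scan settings,
 * the LE event mask and remaining LE controller state, plus reading any
 * extended feature pages beyond page 1.
 */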
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        if (hdev->commands[18] & 0x04 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports LL Privacy feature, enable
                 * the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;      /* LE PHY Update Complete */

                /* If the controller supports LE Set Extended Scan Parameters
                 * and LE Set Extended Scan Enable commands, enable the
                 * corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * command, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* The HCI TS spec forbids mixing of legacy and
                         * extended advertising commands, and
                         * READ_ADV_TX_POWER is among the legacy set. So do
                         * not call it if extended advertising is supported;
                         * otherwise the controller will return
                         * COMMAND_DISALLOWED for extended commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[38] & 0x80) {
                        /* Read LE Min/Max Tx Power */
                        hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->commands[35] & 0x04) {
                        __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

                        /* Set RPA timeout */
                        hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
                                    &rpa_timeout);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

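/* Stage 4 of the controller init: optional cleanups and feature enablement
 * that depend on the command and feature bits gathered in earlier stages.
 */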
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Read local pairing options if the HCI command is supported */
        if (hdev->commands[41] & 0x08)
                hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* If supported, set the erroneous data reporting mode to match
         * the wideband speech setting.
         */
        if (hdev->commands[18] & 0x08 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
                bool enabled = hci_dev_test_flag(hdev,
                                                 HCI_WIDEBAND_SPEECH_ENABLED);

                if (enabled !=
                    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
                        struct hci_cp_write_def_err_data_reporting cp;

                        cp.err_data_reporting = enabled ?
                                                ERR_DATA_REPORTING_ENABLED :
                                                ERR_DATA_REPORTING_DISABLED;

                        hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
                                    sizeof(cp), &cp);
                }
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

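/* Run the full four stage initialization sequence synchronously. AMP
 * controllers stop after stage 2; debugfs entries are only created while
 * the controller is in setup or config phase.
 */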
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE type controllers. AMP controllers only
         * need the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

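/* Re-insert the entry into the resolve list so that the list stays ordered
 * by signal strength, strongest (smallest absolute RSSI) first; entries
 * with a pending name request are skipped over and keep their position.
 */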
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

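/* Implementation of the HCIINQUIRY ioctl: flush the inquiry cache if it is
 * stale or a flush was requested, run the inquiry synchronously, then copy
 * the cached results back to user space.
 */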
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *                                     (BD_ADDR) for a HCI device from
 *                                     a firmware node property.
 * @hdev:       The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware. For
 * example, the DTS could define 'local-bd-address', with zero BD addresses.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
        struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
        bdaddr_t ba;
        int ret;

        ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
                                            (u8 *)&ba, sizeof(ba));
        if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
                return;

        bacpy(&hdev->public_addr, &ba);
}

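/* Power on the controller: run rfkill and address sanity checks, call the
 * driver's open and setup callbacks, and handle the quirks that may leave
 * the controller in the unconfigured state.
 */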
1433 static int hci_dev_do_open(struct hci_dev *hdev)
1434 {
1435         int ret = 0;
1436
1437         BT_DBG("%s %p", hdev->name, hdev);
1438
1439         hci_req_sync_lock(hdev);
1440
1441         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1442                 ret = -ENODEV;
1443                 goto done;
1444         }
1445
1446         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1447             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1448                 /* Check for rfkill but allow the HCI setup stage to
1449                  * proceed (which in itself doesn't cause any RF activity).
1450                  */
1451                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1452                         ret = -ERFKILL;
1453                         goto done;
1454                 }
1455
1456                 /* Check for valid public address or a configured static
1457                  * random adddress, but let the HCI setup proceed to
1458                  * be able to determine if there is a public address
1459                  * or not.
1460                  *
1461                  * In case of user channel usage, it is not important
1462                  * if a public address or static random address is
1463                  * available.
1464                  *
1465                  * This check is only valid for BR/EDR controllers
1466                  * since AMP controllers do not have an address.
1467                  */
1468                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1469                     hdev->dev_type == HCI_PRIMARY &&
1470                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1471                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1472                         ret = -EADDRNOTAVAIL;
1473                         goto done;
1474                 }
1475         }
1476
1477         if (test_bit(HCI_UP, &hdev->flags)) {
1478                 ret = -EALREADY;
1479                 goto done;
1480         }
1481
1482         if (hdev->open(hdev)) {
1483                 ret = -EIO;
1484                 goto done;
1485         }
1486
1487         set_bit(HCI_RUNNING, &hdev->flags);
1488         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1489
1490         atomic_set(&hdev->cmd_cnt, 1);
1491         set_bit(HCI_INIT, &hdev->flags);
1492
1493         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1494             test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1495                 bool invalid_bdaddr;
1496
1497                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1498
1499                 if (hdev->setup)
1500                         ret = hdev->setup(hdev);
1501
1502                 /* The transport driver can set the quirk to mark the
1503                  * BD_ADDR invalid before creating the HCI device or in
1504                  * its setup callback.
1505                  */
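                /* E.g. a transport driver may do (a sketch, not taken from
                 * this file):
                 *
                 *	set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
                 *
                 * before hci_register_dev() or from its ->setup() callback.
                 */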
1506                 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1507                                           &hdev->quirks);
1508
1509                 if (ret)
1510                         goto setup_failed;
1511
1512                 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1513                         if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1514                                 hci_dev_get_bd_addr_from_property(hdev);
1515
1516                         if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1517                             hdev->set_bdaddr) {
1518                                 ret = hdev->set_bdaddr(hdev,
1519                                                        &hdev->public_addr);
1520
1521                                 /* If setting of the BD_ADDR from the device
1522                                  * property succeeds, then treat the address
1523                                  * as valid even if the invalid BD_ADDR
1524                                  * quirk indicates otherwise.
1525                                  */
1526                                 if (!ret)
1527                                         invalid_bdaddr = false;
1528                         }
1529                 }
1530
1531 setup_failed:
1532                 /* The transport driver can set these quirks before
1533                  * creating the HCI device or in its setup callback.
1534                  *
1535                  * For the invalid BD_ADDR quirk it is possible that
1536                  * it becomes a valid address if the bootloader does
1537                  * provide it (see above).
1538                  *
1539                  * In case any of them is set, the controller has to
1540                  * start up as unconfigured.
1541                  */
1542                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1543                     invalid_bdaddr)
1544                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1545
1546                 /* For an unconfigured controller it is required to
1547                  * read at least the version information provided by
1548                  * the Read Local Version Information command.
1549                  *
1550                  * If the set_bdaddr driver callback is provided, then
1551                  * also the original Bluetooth public device address
1552                  * will be read using the Read BD Address command.
1553                  */
1554                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1555                         ret = __hci_unconf_init(hdev);
1556         }
1557
1558         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1559                 /* If public address change is configured, ensure that
1560                  * the address gets programmed. If the driver does not
1561                  * support changing the public address, fail the power
1562                  * on procedure.
1563                  */
1564                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1565                     hdev->set_bdaddr)
1566                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1567                 else
1568                         ret = -EADDRNOTAVAIL;
1569         }
1570
1571         if (!ret) {
1572                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1573                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1574                         ret = __hci_init(hdev);
1575                         if (!ret && hdev->post_init)
1576                                 ret = hdev->post_init(hdev);
1577                 }
1578         }
1579
1580         /* If the HCI Reset command is clearing all diagnostic settings,
1581          * then they need to be reprogrammed after the init procedure
1582          * completed.
1583          */
1584         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1585             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1586             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1587                 ret = hdev->set_diag(hdev, true);
1588
1589         msft_do_open(hdev);
1590         aosp_do_open(hdev);
1591
1592         clear_bit(HCI_INIT, &hdev->flags);
1593
1594         if (!ret) {
1595                 hci_dev_hold(hdev);
1596                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1597                 hci_adv_instances_set_rpa_expired(hdev, true);
1598                 set_bit(HCI_UP, &hdev->flags);
1599                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1600                 hci_leds_update_powered(hdev, true);
1601                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1602                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1603                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1604                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1605                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1606                     hdev->dev_type == HCI_PRIMARY) {
1607                         ret = __hci_req_hci_power_on(hdev);
1608                         mgmt_power_on(hdev, ret);
1609                 }
1610         } else {
1611                 /* Init failed, cleanup */
1612                 flush_work(&hdev->tx_work);
1613
1614                 /* Since hci_rx_work() can queue new cmd_work, it must be
1615                  * flushed first to avoid an unexpected call of
1616                  * hci_cmd_work().
1617                  */
1618                 flush_work(&hdev->rx_work);
1619                 flush_work(&hdev->cmd_work);
1620
1621                 skb_queue_purge(&hdev->cmd_q);
1622                 skb_queue_purge(&hdev->rx_q);
1623
1624                 if (hdev->flush)
1625                         hdev->flush(hdev);
1626
1627                 if (hdev->sent_cmd) {
1628                         kfree_skb(hdev->sent_cmd);
1629                         hdev->sent_cmd = NULL;
1630                 }
1631
1632                 clear_bit(HCI_RUNNING, &hdev->flags);
1633                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1634
1635                 hdev->close(hdev);
1636                 hdev->flags &= BIT(HCI_RAW);
1637         }
1638
1639 done:
1640         hci_req_sync_unlock(hdev);
1641         return ret;
1642 }
1643
1644 /* ---- HCI ioctl helpers ---- */
1645
1646 int hci_dev_open(__u16 dev)
1647 {
1648         struct hci_dev *hdev;
1649         int err;
1650
1651         hdev = hci_dev_get(dev);
1652         if (!hdev)
1653                 return -ENODEV;
1654
1655         /* Devices that are marked as unconfigured can only be powered
1656          * up as user channel. Trying to bring them up as normal devices
1657          * will result in a failure. Only user channel operation is
1658          * possible.
1659          *
1660          * When this function is called for a user channel, the flag
1661          * HCI_USER_CHANNEL will be set first before attempting to
1662          * open the device.
1663          */
1664         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1665             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1666                 err = -EOPNOTSUPP;
1667                 goto done;
1668         }
1669
1670         /* We need to ensure that no other power on/off work is pending
1671          * before proceeding to call hci_dev_do_open. This is
1672          * particularly important if the setup procedure has not yet
1673          * completed.
1674          */
1675         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1676                 cancel_delayed_work(&hdev->power_off);
1677
1678         /* After this call it is guaranteed that the setup procedure
1679          * has finished. This means that error conditions like RFKILL
1680          * or a missing valid public or static random address now apply.
1681          */
1682         flush_workqueue(hdev->req_workqueue);
1683
1684         /* For controllers not using the management interface and that
1685          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1686          * so that pairing works for them. Once the management interface
1687          * is in use this bit will be cleared again and userspace has
1688          * to explicitly enable it.
1689          */
1690         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1691             !hci_dev_test_flag(hdev, HCI_MGMT))
1692                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1693
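        /* A minimal legacy bring-up from userspace that lands here (a
         * sketch; error handling omitted):
         *
         *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
         *
         *	ioctl(sk, HCIDEVUP, 0);		(dev id 0 -> hci_dev_open(0))
         */
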
1694         err = hci_dev_do_open(hdev);
1695
1696 done:
1697         hci_dev_put(hdev);
1698         return err;
1699 }
1700
1701 /* This function requires the caller holds hdev->lock */
1702 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1703 {
1704         struct hci_conn_params *p;
1705
1706         list_for_each_entry(p, &hdev->le_conn_params, list) {
1707                 if (p->conn) {
1708                         hci_conn_drop(p->conn);
1709                         hci_conn_put(p->conn);
1710                         p->conn = NULL;
1711                 }
1712                 list_del_init(&p->action);
1713         }
1714
1715         BT_DBG("All LE pending actions cleared");
1716 }
1717
1718 int hci_dev_do_close(struct hci_dev *hdev)
1719 {
1720         bool auto_off;
1721
1722         BT_DBG("%s %p", hdev->name, hdev);
1723
1724         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1725             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1726             test_bit(HCI_UP, &hdev->flags)) {
1727                 /* Execute vendor specific shutdown routine */
1728                 if (hdev->shutdown)
1729                         hdev->shutdown(hdev);
1730         }
1731
1732         cancel_delayed_work(&hdev->power_off);
1733
1734         hci_request_cancel_all(hdev);
1735         hci_req_sync_lock(hdev);
1736
1737         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1738                 cancel_delayed_work_sync(&hdev->cmd_timer);
1739                 hci_req_sync_unlock(hdev);
1740                 return 0;
1741         }
1742
1743         hci_leds_update_powered(hdev, false);
1744
1745         /* Flush RX and TX works */
1746         flush_work(&hdev->tx_work);
1747         flush_work(&hdev->rx_work);
1748
1749         if (hdev->discov_timeout > 0) {
1750                 hdev->discov_timeout = 0;
1751                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1752                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1753         }
1754
1755         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1756                 cancel_delayed_work(&hdev->service_cache);
1757
1758         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1759                 struct adv_info *adv_instance;
1760
1761                 cancel_delayed_work_sync(&hdev->rpa_expired);
1762
1763                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1764                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1765         }
1766
1767         /* Avoid potential lockdep warnings from the *_flush() calls by
1768          * ensuring the workqueue is empty up front.
1769          */
1770         drain_workqueue(hdev->workqueue);
1771
1772         hci_dev_lock(hdev);
1773
1774         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1775
1776         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1777
1778         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1779             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1780             hci_dev_test_flag(hdev, HCI_MGMT))
1781                 __mgmt_power_off(hdev);
1782
1783         hci_inquiry_cache_flush(hdev);
1784         hci_pend_le_actions_clear(hdev);
1785         hci_conn_hash_flush(hdev);
1786         hci_dev_unlock(hdev);
1787
1788         smp_unregister(hdev);
1789
1790         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1791
1792         aosp_do_close(hdev);
1793         msft_do_close(hdev);
1794
1795         if (hdev->flush)
1796                 hdev->flush(hdev);
1797
1798         /* Reset device */
1799         skb_queue_purge(&hdev->cmd_q);
1800         atomic_set(&hdev->cmd_cnt, 1);
1801         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1802             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1803                 set_bit(HCI_INIT, &hdev->flags);
1804                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1805                 clear_bit(HCI_INIT, &hdev->flags);
1806         }
1807
1808         /* Flush cmd work */
1809         flush_work(&hdev->cmd_work);
1810
1811         /* Drop queues */
1812         skb_queue_purge(&hdev->rx_q);
1813         skb_queue_purge(&hdev->cmd_q);
1814         skb_queue_purge(&hdev->raw_q);
1815
1816         /* Drop last sent command */
1817         if (hdev->sent_cmd) {
1818                 cancel_delayed_work_sync(&hdev->cmd_timer);
1819                 kfree_skb(hdev->sent_cmd);
1820                 hdev->sent_cmd = NULL;
1821         }
1822
1823         clear_bit(HCI_RUNNING, &hdev->flags);
1824         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1825
1826         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1827                 wake_up(&hdev->suspend_wait_q);
1828
1829         /* After this point our queues are empty
1830          * and no tasks are scheduled. */
1831         hdev->close(hdev);
1832
1833         /* Clear flags */
1834         hdev->flags &= BIT(HCI_RAW);
1835         hci_dev_clear_volatile_flags(hdev);
1836
1837         /* Controller radio is available but is currently powered down */
1838         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1839
1840         memset(hdev->eir, 0, sizeof(hdev->eir));
1841         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1842         bacpy(&hdev->random_addr, BDADDR_ANY);
1843
1844         hci_req_sync_unlock(hdev);
1845
1846         hci_dev_put(hdev);
1847         return 0;
1848 }
1849
1850 int hci_dev_close(__u16 dev)
1851 {
1852         struct hci_dev *hdev;
1853         int err;
1854
1855         hdev = hci_dev_get(dev);
1856         if (!hdev)
1857                 return -ENODEV;
1858
1859         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1860                 err = -EBUSY;
1861                 goto done;
1862         }
1863
1864         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1865                 cancel_delayed_work(&hdev->power_off);
1866
1867         err = hci_dev_do_close(hdev);
1868
1869 done:
1870         hci_dev_put(hdev);
1871         return err;
1872 }
1873
1874 static int hci_dev_do_reset(struct hci_dev *hdev)
1875 {
1876         int ret;
1877
1878         BT_DBG("%s %p", hdev->name, hdev);
1879
1880         hci_req_sync_lock(hdev);
1881
1882         /* Drop queues */
1883         skb_queue_purge(&hdev->rx_q);
1884         skb_queue_purge(&hdev->cmd_q);
1885
1886         /* Avoid potential lockdep warnings from the *_flush() calls by
1887          * ensuring the workqueue is empty up front.
1888          */
1889         drain_workqueue(hdev->workqueue);
1890
1891         hci_dev_lock(hdev);
1892         hci_inquiry_cache_flush(hdev);
1893         hci_conn_hash_flush(hdev);
1894         hci_dev_unlock(hdev);
1895
1896         if (hdev->flush)
1897                 hdev->flush(hdev);
1898
1899         atomic_set(&hdev->cmd_cnt, 1);
1900         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1901
1902         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1903
1904         hci_req_sync_unlock(hdev);
1905         return ret;
1906 }
1907
1908 int hci_dev_reset(__u16 dev)
1909 {
1910         struct hci_dev *hdev;
1911         int err;
1912
1913         hdev = hci_dev_get(dev);
1914         if (!hdev)
1915                 return -ENODEV;
1916
1917         if (!test_bit(HCI_UP, &hdev->flags)) {
1918                 err = -ENETDOWN;
1919                 goto done;
1920         }
1921
1922         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1923                 err = -EBUSY;
1924                 goto done;
1925         }
1926
1927         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1928                 err = -EOPNOTSUPP;
1929                 goto done;
1930         }
1931
1932         err = hci_dev_do_reset(hdev);
1933
1934 done:
1935         hci_dev_put(hdev);
1936         return err;
1937 }
1938
1939 int hci_dev_reset_stat(__u16 dev)
1940 {
1941         struct hci_dev *hdev;
1942         int ret = 0;
1943
1944         hdev = hci_dev_get(dev);
1945         if (!hdev)
1946                 return -ENODEV;
1947
1948         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1949                 ret = -EBUSY;
1950                 goto done;
1951         }
1952
1953         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1954                 ret = -EOPNOTSUPP;
1955                 goto done;
1956         }
1957
1958         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1959
1960 done:
1961         hci_dev_put(hdev);
1962         return ret;
1963 }
1964
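/* Keep the mgmt CONNECTABLE/DISCOVERABLE flags in sync after a legacy
 * HCISETSCAN ioctl. The scan parameter carries the Write Scan Enable bits
 * from the Bluetooth Core Specification:
 *
 *	SCAN_DISABLED	0x00
 *	SCAN_INQUIRY	0x01	(inquiry scan -> discoverable)
 *	SCAN_PAGE	0x02	(page scan -> connectable)
 */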
1965 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1966 {
1967         bool conn_changed, discov_changed;
1968
1969         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1970
1971         if ((scan & SCAN_PAGE))
1972                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1973                                                           HCI_CONNECTABLE);
1974         else
1975                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1976                                                            HCI_CONNECTABLE);
1977
1978         if ((scan & SCAN_INQUIRY)) {
1979                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1980                                                             HCI_DISCOVERABLE);
1981         } else {
1982                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1983                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1984                                                              HCI_DISCOVERABLE);
1985         }
1986
1987         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1988                 return;
1989
1990         if (conn_changed || discov_changed) {
1991                 /* In case this was disabled through mgmt */
1992                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1993
1994                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1995                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1996
1997                 mgmt_new_settings(hdev);
1998         }
1999 }
2000
2001 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2002 {
2003         struct hci_dev *hdev;
2004         struct hci_dev_req dr;
2005         int err = 0;
2006
2007         if (copy_from_user(&dr, arg, sizeof(dr)))
2008                 return -EFAULT;
2009
2010         hdev = hci_dev_get(dr.dev_id);
2011         if (!hdev)
2012                 return -ENODEV;
2013
2014         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2015                 err = -EBUSY;
2016                 goto done;
2017         }
2018
2019         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2020                 err = -EOPNOTSUPP;
2021                 goto done;
2022         }
2023
2024         if (hdev->dev_type != HCI_PRIMARY) {
2025                 err = -EOPNOTSUPP;
2026                 goto done;
2027         }
2028
2029         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2030                 err = -EOPNOTSUPP;
2031                 goto done;
2032         }
2033
2034         switch (cmd) {
2035         case HCISETAUTH:
2036                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2037                                    HCI_INIT_TIMEOUT, NULL);
2038                 break;
2039
2040         case HCISETENCRYPT:
2041                 if (!lmp_encrypt_capable(hdev)) {
2042                         err = -EOPNOTSUPP;
2043                         break;
2044                 }
2045
2046                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2047                         /* Auth must be enabled first */
2048                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2049                                            HCI_INIT_TIMEOUT, NULL);
2050                         if (err)
2051                                 break;
2052                 }
2053
2054                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2055                                    HCI_INIT_TIMEOUT, NULL);
2056                 break;
2057
2058         case HCISETSCAN:
2059                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2060                                    HCI_INIT_TIMEOUT, NULL);
2061
2062                 /* Ensure that the connectable and discoverable states
2063                  * get correctly modified as this was a non-mgmt change.
2064                  */
2065                 if (!err)
2066                         hci_update_scan_state(hdev, dr.dev_opt);
2067                 break;
2068
2069         case HCISETLINKPOL:
2070                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2071                                    HCI_INIT_TIMEOUT, NULL);
2072                 break;
2073
2074         case HCISETLINKMODE:
2075                 hdev->link_mode = ((__u16) dr.dev_opt) &
2076                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2077                 break;
2078
2079         case HCISETPTYPE:
2080                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2081                         break;
2082
2083                 hdev->pkt_type = (__u16) dr.dev_opt;
2084                 mgmt_phy_configuration_changed(hdev, NULL);
2085                 break;
2086
2087         case HCISETACLMTU:
2088                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2089                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2090                 break;
2091
2092         case HCISETSCOMTU:
2093                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2094                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2095                 break;
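
                /* Note for both MTU ioctls above: dev_opt packs the two
                 * values into one 32-bit word; on a little-endian host the
                 * layout is ((mtu << 16) | pkt_count). A sketch of the
                 * matching userspace side:
                 *
                 *	dr.dev_id  = 0;
                 *	dr.dev_opt = (1021 << 16) | 8;	(ACL MTU 1021, 8 pkts)
                 *	ioctl(sk, HCISETACLMTU, &dr);
                 */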
2096
2097         default:
2098                 err = -EINVAL;
2099                 break;
2100         }
2101
2102 done:
2103         hci_dev_put(hdev);
2104         return err;
2105 }
2106
2107 int hci_get_dev_list(void __user *arg)
2108 {
2109         struct hci_dev *hdev;
2110         struct hci_dev_list_req *dl;
2111         struct hci_dev_req *dr;
2112         int n = 0, size, err;
2113         __u16 dev_num;
2114
2115         if (get_user(dev_num, (__u16 __user *) arg))
2116                 return -EFAULT;
2117
2118         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2119                 return -EINVAL;
2120
2121         size = sizeof(*dl) + dev_num * sizeof(*dr);
2122
2123         dl = kzalloc(size, GFP_KERNEL);
2124         if (!dl)
2125                 return -ENOMEM;
2126
2127         dr = dl->dev_req;
2128
2129         read_lock(&hci_dev_list_lock);
2130         list_for_each_entry(hdev, &hci_dev_list, list) {
2131                 unsigned long flags = hdev->flags;
2132
2133                 /* When auto-off is configured, the transport is running,
2134                  * but the device should nevertheless be reported as
2135                  * down.
2136                  */
2137                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2138                         flags &= ~BIT(HCI_UP);
2139
2140                 (dr + n)->dev_id  = hdev->id;
2141                 (dr + n)->dev_opt = flags;
2142
2143                 if (++n >= dev_num)
2144                         break;
2145         }
2146         read_unlock(&hci_dev_list_lock);
2147
2148         dl->dev_num = n;
2149         size = sizeof(*dl) + n * sizeof(*dr);
2150
2151         err = copy_to_user(arg, dl, size);
2152         kfree(dl);
2153
2154         return err ? -EFAULT : 0;
2155 }
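
/* Example userspace call (a sketch; error handling omitted; sk is an
 * AF_BLUETOOTH/BTPROTO_HCI socket):
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *	ioctl(sk, HCIGETDEVLIST, dl);
 */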
2156
2157 int hci_get_dev_info(void __user *arg)
2158 {
2159         struct hci_dev *hdev;
2160         struct hci_dev_info di;
2161         unsigned long flags;
2162         int err = 0;
2163
2164         if (copy_from_user(&di, arg, sizeof(di)))
2165                 return -EFAULT;
2166
2167         hdev = hci_dev_get(di.dev_id);
2168         if (!hdev)
2169                 return -ENODEV;
2170
2171         /* When auto-off is configured, the transport is running,
2172          * but the device should nevertheless be reported as
2173          * down.
2174          */
2175         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2176                 flags = hdev->flags & ~BIT(HCI_UP);
2177         else
2178                 flags = hdev->flags;
2179
2180         strcpy(di.name, hdev->name);
2181         di.bdaddr   = hdev->bdaddr;
2182         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2183         di.flags    = flags;
2184         di.pkt_type = hdev->pkt_type;
2185         if (lmp_bredr_capable(hdev)) {
2186                 di.acl_mtu  = hdev->acl_mtu;
2187                 di.acl_pkts = hdev->acl_pkts;
2188                 di.sco_mtu  = hdev->sco_mtu;
2189                 di.sco_pkts = hdev->sco_pkts;
2190         } else {
2191                 di.acl_mtu  = hdev->le_mtu;
2192                 di.acl_pkts = hdev->le_pkts;
2193                 di.sco_mtu  = 0;
2194                 di.sco_pkts = 0;
2195         }
2196         di.link_policy = hdev->link_policy;
2197         di.link_mode   = hdev->link_mode;
2198
2199         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2200         memcpy(&di.features, &hdev->features, sizeof(di.features));
2201
2202         if (copy_to_user(arg, &di, sizeof(di)))
2203                 err = -EFAULT;
2204
2205         hci_dev_put(hdev);
2206
2207         return err;
2208 }
2209
2210 /* ---- Interface to HCI drivers ---- */
2211
2212 static int hci_rfkill_set_block(void *data, bool blocked)
2213 {
2214         struct hci_dev *hdev = data;
2215
2216         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2217
2218         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2219                 return -EBUSY;
2220
2221         if (blocked) {
2222                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2223                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2224                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2225                         hci_dev_do_close(hdev);
2226         } else {
2227                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2228         }
2229
2230         return 0;
2231 }
2232
2233 static const struct rfkill_ops hci_rfkill_ops = {
2234         .set_block = hci_rfkill_set_block,
2235 };
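
/* The rfkill switch itself is created during device registration (in
 * hci_register_dev(), outside this section), roughly:
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 */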
2236
2237 static void hci_power_on(struct work_struct *work)
2238 {
2239         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2240         int err;
2241
2242         BT_DBG("%s", hdev->name);
2243
2244         if (test_bit(HCI_UP, &hdev->flags) &&
2245             hci_dev_test_flag(hdev, HCI_MGMT) &&
2246             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2247                 cancel_delayed_work(&hdev->power_off);
2248                 hci_req_sync_lock(hdev);
2249                 err = __hci_req_hci_power_on(hdev);
2250                 hci_req_sync_unlock(hdev);
2251                 mgmt_power_on(hdev, err);
2252                 return;
2253         }
2254
2255         err = hci_dev_do_open(hdev);
2256         if (err < 0) {
2257                 hci_dev_lock(hdev);
2258                 mgmt_set_powered_failed(hdev, err);
2259                 hci_dev_unlock(hdev);
2260                 return;
2261         }
2262
2263         /* During the HCI setup phase, a few error conditions are
2264          * ignored and they need to be checked now. If they are still
2265          * valid, it is important to turn the device back off.
2266          */
2267         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2268             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2269             (hdev->dev_type == HCI_PRIMARY &&
2270              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2271              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2272                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2273                 hci_dev_do_close(hdev);
2274         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2275                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2276                                    HCI_AUTO_OFF_TIMEOUT);
2277         }
2278
2279         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2280                 /* For unconfigured devices, set the HCI_RAW flag
2281                  * so that userspace can easily identify them.
2282                  */
2283                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2284                         set_bit(HCI_RAW, &hdev->flags);
2285
2286                 /* For fully configured devices, this will send
2287                  * the Index Added event. For unconfigured devices,
2288                  * it will send an Unconfigured Index Added event.
2289                  *
2290                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2291                  * and no event will be sent.
2292                  */
2293                 mgmt_index_added(hdev);
2294         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2295                 /* Now that the controller is configured, it
2296                  * is important to clear the HCI_RAW flag.
2297                  */
2298                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2299                         clear_bit(HCI_RAW, &hdev->flags);
2300
2301                 /* Powering on the controller with HCI_CONFIG set only
2302                  * happens with the transition from unconfigured to
2303                  * configured. This will send the Index Added event.
2304                  */
2305                 mgmt_index_added(hdev);
2306         }
2307 }
2308
2309 static void hci_power_off(struct work_struct *work)
2310 {
2311         struct hci_dev *hdev = container_of(work, struct hci_dev,
2312                                             power_off.work);
2313
2314         BT_DBG("%s", hdev->name);
2315
2316         hci_dev_do_close(hdev);
2317 }
2318
2319 static void hci_error_reset(struct work_struct *work)
2320 {
2321         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2322
2323         BT_DBG("%s", hdev->name);
2324
2325         if (hdev->hw_error)
2326                 hdev->hw_error(hdev, hdev->hw_error_code);
2327         else
2328                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2329
2330         if (hci_dev_do_close(hdev))
2331                 return;
2332
2333         hci_dev_do_open(hdev);
2334 }
2335
2336 void hci_uuids_clear(struct hci_dev *hdev)
2337 {
2338         struct bt_uuid *uuid, *tmp;
2339
2340         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2341                 list_del(&uuid->list);
2342                 kfree(uuid);
2343         }
2344 }
2345
2346 void hci_link_keys_clear(struct hci_dev *hdev)
2347 {
2348         struct link_key *key;
2349
2350         list_for_each_entry(key, &hdev->link_keys, list) {
2351                 list_del_rcu(&key->list);
2352                 kfree_rcu(key, rcu);
2353         }
2354 }
2355
2356 void hci_smp_ltks_clear(struct hci_dev *hdev)
2357 {
2358         struct smp_ltk *k;
2359
2360         list_for_each_entry(k, &hdev->long_term_keys, list) {
2361                 list_del_rcu(&k->list);
2362                 kfree_rcu(k, rcu);
2363         }
2364 }
2365
2366 void hci_smp_irks_clear(struct hci_dev *hdev)
2367 {
2368         struct smp_irk *k;
2369
2370         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2371                 list_del_rcu(&k->list);
2372                 kfree_rcu(k, rcu);
2373         }
2374 }
2375
2376 void hci_blocked_keys_clear(struct hci_dev *hdev)
2377 {
2378         struct blocked_key *b;
2379
2380         list_for_each_entry(b, &hdev->blocked_keys, list) {
2381                 list_del_rcu(&b->list);
2382                 kfree_rcu(b, rcu);
2383         }
2384 }
2385
2386 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2387 {
2388         bool blocked = false;
2389         struct blocked_key *b;
2390
2391         rcu_read_lock();
2392         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2393                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2394                         blocked = true;
2395                         break;
2396                 }
2397         }
2398
2399         rcu_read_unlock();
2400         return blocked;
2401 }
2402
2403 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2404 {
2405         struct link_key *k;
2406
2407         rcu_read_lock();
2408         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2409                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2410                         rcu_read_unlock();
2411
2412                         if (hci_is_blocked_key(hdev,
2413                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2414                                                k->val)) {
2415                                 bt_dev_warn_ratelimited(hdev,
2416                                                         "Link key blocked for %pMR",
2417                                                         &k->bdaddr);
2418                                 return NULL;
2419                         }
2420
2421                         return k;
2422                 }
2423         }
2424         rcu_read_unlock();
2425
2426         return NULL;
2427 }
2428
2429 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2430                                u8 key_type, u8 old_key_type)
2431 {
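        /* Authentication requirement values referenced below (from the
         * Bluetooth Core Specification, IO Capability Request Reply):
         *
         *	0x00 No Bonding			0x01 No Bonding + MITM
         *	0x02 Dedicated Bonding		0x03 Dedicated Bonding + MITM
         *	0x04 General Bonding		0x05 General Bonding + MITM
         */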
2432         /* Legacy key */
2433         if (key_type < 0x03)
2434                 return true;
2435
2436         /* Debug keys are insecure so don't store them persistently */
2437         if (key_type == HCI_LK_DEBUG_COMBINATION)
2438                 return false;
2439
2440         /* Changed combination key and there's no previous one */
2441         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2442                 return false;
2443
2444         /* Security mode 3 case */
2445         if (!conn)
2446                 return true;
2447
2448         /* BR/EDR key derived using SC from an LE link */
2449         if (conn->type == LE_LINK)
2450                 return true;
2451
2452         /* Neither the local nor the remote side requested no-bonding */
2453         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2454                 return true;
2455
2456         /* Local side had dedicated bonding as requirement */
2457         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2458                 return true;
2459
2460         /* Remote side had dedicated bonding as requirement */
2461         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2462                 return true;
2463
2464         /* If none of the above criteria match, then don't store the key
2465          * persistently */
2466         return false;
2467 }
2468
2469 static u8 ltk_role(u8 type)
2470 {
2471         if (type == SMP_LTK)
2472                 return HCI_ROLE_MASTER;
2473
2474         return HCI_ROLE_SLAVE;
2475 }
2476
2477 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2478                              u8 addr_type, u8 role)
2479 {
2480         struct smp_ltk *k;
2481
2482         rcu_read_lock();
2483         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2484                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2485                         continue;
2486
2487                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2488                         rcu_read_unlock();
2489
2490                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2491                                                k->val)) {
2492                                 bt_dev_warn_ratelimited(hdev,
2493                                                         "LTK blocked for %pMR",
2494                                                         &k->bdaddr);
2495                                 return NULL;
2496                         }
2497
2498                         return k;
2499                 }
2500         }
2501         rcu_read_unlock();
2502
2503         return NULL;
2504 }
2505
2506 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2507 {
2508         struct smp_irk *irk_to_return = NULL;
2509         struct smp_irk *irk;
2510
2511         rcu_read_lock();
2512         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2513                 if (!bacmp(&irk->rpa, rpa)) {
2514                         irk_to_return = irk;
2515                         goto done;
2516                 }
2517         }
2518
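        /* No cached RPA matched; try to resolve the RPA against each stored
         * IRK and cache the result on the IRK when a match is found.
         */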
2519         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2520                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2521                         bacpy(&irk->rpa, rpa);
2522                         irk_to_return = irk;
2523                         goto done;
2524                 }
2525         }
2526
2527 done:
2528         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2529                                                 irk_to_return->val)) {
2530                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2531                                         &irk_to_return->bdaddr);
2532                 irk_to_return = NULL;
2533         }
2534
2535         rcu_read_unlock();
2536
2537         return irk_to_return;
2538 }
2539
2540 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2541                                      u8 addr_type)
2542 {
2543         struct smp_irk *irk_to_return = NULL;
2544         struct smp_irk *irk;
2545
2546         /* Identity Address must be public or static random */
2547         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2548                 return NULL;
2549
2550         rcu_read_lock();
2551         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2552                 if (addr_type == irk->addr_type &&
2553                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2554                         irk_to_return = irk;
2555                         goto done;
2556                 }
2557         }
2558
2559 done:
2560
2561         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2562                                                 irk_to_return->val)) {
2563                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2564                                         &irk_to_return->bdaddr);
2565                 irk_to_return = NULL;
2566         }
2567
2568         rcu_read_unlock();
2569
2570         return irk_to_return;
2571 }
2572
2573 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2574                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2575                                   u8 pin_len, bool *persistent)
2576 {
2577         struct link_key *key, *old_key;
2578         u8 old_key_type;
2579
2580         old_key = hci_find_link_key(hdev, bdaddr);
2581         if (old_key) {
2582                 old_key_type = old_key->type;
2583                 key = old_key;
2584         } else {
2585                 old_key_type = conn ? conn->key_type : 0xff;
2586                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2587                 if (!key)
2588                         return NULL;
2589                 list_add_rcu(&key->list, &hdev->link_keys);
2590         }
2591
2592         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2593
2594         /* Some buggy controller combinations generate a changed
2595          * combination key for legacy pairing even when there's no
2596          * previous key */
2597         if (type == HCI_LK_CHANGED_COMBINATION &&
2598             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2599                 type = HCI_LK_COMBINATION;
2600                 if (conn)
2601                         conn->key_type = type;
2602         }
2603
2604         bacpy(&key->bdaddr, bdaddr);
2605         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2606         key->pin_len = pin_len;
2607
2608         if (type == HCI_LK_CHANGED_COMBINATION)
2609                 key->type = old_key_type;
2610         else
2611                 key->type = type;
2612
2613         if (persistent)
2614                 *persistent = hci_persistent_key(hdev, conn, type,
2615                                                  old_key_type);
2616
2617         return key;
2618 }
2619
2620 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2621                             u8 addr_type, u8 type, u8 authenticated,
2622                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2623 {
2624         struct smp_ltk *key, *old_key;
2625         u8 role = ltk_role(type);
2626
2627         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2628         if (old_key)
2629                 key = old_key;
2630         else {
2631                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2632                 if (!key)
2633                         return NULL;
2634                 list_add_rcu(&key->list, &hdev->long_term_keys);
2635         }
2636
2637         bacpy(&key->bdaddr, bdaddr);
2638         key->bdaddr_type = addr_type;
2639         memcpy(key->val, tk, sizeof(key->val));
2640         key->authenticated = authenticated;
2641         key->ediv = ediv;
2642         key->rand = rand;
2643         key->enc_size = enc_size;
2644         key->type = type;
2645
2646         return key;
2647 }
2648
2649 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2650                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2651 {
2652         struct smp_irk *irk;
2653
2654         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2655         if (!irk) {
2656                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2657                 if (!irk)
2658                         return NULL;
2659
2660                 bacpy(&irk->bdaddr, bdaddr);
2661                 irk->addr_type = addr_type;
2662
2663                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2664         }
2665
2666         memcpy(irk->val, val, 16);
2667         bacpy(&irk->rpa, rpa);
2668
2669         return irk;
2670 }
2671
2672 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2673 {
2674         struct link_key *key;
2675
2676         key = hci_find_link_key(hdev, bdaddr);
2677         if (!key)
2678                 return -ENOENT;
2679
2680         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2681
2682         list_del_rcu(&key->list);
2683         kfree_rcu(key, rcu);
2684
2685         return 0;
2686 }
2687
2688 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2689 {
2690         struct smp_ltk *k;
2691         int removed = 0;
2692
2693         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2694                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2695                         continue;
2696
2697                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2698
2699                 list_del_rcu(&k->list);
2700                 kfree_rcu(k, rcu);
2701                 removed++;
2702         }
2703
2704         return removed ? 0 : -ENOENT;
2705 }
2706
2707 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2708 {
2709         struct smp_irk *k;
2710
2711         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2712                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2713                         continue;
2714
2715                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2716
2717                 list_del_rcu(&k->list);
2718                 kfree_rcu(k, rcu);
2719         }
2720 }
2721
2722 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2723 {
2724         struct smp_ltk *k;
2725         struct smp_irk *irk;
2726         u8 addr_type;
2727
2728         if (type == BDADDR_BREDR) {
2729                 if (hci_find_link_key(hdev, bdaddr))
2730                         return true;
2731                 return false;
2732         }
2733
2734         /* Convert to HCI addr type which struct smp_ltk uses */
2735         if (type == BDADDR_LE_PUBLIC)
2736                 addr_type = ADDR_LE_DEV_PUBLIC;
2737         else
2738                 addr_type = ADDR_LE_DEV_RANDOM;
2739
2740         irk = hci_get_irk(hdev, bdaddr, addr_type);
2741         if (irk) {
2742                 bdaddr = &irk->bdaddr;
2743                 addr_type = irk->addr_type;
2744         }
2745
2746         rcu_read_lock();
2747         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2748                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2749                         rcu_read_unlock();
2750                         return true;
2751                 }
2752         }
2753         rcu_read_unlock();
2754
2755         return false;
2756 }
2757
2758 /* HCI command timer function */
2759 static void hci_cmd_timeout(struct work_struct *work)
2760 {
2761         struct hci_dev *hdev = container_of(work, struct hci_dev,
2762                                             cmd_timer.work);
2763
2764         if (hdev->sent_cmd) {
2765                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2766                 u16 opcode = __le16_to_cpu(sent->opcode);
2767
2768                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2769         } else {
2770                 bt_dev_err(hdev, "command tx timeout");
2771         }
2772
2773         if (hdev->cmd_timeout)
2774                 hdev->cmd_timeout(hdev);
2775
2776         atomic_set(&hdev->cmd_cnt, 1);
2777         queue_work(hdev->workqueue, &hdev->cmd_work);
2778 }
2779
2780 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2781                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2782 {
2783         struct oob_data *data;
2784
2785         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2786                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2787                         continue;
2788                 if (data->bdaddr_type != bdaddr_type)
2789                         continue;
2790                 return data;
2791         }
2792
2793         return NULL;
2794 }
2795
2796 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2797                                u8 bdaddr_type)
2798 {
2799         struct oob_data *data;
2800
2801         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2802         if (!data)
2803                 return -ENOENT;
2804
2805         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2806
2807         list_del(&data->list);
2808         kfree(data);
2809
2810         return 0;
2811 }
2812
2813 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2814 {
2815         struct oob_data *data, *n;
2816
2817         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2818                 list_del(&data->list);
2819                 kfree(data);
2820         }
2821 }
2822
2823 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2824                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2825                             u8 *hash256, u8 *rand256)
2826 {
2827         struct oob_data *data;
2828
2829         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2830         if (!data) {
2831                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2832                 if (!data)
2833                         return -ENOMEM;
2834
2835                 bacpy(&data->bdaddr, bdaddr);
2836                 data->bdaddr_type = bdaddr_type;
2837                 list_add(&data->list, &hdev->remote_oob_data);
2838         }
2839
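        /* The present field derived below mirrors the OOB Data Present
         * encoding: 0x00 none, 0x01 P-192 only, 0x02 P-256 only, 0x03 both.
         */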
2840         if (hash192 && rand192) {
2841                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2842                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2843                 if (hash256 && rand256)
2844                         data->present = 0x03;
2845         } else {
2846                 memset(data->hash192, 0, sizeof(data->hash192));
2847                 memset(data->rand192, 0, sizeof(data->rand192));
2848                 if (hash256 && rand256)
2849                         data->present = 0x02;
2850                 else
2851                         data->present = 0x00;
2852         }
2853
2854         if (hash256 && rand256) {
2855                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2856                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2857         } else {
2858                 memset(data->hash256, 0, sizeof(data->hash256));
2859                 memset(data->rand256, 0, sizeof(data->rand256));
2860                 if (hash192 && rand192)
2861                         data->present = 0x01;
2862         }
2863
2864         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2865
2866         return 0;
2867 }
2868
2869 /* This function requires the caller holds hdev->lock */
2870 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2871 {
2872         struct adv_info *adv_instance;
2873
2874         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2875                 if (adv_instance->instance == instance)
2876                         return adv_instance;
2877         }
2878
2879         return NULL;
2880 }
2881
2882 /* This function requires the caller holds hdev->lock */
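/* Treat the advertising-instance list as circular: return the entry that
 * follows @instance, wrapping around to the first one. This is what drives
 * the multi-advertising rotation.
 */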
2883 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2884 {
2885         struct adv_info *cur_instance;
2886
2887         cur_instance = hci_find_adv_instance(hdev, instance);
2888         if (!cur_instance)
2889                 return NULL;
2890
2891         if (cur_instance == list_last_entry(&hdev->adv_instances,
2892                                             struct adv_info, list))
2893                 return list_first_entry(&hdev->adv_instances,
2894                                                  struct adv_info, list);
2895         else
2896                 return list_next_entry(cur_instance, list);
2897 }
2898
2899 /* This function requires the caller holds hdev->lock */
2900 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2901 {
2902         struct adv_info *adv_instance;
2903
2904         adv_instance = hci_find_adv_instance(hdev, instance);
2905         if (!adv_instance)
2906                 return -ENOENT;
2907
2908         BT_DBG("%s removing %d", hdev->name, instance);
2909
2910         if (hdev->cur_adv_instance == instance) {
2911                 if (hdev->adv_instance_timeout) {
2912                         cancel_delayed_work(&hdev->adv_instance_expire);
2913                         hdev->adv_instance_timeout = 0;
2914                 }
2915                 hdev->cur_adv_instance = 0x00;
2916         }
2917
2918         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2919
2920         list_del(&adv_instance->list);
2921         kfree(adv_instance);
2922
2923         hdev->adv_instance_cnt--;
2924
2925         return 0;
2926 }
2927
2928 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2929 {
2930         struct adv_info *adv_instance, *n;
2931
2932         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2933                 adv_instance->rpa_expired = rpa_expired;
2934 }
2935
2936 /* This function requires the caller holds hdev->lock */
2937 void hci_adv_instances_clear(struct hci_dev *hdev)
2938 {
2939         struct adv_info *adv_instance, *n;
2940
2941         if (hdev->adv_instance_timeout) {
2942                 cancel_delayed_work(&hdev->adv_instance_expire);
2943                 hdev->adv_instance_timeout = 0;
2944         }
2945
2946         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2947                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2948                 list_del(&adv_instance->list);
2949                 kfree(adv_instance);
2950         }
2951
2952         hdev->adv_instance_cnt = 0;
2953         hdev->cur_adv_instance = 0x00;
2954 }
2955
2956 static void adv_instance_rpa_expired(struct work_struct *work)
2957 {
2958         struct adv_info *adv_instance = container_of(work, struct adv_info,
2959                                                      rpa_expired_cb.work);
2960
2961         BT_DBG("");
2962
2963         adv_instance->rpa_expired = true;
2964 }
2965
2966 /* This function requires the caller holds hdev->lock */
2967 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2968                          u16 adv_data_len, u8 *adv_data,
2969                          u16 scan_rsp_len, u8 *scan_rsp_data,
2970                          u16 timeout, u16 duration, s8 tx_power,
2971                          u32 min_interval, u32 max_interval)
2972 {
2973         struct adv_info *adv_instance;
2974
2975         adv_instance = hci_find_adv_instance(hdev, instance);
2976         if (adv_instance) {
2977                 memset(adv_instance->adv_data, 0,
2978                        sizeof(adv_instance->adv_data));
2979                 memset(adv_instance->scan_rsp_data, 0,
2980                        sizeof(adv_instance->scan_rsp_data));
2981         } else {
2982                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2983                     instance < 1 || instance > hdev->le_num_of_adv_sets)
2984                         return -EOVERFLOW;
2985
2986                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2987                 if (!adv_instance)
2988                         return -ENOMEM;
2989
2990                 adv_instance->pending = true;
2991                 adv_instance->instance = instance;
2992                 list_add(&adv_instance->list, &hdev->adv_instances);
2993                 hdev->adv_instance_cnt++;
2994         }
2995
2996         adv_instance->flags = flags;
2997         adv_instance->adv_data_len = adv_data_len;
2998         adv_instance->scan_rsp_len = scan_rsp_len;
2999         adv_instance->min_interval = min_interval;
3000         adv_instance->max_interval = max_interval;
3001         adv_instance->tx_power = tx_power;
3002
3003         if (adv_data_len)
3004                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3005
3006         if (scan_rsp_len)
3007                 memcpy(adv_instance->scan_rsp_data,
3008                        scan_rsp_data, scan_rsp_len);
3009
3010         adv_instance->timeout = timeout;
3011         adv_instance->remaining_time = timeout;
3012
3013         if (duration == 0)
3014                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3015         else
3016                 adv_instance->duration = duration;
3017
3018         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3019                           adv_instance_rpa_expired);
3020
3021         BT_DBG("%s for %d", hdev->name, instance);
3022
3023         return 0;
3024 }
3025
3026 /* This function requires the caller holds hdev->lock */
3027 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3028                               u16 adv_data_len, u8 *adv_data,
3029                               u16 scan_rsp_len, u8 *scan_rsp_data)
3030 {
3031         struct adv_info *adv_instance;
3032
3033         adv_instance = hci_find_adv_instance(hdev, instance);
3034
3035         /* If the advertisement instance doesn't exist, we can't modify its data */
3036         if (!adv_instance)
3037                 return -ENOENT;
3038
3039         if (adv_data_len) {
3040                 memset(adv_instance->adv_data, 0,
3041                        sizeof(adv_instance->adv_data));
3042                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3043                 adv_instance->adv_data_len = adv_data_len;
3044         }
3045
3046         if (scan_rsp_len) {
3047                 memset(adv_instance->scan_rsp_data, 0,
3048                        sizeof(adv_instance->scan_rsp_data));
3049                 memcpy(adv_instance->scan_rsp_data,
3050                        scan_rsp_data, scan_rsp_len);
3051                 adv_instance->scan_rsp_len = scan_rsp_len;
3052         }
3053
3054         return 0;
3055 }
3056
3057 /* This function requires the caller holds hdev->lock */
3058 void hci_adv_monitors_clear(struct hci_dev *hdev)
3059 {
3060         struct adv_monitor *monitor;
3061         int handle;
3062
3063         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3064                 hci_free_adv_monitor(hdev, monitor);
3065
3066         idr_destroy(&hdev->adv_monitors_idr);
3067 }
3068
3069 /* Frees the monitor structure and does some bookkeeping.
3070  * This function requires the caller holds hdev->lock.
3071  */
3072 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3073 {
3074         struct adv_pattern *pattern;
3075         struct adv_pattern *tmp;
3076
3077         if (!monitor)
3078                 return;
3079
3080         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3081                 list_del(&pattern->list);
3082                 kfree(pattern);
3083         }
3084
3085         if (monitor->handle)
3086                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3087
3088         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3089                 hdev->adv_monitors_cnt--;
3090                 mgmt_adv_monitor_removed(hdev, monitor->handle);
3091         }
3092
3093         kfree(monitor);
3094 }
3095
3096 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3097 {
3098         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3099 }
3100
3101 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3102 {
3103         return mgmt_remove_adv_monitor_complete(hdev, status);
3104 }
3105
3106 /* Assigns a handle to the monitor, and if offloading is supported and power
3107  * is on, also attempts to forward the request to the controller.
3108  * Returns true if request is forwarded (result is pending), false otherwise.
3109  * This function requires the caller holds hdev->lock.
3110  */
3111 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3112                          int *err)
3113 {
3114         int min, max, handle;
3115
3116         *err = 0;
3117
3118         if (!monitor) {
3119                 *err = -EINVAL;
3120                 return false;
3121         }
3122
3123         min = HCI_MIN_ADV_MONITOR_HANDLE;
3124         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3125         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3126                            GFP_KERNEL);
3127         if (handle < 0) {
3128                 *err = handle;
3129                 return false;
3130         }
3131
3132         monitor->handle = handle;
3133
3134         if (!hdev_is_powered(hdev))
3135                 return false;
3136
3137         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3138         case HCI_ADV_MONITOR_EXT_NONE:
3139                 hci_update_background_scan(hdev);
3140                 bt_dev_dbg(hdev, "add monitor status %d", *err);
3141                 /* Message was not forwarded to controller - not an error */
3142                 return false;
3143         case HCI_ADV_MONITOR_EXT_MSFT:
3144                 *err = msft_add_monitor_pattern(hdev, monitor);
3145                 bt_dev_dbg(hdev, "add monitor msft status %d",
3146                            *err);
3147                 break;
3148         }
3149
3150         return (*err == 0);
3151 }
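
/* Illustrative sketch (not from the original file): the return value and
 * *err must be read together. A caller under hdev->lock might do:
 *
 *      int err;
 *      bool pending;
 *
 *      pending = hci_add_adv_monitor(hdev, monitor, &err);
 *
 * If pending is true the result arrives asynchronously (for the MSFT
 * extension, through hci_add_adv_patterns_monitor_complete()); otherwise
 * the operation already finished with status err.
 */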
3152
3153 /* Attempts to tell the controller to remove the monitor, then frees it. If
3154  * the controller has no corresponding handle, remove it locally anyway.
3155  * Returns true if request is forwarded (result is pending), false otherwise.
3156  * This function requires the caller holds hdev->lock.
3157  */
3158 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3159                                    struct adv_monitor *monitor,
3160                                    u16 handle, int *err)
3161 {
3162         *err = 0;
3163
3164         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3165         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3166                 goto free_monitor;
3167         case HCI_ADV_MONITOR_EXT_MSFT:
3168                 *err = msft_remove_monitor(hdev, monitor, handle);
3169                 break;
3170         }
3171
3172         /* In case no matching handle registered, just free the monitor */
3173         if (*err == -ENOENT)
3174                 goto free_monitor;
3175
3176         return (*err == 0);
3177
3178 free_monitor:
3179         if (*err == -ENOENT)
3180                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3181                             monitor->handle);
3182         hci_free_adv_monitor(hdev, monitor);
3183
3184         *err = 0;
3185         return false;
3186 }
3187
3188 /* Returns true if request is forwarded (result is pending), false otherwise.
3189  * This function requires the caller holds hdev->lock.
3190  */
3191 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3192 {
3193         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3194         bool pending;
3195
3196         if (!monitor) {
3197                 *err = -EINVAL;
3198                 return false;
3199         }
3200
3201         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3202         if (!*err && !pending)
3203                 hci_update_background_scan(hdev);
3204
3205         bt_dev_dbg(hdev, "remove monitor handle %d, status %d, %spending",
3206                    handle, *err, pending ? "" : "not ");
3207
3208         return pending;
3209 }
3210
3211 /* Returns true if request is forwarded (result is pending), false otherwise.
3212  * This function requires the caller holds hdev->lock.
3213  */
3214 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3215 {
3216         struct adv_monitor *monitor;
3217         int idr_next_id = 0;
3218         bool pending = false;
3219         bool update = false;
3220
3221         *err = 0;
3222
3223         while (!*err && !pending) {
3224                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3225                 if (!monitor)
3226                         break;
3227
3228                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3229
3230                 if (!*err && !pending)
3231                         update = true;
3232         }
3233
3234         if (update)
3235                 hci_update_background_scan(hdev);
3236
3237         bt_dev_dbg(hdev, "remove all monitors status %d, %spending",
3238                    *err, pending ? "" : "not ");
3239
3240         return pending;
3241 }
3242
3243 /* This function requires the caller holds hdev->lock */
3244 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3245 {
3246         return !idr_is_empty(&hdev->adv_monitors_idr);
3247 }
3248
3249 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3250 {
3251         if (msft_monitor_supported(hdev))
3252                 return HCI_ADV_MONITOR_EXT_MSFT;
3253
3254         return HCI_ADV_MONITOR_EXT_NONE;
3255 }
3256
3257 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3258                                          bdaddr_t *bdaddr, u8 type)
3259 {
3260         struct bdaddr_list *b;
3261
3262         list_for_each_entry(b, bdaddr_list, list) {
3263                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3264                         return b;
3265         }
3266
3267         return NULL;
3268 }
3269
3270 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3271                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3272                                 u8 type)
3273 {
3274         struct bdaddr_list_with_irk *b;
3275
3276         list_for_each_entry(b, bdaddr_list, list) {
3277                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3278                         return b;
3279         }
3280
3281         return NULL;
3282 }
3283
3284 struct bdaddr_list_with_flags *
3285 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3286                                   bdaddr_t *bdaddr, u8 type)
3287 {
3288         struct bdaddr_list_with_flags *b;
3289
3290         list_for_each_entry(b, bdaddr_list, list) {
3291                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3292                         return b;
3293         }
3294
3295         return NULL;
3296 }
3297
3298 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3299 {
3300         struct bdaddr_list *b, *n;
3301
3302         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3303                 list_del(&b->list);
3304                 kfree(b);
3305         }
3306 }
3307
3308 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3309 {
3310         struct bdaddr_list *entry;
3311
3312         if (!bacmp(bdaddr, BDADDR_ANY))
3313                 return -EBADF;
3314
3315         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3316                 return -EEXIST;
3317
3318         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3319         if (!entry)
3320                 return -ENOMEM;
3321
3322         bacpy(&entry->bdaddr, bdaddr);
3323         entry->bdaddr_type = type;
3324
3325         list_add(&entry->list, list);
3326
3327         return 0;
3328 }
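
/* Illustrative sketch (not from the original file): the add/lookup/del
 * helpers act as a keyed set over (bdaddr, type). A hypothetical round
 * trip on the whitelist:
 *
 *      err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *      if (!err && hci_bdaddr_list_lookup(&hdev->whitelist, &bdaddr,
 *                                         BDADDR_BREDR))
 *              err = hci_bdaddr_list_del(&hdev->whitelist, &bdaddr,
 *                                        BDADDR_BREDR);
 *
 * Adding BDADDR_ANY fails with -EBADF and a duplicate entry with -EEXIST,
 * while deleting BDADDR_ANY clears the whole list.
 */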
3329
3330 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3331                                         u8 type, u8 *peer_irk, u8 *local_irk)
3332 {
3333         struct bdaddr_list_with_irk *entry;
3334
3335         if (!bacmp(bdaddr, BDADDR_ANY))
3336                 return -EBADF;
3337
3338         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3339                 return -EEXIST;
3340
3341         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3342         if (!entry)
3343                 return -ENOMEM;
3344
3345         bacpy(&entry->bdaddr, bdaddr);
3346         entry->bdaddr_type = type;
3347
3348         if (peer_irk)
3349                 memcpy(entry->peer_irk, peer_irk, 16);
3350
3351         if (local_irk)
3352                 memcpy(entry->local_irk, local_irk, 16);
3353
3354         list_add(&entry->list, list);
3355
3356         return 0;
3357 }
3358
3359 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3360                                    u8 type, u32 flags)
3361 {
3362         struct bdaddr_list_with_flags *entry;
3363
3364         if (!bacmp(bdaddr, BDADDR_ANY))
3365                 return -EBADF;
3366
3367         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3368                 return -EEXIST;
3369
3370         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3371         if (!entry)
3372                 return -ENOMEM;
3373
3374         bacpy(&entry->bdaddr, bdaddr);
3375         entry->bdaddr_type = type;
3376         entry->current_flags = flags;
3377
3378         list_add(&entry->list, list);
3379
3380         return 0;
3381 }
3382
3383 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3384 {
3385         struct bdaddr_list *entry;
3386
3387         if (!bacmp(bdaddr, BDADDR_ANY)) {
3388                 hci_bdaddr_list_clear(list);
3389                 return 0;
3390         }
3391
3392         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3393         if (!entry)
3394                 return -ENOENT;
3395
3396         list_del(&entry->list);
3397         kfree(entry);
3398
3399         return 0;
3400 }
3401
3402 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3403                                                         u8 type)
3404 {
3405         struct bdaddr_list_with_irk *entry;
3406
3407         if (!bacmp(bdaddr, BDADDR_ANY)) {
3408                 hci_bdaddr_list_clear(list);
3409                 return 0;
3410         }
3411
3412         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3413         if (!entry)
3414                 return -ENOENT;
3415
3416         list_del(&entry->list);
3417         kfree(entry);
3418
3419         return 0;
3420 }
3421
3422 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3423                                    u8 type)
3424 {
3425         struct bdaddr_list_with_flags *entry;
3426
3427         if (!bacmp(bdaddr, BDADDR_ANY)) {
3428                 hci_bdaddr_list_clear(list);
3429                 return 0;
3430         }
3431
3432         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3433         if (!entry)
3434                 return -ENOENT;
3435
3436         list_del(&entry->list);
3437         kfree(entry);
3438
3439         return 0;
3440 }
3441
3442 /* This function requires the caller holds hdev->lock */
3443 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3444                                                bdaddr_t *addr, u8 addr_type)
3445 {
3446         struct hci_conn_params *params;
3447
3448         list_for_each_entry(params, &hdev->le_conn_params, list) {
3449                 if (bacmp(&params->addr, addr) == 0 &&
3450                     params->addr_type == addr_type) {
3451                         return params;
3452                 }
3453         }
3454
3455         return NULL;
3456 }
3457
3458 /* This function requires the caller holds hdev->lock */
3459 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3460                                                   bdaddr_t *addr, u8 addr_type)
3461 {
3462         struct hci_conn_params *param;
3463
3464         switch (addr_type) {
3465         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3466                 addr_type = ADDR_LE_DEV_PUBLIC;
3467                 break;
3468         case ADDR_LE_DEV_RANDOM_RESOLVED:
3469                 addr_type = ADDR_LE_DEV_RANDOM;
3470                 break;
3471         }
3472
3473         list_for_each_entry(param, list, action) {
3474                 if (bacmp(&param->addr, addr) == 0 &&
3475                     param->addr_type == addr_type)
3476                         return param;
3477         }
3478
3479         return NULL;
3480 }
3481
3482 /* This function requires the caller holds hdev->lock */
3483 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3484                                             bdaddr_t *addr, u8 addr_type)
3485 {
3486         struct hci_conn_params *params;
3487
3488         params = hci_conn_params_lookup(hdev, addr, addr_type);
3489         if (params)
3490                 return params;
3491
3492         params = kzalloc(sizeof(*params), GFP_KERNEL);
3493         if (!params) {
3494                 bt_dev_err(hdev, "out of memory");
3495                 return NULL;
3496         }
3497
3498         bacpy(&params->addr, addr);
3499         params->addr_type = addr_type;
3500
3501         list_add(&params->list, &hdev->le_conn_params);
3502         INIT_LIST_HEAD(&params->action);
3503
3504         params->conn_min_interval = hdev->le_conn_min_interval;
3505         params->conn_max_interval = hdev->le_conn_max_interval;
3506         params->conn_latency = hdev->le_conn_latency;
3507         params->supervision_timeout = hdev->le_supv_timeout;
3508         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3509
3510         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3511
3512         return params;
3513 }
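
/* Illustrative sketch (not from the original file): callers typically
 * create or look up the params and then adjust the connect policy, all
 * under hdev->lock; the address here is just a placeholder.
 *
 *      struct hci_conn_params *params;
 *
 *      params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *      if (params)
 *              params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 */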
3514
3515 static void hci_conn_params_free(struct hci_conn_params *params)
3516 {
3517         if (params->conn) {
3518                 hci_conn_drop(params->conn);
3519                 hci_conn_put(params->conn);
3520         }
3521
3522         list_del(&params->action);
3523         list_del(&params->list);
3524         kfree(params);
3525 }
3526
3527 /* This function requires the caller holds hdev->lock */
3528 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3529 {
3530         struct hci_conn_params *params;
3531
3532         params = hci_conn_params_lookup(hdev, addr, addr_type);
3533         if (!params)
3534                 return;
3535
3536         hci_conn_params_free(params);
3537
3538         hci_update_background_scan(hdev);
3539
3540         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3541 }
3542
3543 /* This function requires the caller holds hdev->lock */
3544 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3545 {
3546         struct hci_conn_params *params, *tmp;
3547
3548         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3549                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3550                         continue;
3551
3552                 /* If trying to establish a one-time connection to a disabled
3553                  * device, leave the params but mark them as explicit-connect only.
3554                  */
3555                 if (params->explicit_connect) {
3556                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3557                         continue;
3558                 }
3559
3560                 list_del(&params->list);
3561                 kfree(params);
3562         }
3563
3564         BT_DBG("All LE disabled connection parameters were removed");
3565 }
3566
3567 /* This function requires the caller holds hdev->lock */
3568 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3569 {
3570         struct hci_conn_params *params, *tmp;
3571
3572         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3573                 hci_conn_params_free(params);
3574
3575         BT_DBG("All LE connection parameters were removed");
3576 }
3577
3578 /* Copy the Identity Address of the controller.
3579  *
3580  * If the controller has a public BD_ADDR, then by default use that one.
3581  * If this is an LE-only controller without a public address, default to
3582  * the static random address.
3583  *
3584  * For debugging purposes it is possible to force controllers with a
3585  * public address to use the static random address instead.
3586  *
3587  * In case BR/EDR has been disabled on a dual-mode controller and
3588  * userspace has configured a static address, then that address
3589  * becomes the identity address instead of the public BR/EDR address.
3590  */
3591 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3592                                u8 *bdaddr_type)
3593 {
3594         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3595             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3596             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3597              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3598                 bacpy(bdaddr, &hdev->static_addr);
3599                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3600         } else {
3601                 bacpy(bdaddr, &hdev->bdaddr);
3602                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3603         }
3604 }
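
/* Illustrative sketch (not from the original file): reading the identity
 * address into a local pair. For a dual-mode controller with a public
 * BD_ADDR and BR/EDR enabled this yields hdev->bdaddr with
 * ADDR_LE_DEV_PUBLIC; with HCI_FORCE_STATIC_ADDR set (or no public
 * address) it yields hdev->static_addr with ADDR_LE_DEV_RANDOM.
 *
 *      bdaddr_t id_addr;
 *      u8 id_addr_type;
 *
 *      hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 */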
3605
3606 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3607 {
3608         int i;
3609
3610         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3611                 clear_bit(i, hdev->suspend_tasks);
3612
3613         wake_up(&hdev->suspend_wait_q);
3614 }
3615
3616 static int hci_suspend_wait_event(struct hci_dev *hdev)
3617 {
3618 #define WAKE_COND                                                              \
3619         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3620          __SUSPEND_NUM_TASKS)
3621
3622         int i;
3623         int ret = wait_event_timeout(hdev->suspend_wait_q,
3624                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3625
3626         if (ret == 0) {
3627                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3628                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3629                         if (test_bit(i, hdev->suspend_tasks))
3630                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3631                         clear_bit(i, hdev->suspend_tasks);
3632                 }
3633
3634                 ret = -ETIMEDOUT;
3635         } else {
3636                 ret = 0;
3637         }
3638
3639         return ret;
3640 }
3641
3642 static void hci_prepare_suspend(struct work_struct *work)
3643 {
3644         struct hci_dev *hdev =
3645                 container_of(work, struct hci_dev, suspend_prepare);
3646
3647         hci_dev_lock(hdev);
3648         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3649         hci_dev_unlock(hdev);
3650 }
3651
3652 static int hci_change_suspend_state(struct hci_dev *hdev,
3653                                     enum suspended_state next)
3654 {
3655         hdev->suspend_state_next = next;
3656         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3657         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3658         return hci_suspend_wait_event(hdev);
3659 }
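
/* Illustrative flow (inferred from the code above, not from the original
 * file): the notifier records the target state, queues suspend_prepare on
 * req_workqueue and blocks in hci_suspend_wait_event() until the request
 * code clears SUSPEND_PREPARE_NOTIFIER, or -ETIMEDOUT is returned after
 * SUSPEND_NOTIFIER_TIMEOUT.
 *
 *      ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
 */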
3660
3661 static void hci_clear_wake_reason(struct hci_dev *hdev)
3662 {
3663         hci_dev_lock(hdev);
3664
3665         hdev->wake_reason = 0;
3666         bacpy(&hdev->wake_addr, BDADDR_ANY);
3667         hdev->wake_addr_type = 0;
3668
3669         hci_dev_unlock(hdev);
3670 }
3671
3672 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3673                                 void *data)
3674 {
3675         struct hci_dev *hdev =
3676                 container_of(nb, struct hci_dev, suspend_notifier);
3677         int ret = 0;
3678         u8 state = BT_RUNNING;
3679
3680         /* If powering down, wait for completion. */
3681         if (mgmt_powering_down(hdev)) {
3682                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3683                 ret = hci_suspend_wait_event(hdev);
3684                 if (ret)
3685                         goto done;
3686         }
3687
3688         /* Suspend notifier should only act on events when powered. */
3689         if (!hdev_is_powered(hdev) ||
3690             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3691                 goto done;
3692
3693         if (action == PM_SUSPEND_PREPARE) {
3694                 /* Suspend consists of two actions:
3695                  *  - First, disconnect everything and make the controller not
3696                  *    connectable (disabling scanning)
3697                  *  - Second, program event filter/whitelist and enable scan
3698                  */
3699                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3700                 if (!ret)
3701                         state = BT_SUSPEND_DISCONNECT;
3702
3703                 /* Only configure whitelist if disconnect succeeded and wake
3704                  * isn't being prevented.
3705                  */
3706                 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3707                         ret = hci_change_suspend_state(hdev,
3708                                                 BT_SUSPEND_CONFIGURE_WAKE);
3709                         if (!ret)
3710                                 state = BT_SUSPEND_CONFIGURE_WAKE;
3711                 }
3712
3713                 hci_clear_wake_reason(hdev);
3714                 mgmt_suspending(hdev, state);
3715
3716         } else if (action == PM_POST_SUSPEND) {
3717                 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3718
3719                 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3720                               hdev->wake_addr_type);
3721         }
3722
3723 done:
3724         /* We always allow suspend even if suspend preparation failed and
3725          * attempt to recover in resume.
3726          */
3727         if (ret)
3728                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3729                            action, ret);
3730
3731         return NOTIFY_DONE;
3732 }
3733
3734 /* Alloc HCI device */
3735 struct hci_dev *hci_alloc_dev(void)
3736 {
3737         struct hci_dev *hdev;
3738
3739         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3740         if (!hdev)
3741                 return NULL;
3742
3743         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3744         hdev->esco_type = (ESCO_HV1);
3745         hdev->link_mode = (HCI_LM_ACCEPT);
3746         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3747         hdev->io_capability = 0x03;     /* No Input No Output */
3748         hdev->manufacturer = 0xffff;    /* Default to internal use */
3749         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3750         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3751         hdev->adv_instance_cnt = 0;
3752         hdev->cur_adv_instance = 0x00;
3753         hdev->adv_instance_timeout = 0;
3754
3755         hdev->advmon_allowlist_duration = 300;
3756         hdev->advmon_no_filter_duration = 500;
3757         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
3758
3759         hdev->sniff_max_interval = 800;
3760         hdev->sniff_min_interval = 80;
3761
3762         hdev->le_adv_channel_map = 0x07;
3763         hdev->le_adv_min_interval = 0x0800;
3764         hdev->le_adv_max_interval = 0x0800;
3765         hdev->le_scan_interval = 0x0060;
3766         hdev->le_scan_window = 0x0030;
3767         hdev->le_scan_int_suspend = 0x0400;
3768         hdev->le_scan_window_suspend = 0x0012;
3769         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3770         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3771         hdev->le_scan_int_adv_monitor = 0x0060;
3772         hdev->le_scan_window_adv_monitor = 0x0030;
3773         hdev->le_scan_int_connect = 0x0060;
3774         hdev->le_scan_window_connect = 0x0060;
3775         hdev->le_conn_min_interval = 0x0018;
3776         hdev->le_conn_max_interval = 0x0028;
3777         hdev->le_conn_latency = 0x0000;
3778         hdev->le_supv_timeout = 0x002a;
3779         hdev->le_def_tx_len = 0x001b;
3780         hdev->le_def_tx_time = 0x0148;
3781         hdev->le_max_tx_len = 0x001b;
3782         hdev->le_max_tx_time = 0x0148;
3783         hdev->le_max_rx_len = 0x001b;
3784         hdev->le_max_rx_time = 0x0148;
3785         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3786         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3787         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3788         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3789         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3790         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3791         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3792         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3793         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3794
3795         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3796         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3797         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3798         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3799         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3800         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3801
3802         /* default 1.28 sec page scan */
3803         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3804         hdev->def_page_scan_int = 0x0800;
3805         hdev->def_page_scan_window = 0x0012;
3806
3807         mutex_init(&hdev->lock);
3808         mutex_init(&hdev->req_lock);
3809
3810         INIT_LIST_HEAD(&hdev->mgmt_pending);
3811         INIT_LIST_HEAD(&hdev->blacklist);
3812         INIT_LIST_HEAD(&hdev->whitelist);
3813         INIT_LIST_HEAD(&hdev->uuids);
3814         INIT_LIST_HEAD(&hdev->link_keys);
3815         INIT_LIST_HEAD(&hdev->long_term_keys);
3816         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3817         INIT_LIST_HEAD(&hdev->remote_oob_data);
3818         INIT_LIST_HEAD(&hdev->le_white_list);
3819         INIT_LIST_HEAD(&hdev->le_resolv_list);
3820         INIT_LIST_HEAD(&hdev->le_conn_params);
3821         INIT_LIST_HEAD(&hdev->pend_le_conns);
3822         INIT_LIST_HEAD(&hdev->pend_le_reports);
3823         INIT_LIST_HEAD(&hdev->conn_hash.list);
3824         INIT_LIST_HEAD(&hdev->adv_instances);
3825         INIT_LIST_HEAD(&hdev->blocked_keys);
3826
3827         INIT_WORK(&hdev->rx_work, hci_rx_work);
3828         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3829         INIT_WORK(&hdev->tx_work, hci_tx_work);
3830         INIT_WORK(&hdev->power_on, hci_power_on);
3831         INIT_WORK(&hdev->error_reset, hci_error_reset);
3832         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3833
3834         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3835
3836         skb_queue_head_init(&hdev->rx_q);
3837         skb_queue_head_init(&hdev->cmd_q);
3838         skb_queue_head_init(&hdev->raw_q);
3839
3840         init_waitqueue_head(&hdev->req_wait_q);
3841         init_waitqueue_head(&hdev->suspend_wait_q);
3842
3843         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3844
3845         hci_request_setup(hdev);
3846
3847         hci_init_sysfs(hdev);
3848         discovery_init(hdev);
3849
3850         return hdev;
3851 }
3852 EXPORT_SYMBOL(hci_alloc_dev);
3853
3854 /* Free HCI device */
3855 void hci_free_dev(struct hci_dev *hdev)
3856 {
3857         /* will be freed via the device release callback */
3858         put_device(&hdev->dev);
3859 }
3860 EXPORT_SYMBOL(hci_free_dev);
3861
3862 /* Register HCI device */
3863 int hci_register_dev(struct hci_dev *hdev)
3864 {
3865         int id, error;
3866
3867         if (!hdev->open || !hdev->close || !hdev->send)
3868                 return -EINVAL;
3869
3870         /* Do not allow HCI_AMP devices to register at index 0,
3871          * so the index can be used as the AMP controller ID.
3872          */
3873         switch (hdev->dev_type) {
3874         case HCI_PRIMARY:
3875                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3876                 break;
3877         case HCI_AMP:
3878                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3879                 break;
3880         default:
3881                 return -EINVAL;
3882         }
3883
3884         if (id < 0)
3885                 return id;
3886
3887         sprintf(hdev->name, "hci%d", id);
3888         hdev->id = id;
3889
3890         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3891
3892         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3893         if (!hdev->workqueue) {
3894                 error = -ENOMEM;
3895                 goto err;
3896         }
3897
3898         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3899                                                       hdev->name);
3900         if (!hdev->req_workqueue) {
3901                 destroy_workqueue(hdev->workqueue);
3902                 error = -ENOMEM;
3903                 goto err;
3904         }
3905
3906         if (!IS_ERR_OR_NULL(bt_debugfs))
3907                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3908
3909         dev_set_name(&hdev->dev, "%s", hdev->name);
3910
3911         error = device_add(&hdev->dev);
3912         if (error < 0)
3913                 goto err_wqueue;
3914
3915         hci_leds_init(hdev);
3916
3917         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3918                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3919                                     hdev);
3920         if (hdev->rfkill) {
3921                 if (rfkill_register(hdev->rfkill) < 0) {
3922                         rfkill_destroy(hdev->rfkill);
3923                         hdev->rfkill = NULL;
3924                 }
3925         }
3926
3927         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3928                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3929
3930         hci_dev_set_flag(hdev, HCI_SETUP);
3931         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3932
3933         if (hdev->dev_type == HCI_PRIMARY) {
3934                 /* Assume BR/EDR support until proven otherwise (such as
3935          * through reading supported features during init).
3936                  */
3937                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3938         }
3939
3940         write_lock(&hci_dev_list_lock);
3941         list_add(&hdev->list, &hci_dev_list);
3942         write_unlock(&hci_dev_list_lock);
3943
3944         /* Devices that are marked for raw-only usage are unconfigured
3945          * and should not be included in normal operation.
3946          */
3947         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3948                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3949
3950         hci_sock_dev_event(hdev, HCI_DEV_REG);
3951         hci_dev_hold(hdev);
3952
3953         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3954                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3955                 error = register_pm_notifier(&hdev->suspend_notifier);
3956                 if (error)
3957                         goto err_wqueue;
3958         }
3959
3960         queue_work(hdev->req_workqueue, &hdev->power_on);
3961
3962         idr_init(&hdev->adv_monitors_idr);
3963
3964         return id;
3965
3966 err_wqueue:
3967         destroy_workqueue(hdev->workqueue);
3968         destroy_workqueue(hdev->req_workqueue);
3969 err:
3970         ida_simple_remove(&hci_index_ida, hdev->id);
3971
3972         return error;
3973 }
3974 EXPORT_SYMBOL(hci_register_dev);
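
/* Illustrative sketch of the driver-side contract (my_open, my_close and
 * my_send are hypothetical callbacks, not from the original file): a
 * transport driver allocates the device, wires up the mandatory ops and
 * registers it. hci_register_dev() rejects a device that is missing any
 * of the three callbacks with -EINVAL.
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *      int err;
 *
 *      if (!hdev)
 *              return -ENOMEM;
 *      hdev->bus = HCI_UART;
 *      hdev->open = my_open;
 *      hdev->close = my_close;
 *      hdev->send = my_send;
 *      err = hci_register_dev(hdev);
 *      if (err < 0)
 *              hci_free_dev(hdev);
 */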
3975
3976 /* Unregister HCI device */
3977 void hci_unregister_dev(struct hci_dev *hdev)
3978 {
3979         int id;
3980
3981         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3982
3983         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3984
3985         id = hdev->id;
3986
3987         write_lock(&hci_dev_list_lock);
3988         list_del(&hdev->list);
3989         write_unlock(&hci_dev_list_lock);
3990
3991         cancel_work_sync(&hdev->power_on);
3992
3993         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3994                 hci_suspend_clear_tasks(hdev);
3995                 unregister_pm_notifier(&hdev->suspend_notifier);
3996                 cancel_work_sync(&hdev->suspend_prepare);
3997         }
3998
3999         hci_dev_do_close(hdev);
4000
4001         if (!test_bit(HCI_INIT, &hdev->flags) &&
4002             !hci_dev_test_flag(hdev, HCI_SETUP) &&
4003             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4004                 hci_dev_lock(hdev);
4005                 mgmt_index_removed(hdev);
4006                 hci_dev_unlock(hdev);
4007         }
4008
4009         /* mgmt_index_removed should take care of emptying the
4010          * pending list */
4011         BUG_ON(!list_empty(&hdev->mgmt_pending));
4012
4013         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4014
4015         if (hdev->rfkill) {
4016                 rfkill_unregister(hdev->rfkill);
4017                 rfkill_destroy(hdev->rfkill);
4018         }
4019
4020         device_del(&hdev->dev);
4021
4022         debugfs_remove_recursive(hdev->debugfs);
4023         kfree_const(hdev->hw_info);
4024         kfree_const(hdev->fw_info);
4025
4026         destroy_workqueue(hdev->workqueue);
4027         destroy_workqueue(hdev->req_workqueue);
4028
4029         hci_dev_lock(hdev);
4030         hci_bdaddr_list_clear(&hdev->blacklist);
4031         hci_bdaddr_list_clear(&hdev->whitelist);
4032         hci_uuids_clear(hdev);
4033         hci_link_keys_clear(hdev);
4034         hci_smp_ltks_clear(hdev);
4035         hci_smp_irks_clear(hdev);
4036         hci_remote_oob_data_clear(hdev);
4037         hci_adv_instances_clear(hdev);
4038         hci_adv_monitors_clear(hdev);
4039         hci_bdaddr_list_clear(&hdev->le_white_list);
4040         hci_bdaddr_list_clear(&hdev->le_resolv_list);
4041         hci_conn_params_clear_all(hdev);
4042         hci_discovery_filter_clear(hdev);
4043         hci_blocked_keys_clear(hdev);
4044         hci_dev_unlock(hdev);
4045
4046         hci_dev_put(hdev);
4047
4048         ida_simple_remove(&hci_index_ida, id);
4049 }
4050 EXPORT_SYMBOL(hci_unregister_dev);
4051
4052 /* Suspend HCI device */
4053 int hci_suspend_dev(struct hci_dev *hdev)
4054 {
4055         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4056         return 0;
4057 }
4058 EXPORT_SYMBOL(hci_suspend_dev);
4059
4060 /* Resume HCI device */
4061 int hci_resume_dev(struct hci_dev *hdev)
4062 {
4063         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4064         return 0;
4065 }
4066 EXPORT_SYMBOL(hci_resume_dev);
4067
4068 /* Reset HCI device */
4069 int hci_reset_dev(struct hci_dev *hdev)
4070 {
4071         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4072         struct sk_buff *skb;
4073
4074         skb = bt_skb_alloc(3, GFP_ATOMIC);
4075         if (!skb)
4076                 return -ENOMEM;
4077
4078         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4079         skb_put_data(skb, hw_err, 3);
4080
4081         /* Send Hardware Error to upper stack */
4082         return hci_recv_frame(hdev, skb);
4083 }
4084 EXPORT_SYMBOL(hci_reset_dev);
4085
4086 /* Receive frame from HCI drivers */
4087 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4088 {
4089         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4090                       !test_bit(HCI_INIT, &hdev->flags))) {
4091                 kfree_skb(skb);
4092                 return -ENXIO;
4093         }
4094
4095         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4096             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4097             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4098             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4099                 kfree_skb(skb);
4100                 return -EINVAL;
4101         }
4102
4103         /* Incoming skb */
4104         bt_cb(skb)->incoming = 1;
4105
4106         /* Time stamp */
4107         __net_timestamp(skb);
4108
4109         skb_queue_tail(&hdev->rx_q, skb);
4110         queue_work(hdev->workqueue, &hdev->rx_work);
4111
4112         return 0;
4113 }
4114 EXPORT_SYMBOL(hci_recv_frame);
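
/* Illustrative sketch (not from the original file): a driver's RX path
 * sets the packet type before handing the skb over; ownership always
 * passes to hci_recv_frame(), which frees the skb itself on the error
 * paths above.
 *
 *      hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *      err = hci_recv_frame(hdev, skb);
 */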
4115
4116 /* Receive diagnostic message from HCI drivers */
4117 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4118 {
4119         /* Mark as diagnostic packet */
4120         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4121
4122         /* Time stamp */
4123         __net_timestamp(skb);
4124
4125         skb_queue_tail(&hdev->rx_q, skb);
4126         queue_work(hdev->workqueue, &hdev->rx_work);
4127
4128         return 0;
4129 }
4130 EXPORT_SYMBOL(hci_recv_diag);
4131
4132 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4133 {
4134         va_list vargs;
4135
4136         va_start(vargs, fmt);
4137         kfree_const(hdev->hw_info);
4138         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4139         va_end(vargs);
4140 }
4141 EXPORT_SYMBOL(hci_set_hw_info);
4142
4143 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4144 {
4145         va_list vargs;
4146
4147         va_start(vargs, fmt);
4148         kfree_const(hdev->fw_info);
4149         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4150         va_end(vargs);
4151 }
4152 EXPORT_SYMBOL(hci_set_fw_info);
4153
4154 /* ---- Interface to upper protocols ---- */
4155
4156 int hci_register_cb(struct hci_cb *cb)
4157 {
4158         BT_DBG("%p name %s", cb, cb->name);
4159
4160         mutex_lock(&hci_cb_list_lock);
4161         list_add_tail(&cb->list, &hci_cb_list);
4162         mutex_unlock(&hci_cb_list_lock);
4163
4164         return 0;
4165 }
4166 EXPORT_SYMBOL(hci_register_cb);
4167
4168 int hci_unregister_cb(struct hci_cb *cb)
4169 {
4170         BT_DBG("%p name %s", cb, cb->name);
4171
4172         mutex_lock(&hci_cb_list_lock);
4173         list_del(&cb->list);
4174         mutex_unlock(&hci_cb_list_lock);
4175
4176         return 0;
4177 }
4178 EXPORT_SYMBOL(hci_unregister_cb);
4179
4180 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4181 {
4182         int err;
4183
4184         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4185                skb->len);
4186
4187         /* Time stamp */
4188         __net_timestamp(skb);
4189
4190         /* Send copy to monitor */
4191         hci_send_to_monitor(hdev, skb);
4192
4193         if (atomic_read(&hdev->promisc)) {
4194                 /* Send copy to the sockets */
4195                 hci_send_to_sock(hdev, skb);
4196         }
4197
4198         /* Get rid of skb owner, prior to sending to the driver. */
4199         skb_orphan(skb);
4200
4201         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4202                 kfree_skb(skb);
4203                 return;
4204         }
4205
4206         err = hdev->send(hdev, skb);
4207         if (err < 0) {
4208                 bt_dev_err(hdev, "sending frame failed (%d)", err);
4209                 kfree_skb(skb);
4210         }
4211 }
4212
4213 /* Send HCI command */
4214 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4215                  const void *param)
4216 {
4217         struct sk_buff *skb;
4218
4219         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4220
4221         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4222         if (!skb) {
4223                 bt_dev_err(hdev, "no memory for command");
4224                 return -ENOMEM;
4225         }
4226
4227         /* Stand-alone HCI commands must be flagged as
4228          * single-command requests.
4229          */
4230         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4231
4232         skb_queue_tail(&hdev->cmd_q, skb);
4233         queue_work(hdev->workqueue, &hdev->cmd_work);
4234
4235         return 0;
4236 }
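
/* Illustrative sketch (not from the original file): queueing HCI_Reset,
 * which carries no parameters. The call only enqueues the command; the
 * Command Complete event is processed later on the rx path.
 *
 *      err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */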
4237
4238 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4239                    const void *param)
4240 {
4241         struct sk_buff *skb;
4242
4243         if (hci_opcode_ogf(opcode) != 0x3f) {
4244                 /* A controller receiving a command shall respond with either
4245                  * a Command Status Event or a Command Complete Event.
4246                  * Therefore, all standard HCI commands must be sent via the
4247                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4248                  * Some vendors do not comply with this rule for vendor-specific
4249                  * commands and do not return any event. We want to support
4250                  * unresponded commands for such cases only.
4251                  */
4252                 bt_dev_err(hdev, "unresponded command not supported");
4253                 return -EINVAL;
4254         }
4255
4256         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4257         if (!skb) {
4258                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4259                            opcode);
4260                 return -ENOMEM;
4261         }
4262
4263         hci_send_frame(hdev, skb);
4264
4265         return 0;
4266 }
4267 EXPORT_SYMBOL(__hci_cmd_send);
4268
4269 /* Get data from the previously sent command */
4270 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4271 {
4272         struct hci_command_hdr *hdr;
4273
4274         if (!hdev->sent_cmd)
4275                 return NULL;
4276
4277         hdr = (void *) hdev->sent_cmd->data;
4278
4279         if (hdr->opcode != cpu_to_le16(opcode))
4280                 return NULL;
4281
4282         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4283
4284         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4285 }
4286
4287 /* Send HCI command and wait for Command Complete event */
4288 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4289                              const void *param, u32 timeout)
4290 {
4291         struct sk_buff *skb;
4292
4293         if (!test_bit(HCI_UP, &hdev->flags))
4294                 return ERR_PTR(-ENETDOWN);
4295
4296         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4297
4298         hci_req_sync_lock(hdev);
4299         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4300         hci_req_sync_unlock(hdev);
4301
4302         return skb;
4303 }
4304 EXPORT_SYMBOL(hci_cmd_sync);
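
/* Illustrative sketch (not from the original file): the synchronous
 * variant returns the response skb or an ERR_PTR, so the caller checks
 * both and releases the skb when done.
 *
 *      skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                         HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      kfree_skb(skb);
 */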
4305
4306 /* Send ACL data */
4307 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4308 {
4309         struct hci_acl_hdr *hdr;
4310         int len = skb->len;
4311
4312         skb_push(skb, HCI_ACL_HDR_SIZE);
4313         skb_reset_transport_header(skb);
4314         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4315         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4316         hdr->dlen   = cpu_to_le16(len);
4317 }
4318
4319 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4320                           struct sk_buff *skb, __u16 flags)
4321 {
4322         struct hci_conn *conn = chan->conn;
4323         struct hci_dev *hdev = conn->hdev;
4324         struct sk_buff *list;
4325
4326         skb->len = skb_headlen(skb);
4327         skb->data_len = 0;
4328
4329         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4330
4331         switch (hdev->dev_type) {
4332         case HCI_PRIMARY:
4333                 hci_add_acl_hdr(skb, conn->handle, flags);
4334                 break;
4335         case HCI_AMP:
4336                 hci_add_acl_hdr(skb, chan->handle, flags);
4337                 break;
4338         default:
4339                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4340                 return;
4341         }
4342
4343         list = skb_shinfo(skb)->frag_list;
4344         if (!list) {
4345                 /* Non fragmented */
4346                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4347
4348                 skb_queue_tail(queue, skb);
4349         } else {
4350                 /* Fragmented */
4351                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4352
4353                 skb_shinfo(skb)->frag_list = NULL;
4354
4355                 /* Queue all fragments atomically. We need to use spin_lock_bh
4356                  * here because of 6LoWPAN links, as there this function is
4357                  * called from softirq and using normal spin lock could cause
4358                  * deadlocks.
4359                  */
4360                 spin_lock_bh(&queue->lock);
4361
4362                 __skb_queue_tail(queue, skb);
4363
4364                 flags &= ~ACL_START;
4365                 flags |= ACL_CONT;
4366                 do {
4367                         skb = list; list = list->next;
4368
4369                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4370                         hci_add_acl_hdr(skb, conn->handle, flags);
4371
4372                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4373
4374                         __skb_queue_tail(queue, skb);
4375                 } while (list);
4376
4377                 spin_unlock_bh(&queue->lock);
4378         }
4379 }
4380
4381 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4382 {
4383         struct hci_dev *hdev = chan->conn->hdev;
4384
4385         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4386
4387         hci_queue_acl(chan, &chan->data_q, skb, flags);
4388
4389         queue_work(hdev->workqueue, &hdev->tx_work);
4390 }
4391
4392 /* Send SCO data */
4393 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4394 {
4395         struct hci_dev *hdev = conn->hdev;
4396         struct hci_sco_hdr hdr;
4397
4398         BT_DBG("%s len %d", hdev->name, skb->len);
4399
4400         hdr.handle = cpu_to_le16(conn->handle);
4401         hdr.dlen   = skb->len;
4402
4403         skb_push(skb, HCI_SCO_HDR_SIZE);
4404         skb_reset_transport_header(skb);
4405         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4406
4407         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4408
4409         skb_queue_tail(&conn->data_q, skb);
4410         queue_work(hdev->workqueue, &hdev->tx_work);
4411 }
4412
4413 /* ---- HCI TX task (outgoing data) ---- */
4414
4415 /* HCI Connection scheduler */
4416 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4417                                      int *quote)
4418 {
4419         struct hci_conn_hash *h = &hdev->conn_hash;
4420         struct hci_conn *conn = NULL, *c;
4421         unsigned int num = 0, min = ~0;
4422
4423         /* We don't have to lock device here. Connections are always
4424          * added and removed with TX task disabled. */
4425
4426         rcu_read_lock();
4427
4428         list_for_each_entry_rcu(c, &h->list, list) {
4429                 if (c->type != type || skb_queue_empty(&c->data_q))
4430                         continue;
4431
4432                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4433                         continue;
4434
4435                 num++;
4436
4437                 if (c->sent < min) {
4438                         min  = c->sent;
4439                         conn = c;
4440                 }
4441
4442                 if (hci_conn_num(hdev, type) == num)
4443                         break;
4444         }
4445
4446         rcu_read_unlock();
4447
4448         if (conn) {
4449                 int cnt, q;
4450
4451                 switch (conn->type) {
4452                 case ACL_LINK:
4453                         cnt = hdev->acl_cnt;
4454                         break;
4455                 case SCO_LINK:
4456                 case ESCO_LINK:
4457                         cnt = hdev->sco_cnt;
4458                         break;
4459                 case LE_LINK:
4460                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4461                         break;
4462                 default:
4463                         cnt = 0;
4464                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4465                 }
4466
4467                 q = cnt / num;
4468                 *quote = q ? q : 1;
4469         } else
4470                 *quote = 0;
4471
4472         BT_DBG("conn %p quote %d", conn, *quote);
4473         return conn;
4474 }
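
/* Worked example (illustrative): with hdev->sco_cnt == 6 and three SCO
 * connections carrying queued data, the connection with the smallest
 * 'sent' count is picked and the quote becomes 6 / 3 = 2 packets. A zero
 * quotient is rounded up to 1 so a selected connection always makes
 * progress.
 */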
4475
4476 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4477 {
4478         struct hci_conn_hash *h = &hdev->conn_hash;
4479         struct hci_conn *c;
4480
4481         bt_dev_err(hdev, "link tx timeout");
4482
4483         rcu_read_lock();
4484
4485         /* Kill stalled connections */
4486         list_for_each_entry_rcu(c, &h->list, list) {
4487                 if (c->type == type && c->sent) {
4488                         bt_dev_err(hdev, "killing stalled connection %pMR",
4489                                    &c->dst);
4490                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4491                 }
4492         }
4493
4494         rcu_read_unlock();
4495 }
4496
4497 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4498                                       int *quote)
4499 {
4500         struct hci_conn_hash *h = &hdev->conn_hash;
4501         struct hci_chan *chan = NULL;
4502         unsigned int num = 0, min = ~0, cur_prio = 0;
4503         struct hci_conn *conn;
4504         int cnt, q, conn_num = 0;
4505
4506         BT_DBG("%s", hdev->name);
4507
4508         rcu_read_lock();
4509
4510         list_for_each_entry_rcu(conn, &h->list, list) {
4511                 struct hci_chan *tmp;
4512
4513                 if (conn->type != type)
4514                         continue;
4515
4516                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4517                         continue;
4518
4519                 conn_num++;
4520
4521                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4522                         struct sk_buff *skb;
4523
4524                         if (skb_queue_empty(&tmp->data_q))
4525                                 continue;
4526
4527                         skb = skb_peek(&tmp->data_q);
4528                         if (skb->priority < cur_prio)
4529                                 continue;
4530
4531                         if (skb->priority > cur_prio) {
4532                                 num = 0;
4533                                 min = ~0;
4534                                 cur_prio = skb->priority;
4535                         }
4536
4537                         num++;
4538
4539                         if (conn->sent < min) {
4540                                 min  = conn->sent;
4541                                 chan = tmp;
4542                         }
4543                 }
4544
4545                 if (hci_conn_num(hdev, type) == conn_num)
4546                         break;
4547         }
4548
4549         rcu_read_unlock();
4550
4551         if (!chan)
4552                 return NULL;
4553
4554         switch (chan->conn->type) {
4555         case ACL_LINK:
4556                 cnt = hdev->acl_cnt;
4557                 break;
4558         case AMP_LINK:
4559                 cnt = hdev->block_cnt;
4560                 break;
4561         case SCO_LINK:
4562         case ESCO_LINK:
4563                 cnt = hdev->sco_cnt;
4564                 break;
4565         case LE_LINK:
4566                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4567                 break;
4568         default:
4569                 cnt = 0;
4570                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4571         }
4572
4573         q = cnt / num;
4574         *quote = q ? q : 1;
4575         BT_DBG("chan %p quote %d", chan, *quote);
4576         return chan;
4577 }
4578
4579 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4580 {
4581         struct hci_conn_hash *h = &hdev->conn_hash;
4582         struct hci_conn *conn;
4583         int num = 0;
4584
4585         BT_DBG("%s", hdev->name);
4586
4587         rcu_read_lock();
4588
4589         list_for_each_entry_rcu(conn, &h->list, list) {
4590                 struct hci_chan *chan;
4591
4592                 if (conn->type != type)
4593                         continue;
4594
4595                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4596                         continue;
4597
4598                 num++;
4599
4600                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4601                         struct sk_buff *skb;
4602
4603                         if (chan->sent) {
4604                                 chan->sent = 0;
4605                                 continue;
4606                         }
4607
4608                         if (skb_queue_empty(&chan->data_q))
4609                                 continue;
4610
4611                         skb = skb_peek(&chan->data_q);
4612                         if (skb->priority >= HCI_PRIO_MAX - 1)
4613                                 continue;
4614
4615                         skb->priority = HCI_PRIO_MAX - 1;
4616
4617                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4618                                skb->priority);
4619                 }
4620
4621                 if (hci_conn_num(hdev, type) == num)
4622                         break;
4623         }
4624
4625         rcu_read_unlock();
4627 }
4628
4629 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4630 {
4631         /* Calculate count of blocks used by this packet */
4632         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4633 }
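
/* Worked example (illustrative): with hdev->block_len == 64, a 339-byte
 * ACL packet has 339 - HCI_ACL_HDR_SIZE = 335 payload bytes and occupies
 * DIV_ROUND_UP(335, 64) = 6 controller blocks.
 */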
4634
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
        if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       HCI_ACL_TX_TIMEOUT))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
}

/* Schedule SCO: dequeue up to the quoted number of packets from the
 * SCO connection with the lowest sent count and hand them straight
 * to the driver. SCO carries synchronous voice data, so there is no
 * fragmentation or priority handling here; the sent counter is
 * wrapped back to zero before it can overflow.
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

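/* eSCO scheduling mirrors hci_sched_sco(); both link types draw from
 * the same sco_cnt credit pool since the controller exposes a single
 * buffer pool for synchronous traffic.
 */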
static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

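/* Packet-based ACL scheduling: while controller credits remain, pick
 * the next channel via hci_chan_sent() and drain its run of
 * equal-priority packets up to the granted quote. The SCO/eSCO
 * queues are serviced after every ACL frame so that voice traffic is
 * never starved by a bulk transfer. If anything was sent, channel
 * priorities are recalculated for the next round.
 */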
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;

                        /* Send pending SCO packets right away */
                        hci_sched_sco(hdev);
                        hci_sched_esco(hdev);
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}

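/* Block-based ACL scheduling: same structure as hci_sched_acl_pkt(),
 * but credits are accounted in controller buffer blocks rather than
 * whole packets, so a single frame may consume several credits as
 * computed by __get_blocks(). On an AMP controller the data link
 * type is AMP_LINK instead of ACL_LINK.
 */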
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt) {
                                /* Not enough block credits left for this
                                 * packet; put it back instead of leaking
                                 * it and retry once credits return.
                                 */
                                skb_queue_head(&chan->data_q, skb);
                                return;
                        }

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

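/* Dispatch ACL scheduling according to the flow control mode the
 * controller advertised: packet-based on BR/EDR, block-based on AMP.
 */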
static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL link over BR/EDR controller */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
                return;

        /* No AMP link over AMP controller */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}

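/* LE scheduling follows the packet-based ACL scheme. Controllers
 * without a dedicated LE buffer pool report le_pkts == 0, in which
 * case LE traffic borrows credits from, and is accounted against,
 * the ACL pool.
 */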
static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

        __check_timeout(hdev, cnt);

        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;

                        /* Send pending SCO packets right away */
                        hci_sched_sco(hdev);
                        hci_sched_esco(hdev);
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}

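/* TX work item: run the per-link-type schedulers unless userspace
 * has claimed the device through HCI_USER_CHANNEL, then flush any
 * raw packets that bypass scheduling entirely.
 */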
static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                /* Schedule queues and send stuff to HCI driver */
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_acl(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet: strip the HCI ACL header, look up the connection
 * by handle and hand the payload to L2CAP. Packets for an unknown
 * handle are logged and dropped.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
                           handle);
        }

        kfree_skb(skb);
}

/* SCO data packet: strip the HCI SCO header, record the packet
 * status carried in the two low flag bits and hand the payload to
 * the SCO layer. Packets for an unknown handle are logged and
 * dropped.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                bt_cb(skb)->sco.pkt_status = flags & 0x03;
                sco_recv_scodata(conn, skb);
                return;
        } else {
                bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
                           handle);
        }

        kfree_skb(skb);
}

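/* A request is finished when the command queue is empty or the next
 * queued command is flagged as the start of a new request.
 */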
static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

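/* Re-queue a clone of the last command the controller saw. HCI_Reset
 * is deliberately never resent, presumably so a spurious reset
 * complete event cannot trigger an endless resend cycle.
 */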
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

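/* Match a command complete/status event against the pending request
 * machinery: events that do not correspond to the last sent command
 * are ignored (modulo the CSR reset quirk below). Once the request
 * containing the command is finished, either because it succeeded to
 * the end or a command failed, the per-request or per-skb completion
 * callback is reported back and any remaining queued commands
 * belonging to the same request are discarded.
 */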
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
                          hci_req_complete_t *req_complete,
                          hci_req_complete_skb_t *req_complete_skb)
{
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent, we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If we reach this point this event matches the last command sent */
        hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
                *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
                return;
        }

        if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
                *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
                return;
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
                        *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
                else
                        *req_complete = bt_cb(skb)->hci.req_complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

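/* RX work item: drain the receive queue, mirroring every frame to
 * the monitor socket (and to raw sockets in promiscuous mode) before
 * dispatching it to the event, ACL or SCO handler.
 */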
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                /* If the device has been opened in HCI_USER_CHANNEL,
                 * userspace has exclusive access to the device.
                 * While the device is in HCI_INIT, however, packets
                 * must still be processed so the driver can complete
                 * its setup().
                 */
                if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    !test_bit(HCI_INIT, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in these states. */
                        switch (hci_skb_pkt_type(skb)) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                        case HCI_ISODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (hci_skb_pkt_type(skb)) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

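/* CMD work item: if a command credit is available, send the next
 * queued command. A clone is kept in hdev->sent_cmd so the completion
 * event can be matched later, and cmd_timer is armed to catch
 * controllers that never respond. If cloning fails, the command is
 * put back and the work rescheduled.
 */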
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        if (hci_req_status_pend(hdev))
                                hci_dev_set_flag(hdev, HCI_CMD_PENDING);
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                cancel_delayed_work(&hdev->cmd_timer);
                        else
                                schedule_delayed_work(&hdev->cmd_timer,
                                                      HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}