/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

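/* The dut_mode attribute exposes Device Under Test mode as a boolean.
 * Reading reports the current HCI_DUT_MODE flag; writing 'Y'/'1' sends
 * HCI_OP_ENABLE_DUT_MODE to the controller, while writing 'N'/'0'
 * resets the controller, which is the only way to leave DUT mode.
 */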
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, then there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

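/* Queue an HCI Reset command and flag that a reset is in progress. */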
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

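/* Stage-one init for primary controllers: select packet-based flow
 * control and read the basic controller identity (features, version,
 * address).
 */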
static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

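/* Build event mask page 1 from the controller's capabilities so that
 * only events the controller can actually generate are unmasked.
 */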
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

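/* Compose the default link policy from the supported LMP features so
 * that only the modes the controller can actually do (role switch,
 * hold, sniff, park) are allowed.
 */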
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Peripheral Broadcast central role is supported
         * enable all necessary events for it.
         */
        if (lmp_cpb_central_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Peripheral Page Response Timeout */
                events[2] |= 0x20;      /* CPB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Peripheral Broadcast peripheral role is supported
         * enable all necessary events for it.
         */
        if (lmp_cpb_peripheral_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CPB Receive */
                events[2] |= 0x04;      /* CPB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        if (hdev->commands[18] & 0x04 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports LL Privacy feature, enable
                 * the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;      /* LE PHY Update Complete */

                /* If the controller supports LE Set Extended Scan Parameters
                 * and LE Set Extended Scan Enable commands, enable the
                 * corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * command, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* HCI TS spec forbids mixing of legacy and extended
                         * advertising commands wherein READ_ADV_TX_POWER is
                         * also included. So do not call it if extended adv
                         * is supported otherwise controller will return
                         * COMMAND_DISALLOWED for extended commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[38] & 0x80) {
                        /* Read LE Min/Max Tx Power */
                        hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE Accept List Size */
                        hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE Accept List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->commands[35] & 0x04) {
                        __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

                        /* Set RPA timeout */
                        hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
                                    &rpa_timeout);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Read local pairing options if the HCI command is supported */
        if (hdev->commands[41] & 0x08)
                hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* If supported, set erroneous data reporting to match the
         * wideband speech setting value
         */
        if (hdev->commands[18] & 0x08 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
                bool enabled = hci_dev_test_flag(hdev,
                                                 HCI_WIDEBAND_SPEECH_ENABLED);

                if (enabled !=
                    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
                        struct hci_cp_write_def_err_data_reporting cp;

                        cp.err_data_reporting = enabled ?
                                                ERR_DATA_REPORTING_ENABLED :
                                                ERR_DATA_REPORTING_DISABLED;

                        hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
                                    sizeof(cp), &cp);
                }
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

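/* Run the staged synchronous init sequence. Stages three and four only
 * apply to primary controllers; AMP controllers are done after stage
 * two.
 */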
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

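/* Minimal init for controllers that are not yet configured: reset
 * (unless quirked away), read the local version and, if the driver can
 * set a new BD_ADDR, read the current one.
 */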
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

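/* Advance the discovery state machine and notify the management
 * interface when discovery effectively starts or stops.
 */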
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

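/* Remove and free every entry in the inquiry cache; the caller is
 * expected to hold the hdev lock.
 */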
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

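        /* Re-insert sorted by ascending |RSSI| (strongest signal first);
         * entries with a pending name request are skipped over when
         * picking the insertion point.
         */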
        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

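/* Copy up to @num cached entries into @buf as struct inquiry_info
 * records and return how many were copied; runs under the hdev lock
 * and therefore must not sleep.
 */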
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

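/* Handle the HCIINQUIRY ioctl: flush a stale cache if needed, run the
 * inquiry synchronously and copy the cached results back to user space.
 */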
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
         * here and copy it to user space afterwards.
         */
1386         buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1387         if (!buf) {
1388                 err = -ENOMEM;
1389                 goto done;
1390         }
1391
1392         hci_dev_lock(hdev);
1393         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1394         hci_dev_unlock(hdev);
1395
1396         BT_DBG("num_rsp %d", ir.num_rsp);
1397
1398         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1399                 ptr += sizeof(ir);
1400                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1401                                  ir.num_rsp))
1402                         err = -EFAULT;
1403         } else
1404                 err = -EFAULT;
1405
1406         kfree(buf);
1407
1408 done:
1409         hci_dev_put(hdev);
1410         return err;
1411 }
1412
1413 /**
1414  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1415  *                                     (BD_ADDR) for a HCI device from
1416  *                                     a firmware node property.
1417  * @hdev:       The HCI device
1418  *
1419  * Search the firmware node for 'local-bd-address'.
1420  *
1421  * All-zero BD addresses are rejected, because those could be properties
1422  * that exist in the firmware tables, but were not updated by the firmware. For
1423  * example, the DTS could define 'local-bd-address', with zero BD addresses.
1424  */
1425 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1426 {
1427         struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1428         bdaddr_t ba;
1429         int ret;
1430
1431         ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1432                                             (u8 *)&ba, sizeof(ba));
1433         if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1434                 return;
1435
1436         bacpy(&hdev->public_addr, &ba);
1437 }
1438
1439 static int hci_dev_do_open(struct hci_dev *hdev)
1440 {
1441         int ret = 0;
1442
1443         BT_DBG("%s %p", hdev->name, hdev);
1444
1445         hci_req_sync_lock(hdev);
1446
1447         if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1448                 ret = -ENODEV;
1449                 goto done;
1450         }
1451
1452         if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1453             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1454                 /* Check for rfkill but allow the HCI setup stage to
1455                  * proceed (which in itself doesn't cause any RF activity).
1456                  */
1457                 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1458                         ret = -ERFKILL;
1459                         goto done;
1460                 }
1461
1462                 /* Check for valid public address or a configured static
1463                  * random address, but let the HCI setup proceed to
1464                  * be able to determine if there is a public address
1465                  * or not.
1466                  *
1467                  * In case of user channel usage, it is not important
1468                  * if a public address or static random address is
1469                  * available.
1470                  *
1471                  * This check is only valid for BR/EDR controllers
1472                  * since AMP controllers do not have an address.
1473                  */
1474                 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1475                     hdev->dev_type == HCI_PRIMARY &&
1476                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1477                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1478                         ret = -EADDRNOTAVAIL;
1479                         goto done;
1480                 }
1481         }
1482
1483         if (test_bit(HCI_UP, &hdev->flags)) {
1484                 ret = -EALREADY;
1485                 goto done;
1486         }
1487
1488         if (hdev->open(hdev)) {
1489                 ret = -EIO;
1490                 goto done;
1491         }
1492
1493         set_bit(HCI_RUNNING, &hdev->flags);
1494         hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1495
1496         atomic_set(&hdev->cmd_cnt, 1);
1497         set_bit(HCI_INIT, &hdev->flags);
1498
1499         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1500             test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1501                 bool invalid_bdaddr;
1502
1503                 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1504
1505                 if (hdev->setup)
1506                         ret = hdev->setup(hdev);
1507
1508                 /* The transport driver can set the quirk to mark the
1509                  * BD_ADDR invalid before creating the HCI device or in
1510                  * its setup callback.
1511                  */
1512                 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1513                                           &hdev->quirks);
1514
1515                 if (ret)
1516                         goto setup_failed;
1517
1518                 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1519                         if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1520                                 hci_dev_get_bd_addr_from_property(hdev);
1521
1522                         if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1523                             hdev->set_bdaddr) {
1524                                 ret = hdev->set_bdaddr(hdev,
1525                                                        &hdev->public_addr);
1526
1527                                 /* If setting of the BD_ADDR from the device
1528                                  * property succeeds, then treat the address
1529                                  * as valid even if the invalid BD_ADDR
1530                                  * quirk indicates otherwise.
1531                                  */
1532                                 if (!ret)
1533                                         invalid_bdaddr = false;
1534                         }
1535                 }
1536
1537 setup_failed:
1538                 /* The transport driver can set these quirks before
1539                  * creating the HCI device or in its setup callback.
1540                  *
1541                  * For the invalid BD_ADDR quirk it is possible that
1542                  * it becomes a valid address if the bootloader does
1543                  * provide it (see above).
1544                  *
1545                  * In case any of them is set, the controller has to
1546                  * start up as unconfigured.
1547                  */
1548                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1549                     invalid_bdaddr)
1550                         hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
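
		/* A transport driver would typically have done something
		 * like (hypothetical, not a reference to a specific driver):
		 *
		 *	set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
		 *
		 * in its probe routine, before hci_register_dev().
		 */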
1551
1552                 /* For an unconfigured controller it is required to
1553                  * read at least the version information provided by
1554                  * the Read Local Version Information command.
1555                  *
1556                  * If the set_bdaddr driver callback is provided, then
1557                  * also the original Bluetooth public device address
1558                  * will be read using the Read BD Address command.
1559                  */
1560                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1561                         ret = __hci_unconf_init(hdev);
1562         }
1563
1564         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1565                 /* If public address change is configured, ensure that
1566                  * the address gets programmed. If the driver does not
1567                  * support changing the public address, fail the power
1568                  * on procedure.
1569                  */
1570                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1571                     hdev->set_bdaddr)
1572                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1573                 else
1574                         ret = -EADDRNOTAVAIL;
1575         }
1576
1577         if (!ret) {
1578                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1579                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1580                         ret = __hci_init(hdev);
1581                         if (!ret && hdev->post_init)
1582                                 ret = hdev->post_init(hdev);
1583                 }
1584         }
1585
1586         /* If the HCI Reset command is clearing all diagnostic settings,
1587          * then they need to be reprogrammed after the init procedure
1588          * has completed.
1589          */
1590         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1591             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1592             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1593                 ret = hdev->set_diag(hdev, true);
1594
1595         msft_do_open(hdev);
1596         aosp_do_open(hdev);
1597
1598         clear_bit(HCI_INIT, &hdev->flags);
1599
1600         if (!ret) {
1601                 hci_dev_hold(hdev);
1602                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1603                 hci_adv_instances_set_rpa_expired(hdev, true);
1604                 set_bit(HCI_UP, &hdev->flags);
1605                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1606                 hci_leds_update_powered(hdev, true);
1607                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1608                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1609                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1610                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1611                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1612                     hdev->dev_type == HCI_PRIMARY) {
1613                         ret = __hci_req_hci_power_on(hdev);
1614                         mgmt_power_on(hdev, ret);
1615                 }
1616         } else {
1617                 /* Init failed, cleanup */
1618                 flush_work(&hdev->tx_work);
1619
1620                 /* Since hci_rx_work() can queue new cmd_work, it has to
1621                  * be flushed first to avoid an unexpected call of
1622                  * hci_cmd_work().
1623                  */
1624                 flush_work(&hdev->rx_work);
1625                 flush_work(&hdev->cmd_work);
1626
1627                 skb_queue_purge(&hdev->cmd_q);
1628                 skb_queue_purge(&hdev->rx_q);
1629
1630                 if (hdev->flush)
1631                         hdev->flush(hdev);
1632
1633                 if (hdev->sent_cmd) {
1634                         kfree_skb(hdev->sent_cmd);
1635                         hdev->sent_cmd = NULL;
1636                 }
1637
1638                 clear_bit(HCI_RUNNING, &hdev->flags);
1639                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1640
1641                 hdev->close(hdev);
1642                 hdev->flags &= BIT(HCI_RAW);
1643         }
1644
1645 done:
1646         hci_req_sync_unlock(hdev);
1647         return ret;
1648 }
1649
1650 /* ---- HCI ioctl helpers ---- */
1651
1652 int hci_dev_open(__u16 dev)
1653 {
1654         struct hci_dev *hdev;
1655         int err;
1656
1657         hdev = hci_dev_get(dev);
1658         if (!hdev)
1659                 return -ENODEV;
1660
1661         /* Devices that are marked as unconfigured can only be powered
1662          * up as user channel. Trying to bring them up as normal devices
1663          * will result in a failure. Only user channel operation is
1664          * possible.
1665          *
1666          * When this function is called for a user channel, the flag
1667          * HCI_USER_CHANNEL will be set first before attempting to
1668          * open the device.
1669          */
1670         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1671             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1672                 err = -EOPNOTSUPP;
1673                 goto done;
1674         }
1675
1676         /* We need to ensure that no other power on/off work is pending
1677          * before proceeding to call hci_dev_do_open. This is
1678          * particularly important if the setup procedure has not yet
1679          * completed.
1680          */
1681         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1682                 cancel_delayed_work(&hdev->power_off);
1683
1684         /* After this call it is guaranteed that the setup procedure
1685          * has finished. This means that error conditions like RFKILL
1686          * or no valid public or static random address apply.
1687          */
1688         flush_workqueue(hdev->req_workqueue);
1689
1690         /* For controllers that do not use the management interface and
1691          * are brought up through the legacy ioctl, set the HCI_BONDABLE
1692          * bit so that pairing works for them. Once the management
1693          * interface is in use, this bit will be cleared again and
1694          * userspace has to explicitly enable it.
1695          */
1696         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1697             !hci_dev_test_flag(hdev, HCI_MGMT))
1698                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1699
1700         err = hci_dev_do_open(hdev);
1701
1702 done:
1703         hci_dev_put(hdev);
1704         return err;
1705 }
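
/* A hypothetical legacy-ioctl caller of the above (illustrative only):
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(fd, HCIDEVUP, 0) < 0 && errno != EALREADY)	// power on hci0
 *		perror("HCIDEVUP");
 *
 * Such callers reach hci_dev_open() through the HCI socket ioctl handler.
 */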
1706
1707 /* This function requires the caller holds hdev->lock */
1708 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1709 {
1710         struct hci_conn_params *p;
1711
1712         list_for_each_entry(p, &hdev->le_conn_params, list) {
1713                 if (p->conn) {
1714                         hci_conn_drop(p->conn);
1715                         hci_conn_put(p->conn);
1716                         p->conn = NULL;
1717                 }
1718                 list_del_init(&p->action);
1719         }
1720
1721         BT_DBG("All LE pending actions cleared");
1722 }
1723
1724 int hci_dev_do_close(struct hci_dev *hdev)
1725 {
1726         bool auto_off;
1727         int err = 0;
1728
1729         BT_DBG("%s %p", hdev->name, hdev);
1730
1731         cancel_delayed_work(&hdev->power_off);
1732         cancel_delayed_work(&hdev->ncmd_timer);
1733
1734         hci_request_cancel_all(hdev);
1735         hci_req_sync_lock(hdev);
1736
1737         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1738             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1739             test_bit(HCI_UP, &hdev->flags)) {
1740                 /* Execute vendor specific shutdown routine */
1741                 if (hdev->shutdown)
1742                         err = hdev->shutdown(hdev);
1743         }
1744
1745         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1746                 cancel_delayed_work_sync(&hdev->cmd_timer);
1747                 hci_req_sync_unlock(hdev);
1748                 return err;
1749         }
1750
1751         hci_leds_update_powered(hdev, false);
1752
1753         /* Flush RX and TX works */
1754         flush_work(&hdev->tx_work);
1755         flush_work(&hdev->rx_work);
1756
1757         if (hdev->discov_timeout > 0) {
1758                 hdev->discov_timeout = 0;
1759                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1760                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1761         }
1762
1763         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1764                 cancel_delayed_work(&hdev->service_cache);
1765
1766         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1767                 struct adv_info *adv_instance;
1768
1769                 cancel_delayed_work_sync(&hdev->rpa_expired);
1770
1771                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1772                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1773         }
1774
1775         /* Avoid potential lockdep warnings from the *_flush() calls by
1776          * ensuring the workqueue is empty up front.
1777          */
1778         drain_workqueue(hdev->workqueue);
1779
1780         hci_dev_lock(hdev);
1781
1782         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1783
1784         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1785
1786         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1787             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1788             hci_dev_test_flag(hdev, HCI_MGMT))
1789                 __mgmt_power_off(hdev);
1790
1791         hci_inquiry_cache_flush(hdev);
1792         hci_pend_le_actions_clear(hdev);
1793         hci_conn_hash_flush(hdev);
1794         hci_dev_unlock(hdev);
1795
1796         smp_unregister(hdev);
1797
1798         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1799
1800         aosp_do_close(hdev);
1801         msft_do_close(hdev);
1802
1803         if (hdev->flush)
1804                 hdev->flush(hdev);
1805
1806         /* Reset device */
1807         skb_queue_purge(&hdev->cmd_q);
1808         atomic_set(&hdev->cmd_cnt, 1);
1809         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1810             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1811                 set_bit(HCI_INIT, &hdev->flags);
1812                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1813                 clear_bit(HCI_INIT, &hdev->flags);
1814         }
1815
1816         /* Flush cmd work */
1817         flush_work(&hdev->cmd_work);
1818
1819         /* Drop queues */
1820         skb_queue_purge(&hdev->rx_q);
1821         skb_queue_purge(&hdev->cmd_q);
1822         skb_queue_purge(&hdev->raw_q);
1823
1824         /* Drop last sent command */
1825         if (hdev->sent_cmd) {
1826                 cancel_delayed_work_sync(&hdev->cmd_timer);
1827                 kfree_skb(hdev->sent_cmd);
1828                 hdev->sent_cmd = NULL;
1829         }
1830
1831         clear_bit(HCI_RUNNING, &hdev->flags);
1832         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1833
1834         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1835                 wake_up(&hdev->suspend_wait_q);
1836
1837         /* After this point our queues are empty
1838          * and no tasks are scheduled. */
1839         hdev->close(hdev);
1840
1841         /* Clear flags */
1842         hdev->flags &= BIT(HCI_RAW);
1843         hci_dev_clear_volatile_flags(hdev);
1844
1845         /* Controller radio is available but is currently powered down */
1846         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1847
1848         memset(hdev->eir, 0, sizeof(hdev->eir));
1849         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1850         bacpy(&hdev->random_addr, BDADDR_ANY);
1851
1852         hci_req_sync_unlock(hdev);
1853
1854         hci_dev_put(hdev);
1855         return err;
1856 }
1857
1858 int hci_dev_close(__u16 dev)
1859 {
1860         struct hci_dev *hdev;
1861         int err;
1862
1863         hdev = hci_dev_get(dev);
1864         if (!hdev)
1865                 return -ENODEV;
1866
1867         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1868                 err = -EBUSY;
1869                 goto done;
1870         }
1871
1872         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1873                 cancel_delayed_work(&hdev->power_off);
1874
1875         err = hci_dev_do_close(hdev);
1876
1877 done:
1878         hci_dev_put(hdev);
1879         return err;
1880 }
1881
1882 static int hci_dev_do_reset(struct hci_dev *hdev)
1883 {
1884         int ret;
1885
1886         BT_DBG("%s %p", hdev->name, hdev);
1887
1888         hci_req_sync_lock(hdev);
1889
1890         /* Drop queues */
1891         skb_queue_purge(&hdev->rx_q);
1892         skb_queue_purge(&hdev->cmd_q);
1893
1894         /* Avoid potential lockdep warnings from the *_flush() calls by
1895          * ensuring the workqueue is empty up front.
1896          */
1897         drain_workqueue(hdev->workqueue);
1898
1899         hci_dev_lock(hdev);
1900         hci_inquiry_cache_flush(hdev);
1901         hci_conn_hash_flush(hdev);
1902         hci_dev_unlock(hdev);
1903
1904         if (hdev->flush)
1905                 hdev->flush(hdev);
1906
1907         atomic_set(&hdev->cmd_cnt, 1);
1908         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1909
1910         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1911
1912         hci_req_sync_unlock(hdev);
1913         return ret;
1914 }
1915
1916 int hci_dev_reset(__u16 dev)
1917 {
1918         struct hci_dev *hdev;
1919         int err;
1920
1921         hdev = hci_dev_get(dev);
1922         if (!hdev)
1923                 return -ENODEV;
1924
1925         if (!test_bit(HCI_UP, &hdev->flags)) {
1926                 err = -ENETDOWN;
1927                 goto done;
1928         }
1929
1930         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1931                 err = -EBUSY;
1932                 goto done;
1933         }
1934
1935         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1936                 err = -EOPNOTSUPP;
1937                 goto done;
1938         }
1939
1940         err = hci_dev_do_reset(hdev);
1941
1942 done:
1943         hci_dev_put(hdev);
1944         return err;
1945 }
1946
1947 int hci_dev_reset_stat(__u16 dev)
1948 {
1949         struct hci_dev *hdev;
1950         int ret = 0;
1951
1952         hdev = hci_dev_get(dev);
1953         if (!hdev)
1954                 return -ENODEV;
1955
1956         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1957                 ret = -EBUSY;
1958                 goto done;
1959         }
1960
1961         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1962                 ret = -EOPNOTSUPP;
1963                 goto done;
1964         }
1965
1966         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1967
1968 done:
1969         hci_dev_put(hdev);
1970         return ret;
1971 }
1972
1973 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1974 {
1975         bool conn_changed, discov_changed;
1976
1977         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1978
1979         if (scan & SCAN_PAGE)
1980                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1981                                                           HCI_CONNECTABLE);
1982         else
1983                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1984                                                            HCI_CONNECTABLE);
1985
1986         if (scan & SCAN_INQUIRY) {
1987                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1988                                                             HCI_DISCOVERABLE);
1989         } else {
1990                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1991                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1992                                                              HCI_DISCOVERABLE);
1993         }
1994
1995         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1996                 return;
1997
1998         if (conn_changed || discov_changed) {
1999                 /* In case this was disabled through mgmt */
2000                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2001
2002                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2003                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
2004
2005                 mgmt_new_settings(hdev);
2006         }
2007 }
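
/* For reference, the scan values handled above come straight from the
 * HCISETSCAN ioctl and use the Write Scan Enable encoding from hci.h:
 * SCAN_DISABLED (0x00), SCAN_INQUIRY (0x01) and SCAN_PAGE (0x02). For
 * example, dev_opt = SCAN_PAGE | SCAN_INQUIRY makes the controller both
 * connectable and discoverable.
 */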
2008
2009 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2010 {
2011         struct hci_dev *hdev;
2012         struct hci_dev_req dr;
2013         int err = 0;
2014
2015         if (copy_from_user(&dr, arg, sizeof(dr)))
2016                 return -EFAULT;
2017
2018         hdev = hci_dev_get(dr.dev_id);
2019         if (!hdev)
2020                 return -ENODEV;
2021
2022         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2023                 err = -EBUSY;
2024                 goto done;
2025         }
2026
2027         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2028                 err = -EOPNOTSUPP;
2029                 goto done;
2030         }
2031
2032         if (hdev->dev_type != HCI_PRIMARY) {
2033                 err = -EOPNOTSUPP;
2034                 goto done;
2035         }
2036
2037         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2038                 err = -EOPNOTSUPP;
2039                 goto done;
2040         }
2041
2042         switch (cmd) {
2043         case HCISETAUTH:
2044                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2045                                    HCI_INIT_TIMEOUT, NULL);
2046                 break;
2047
2048         case HCISETENCRYPT:
2049                 if (!lmp_encrypt_capable(hdev)) {
2050                         err = -EOPNOTSUPP;
2051                         break;
2052                 }
2053
2054                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2055                         /* Auth must be enabled first */
2056                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2057                                            HCI_INIT_TIMEOUT, NULL);
2058                         if (err)
2059                                 break;
2060                 }
2061
2062                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2063                                    HCI_INIT_TIMEOUT, NULL);
2064                 break;
2065
2066         case HCISETSCAN:
2067                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2068                                    HCI_INIT_TIMEOUT, NULL);
2069
2070                 /* Ensure that the connectable and discoverable states
2071                  * get correctly modified as this was a non-mgmt change.
2072                  */
2073                 if (!err)
2074                         hci_update_scan_state(hdev, dr.dev_opt);
2075                 break;
2076
2077         case HCISETLINKPOL:
2078                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2079                                    HCI_INIT_TIMEOUT, NULL);
2080                 break;
2081
2082         case HCISETLINKMODE:
2083                 hdev->link_mode = ((__u16) dr.dev_opt) &
2084                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2085                 break;
2086
2087         case HCISETPTYPE:
2088                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2089                         break;
2090
2091                 hdev->pkt_type = (__u16) dr.dev_opt;
2092                 mgmt_phy_configuration_changed(hdev, NULL);
2093                 break;
2094
2095         case HCISETACLMTU:
2096                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2097                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2098                 break;
2099
2100         case HCISETSCOMTU:
2101                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2102                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2103                 break;
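
        /* For HCISETACLMTU/HCISETSCOMTU above, dev_opt carries two 16-bit
         * values: the packet count in the low half and the MTU in the high
         * half, so a (hypothetical) little-endian caller would pack them as
         *
         *	dr.dev_opt = (mtu << 16) | pkts;
         */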
2104
2105         default:
2106                 err = -EINVAL;
2107                 break;
2108         }
2109
2110 done:
2111         hci_dev_put(hdev);
2112         return err;
2113 }
2114
2115 int hci_get_dev_list(void __user *arg)
2116 {
2117         struct hci_dev *hdev;
2118         struct hci_dev_list_req *dl;
2119         struct hci_dev_req *dr;
2120         int n = 0, size, err;
2121         __u16 dev_num;
2122
2123         if (get_user(dev_num, (__u16 __user *) arg))
2124                 return -EFAULT;
2125
2126         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2127                 return -EINVAL;
2128
2129         size = sizeof(*dl) + dev_num * sizeof(*dr);
2130
2131         dl = kzalloc(size, GFP_KERNEL);
2132         if (!dl)
2133                 return -ENOMEM;
2134
2135         dr = dl->dev_req;
2136
2137         read_lock(&hci_dev_list_lock);
2138         list_for_each_entry(hdev, &hci_dev_list, list) {
2139                 unsigned long flags = hdev->flags;
2140
2141                 /* When auto-off is configured, the transport is still
2142                  * running, but the device should nevertheless be
2143                  * reported as down.
2144                  */
2145                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2146                         flags &= ~BIT(HCI_UP);
2147
2148                 (dr + n)->dev_id  = hdev->id;
2149                 (dr + n)->dev_opt = flags;
2150
2151                 if (++n >= dev_num)
2152                         break;
2153         }
2154         read_unlock(&hci_dev_list_lock);
2155
2156         dl->dev_num = n;
2157         size = sizeof(*dl) + n * sizeof(*dr);
2158
2159         err = copy_to_user(arg, dl, size);
2160         kfree(dl);
2161
2162         return err ? -EFAULT : 0;
2163 }
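
/* A sketch of the matching userspace call (hypothetical, for illustration):
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = calloc(1, sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	if (!dl)
 *		return -ENOMEM;
 *	dl->dev_num = 16;			// room for up to 16 devices
 *	if (!ioctl(fd, HCIGETDEVLIST, dl))
 *		for (int i = 0; i < dl->dev_num; i++)
 *			printf("hci%u\n", dl->dev_req[i].dev_id);
 */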
2164
2165 int hci_get_dev_info(void __user *arg)
2166 {
2167         struct hci_dev *hdev;
2168         struct hci_dev_info di;
2169         unsigned long flags;
2170         int err = 0;
2171
2172         if (copy_from_user(&di, arg, sizeof(di)))
2173                 return -EFAULT;
2174
2175         hdev = hci_dev_get(di.dev_id);
2176         if (!hdev)
2177                 return -ENODEV;
2178
2179         /* When auto-off is configured, the transport is still
2180          * running, but the device should nevertheless be
2181          * reported as down.
2182          */
2183         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2184                 flags = hdev->flags & ~BIT(HCI_UP);
2185         else
2186                 flags = hdev->flags;
2187
2188         strcpy(di.name, hdev->name);
2189         di.bdaddr   = hdev->bdaddr;
2190         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2191         di.flags    = flags;
2192         di.pkt_type = hdev->pkt_type;
2193         if (lmp_bredr_capable(hdev)) {
2194                 di.acl_mtu  = hdev->acl_mtu;
2195                 di.acl_pkts = hdev->acl_pkts;
2196                 di.sco_mtu  = hdev->sco_mtu;
2197                 di.sco_pkts = hdev->sco_pkts;
2198         } else {
2199                 di.acl_mtu  = hdev->le_mtu;
2200                 di.acl_pkts = hdev->le_pkts;
2201                 di.sco_mtu  = 0;
2202                 di.sco_pkts = 0;
2203         }
2204         di.link_policy = hdev->link_policy;
2205         di.link_mode   = hdev->link_mode;
2206
2207         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2208         memcpy(&di.features, &hdev->features, sizeof(di.features));
2209
2210         if (copy_to_user(arg, &di, sizeof(di)))
2211                 err = -EFAULT;
2212
2213         hci_dev_put(hdev);
2214
2215         return err;
2216 }
2217
2218 /* ---- Interface to HCI drivers ---- */
2219
2220 static int hci_rfkill_set_block(void *data, bool blocked)
2221 {
2222         struct hci_dev *hdev = data;
2223
2224         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2225
2226         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2227                 return -EBUSY;
2228
2229         if (blocked) {
2230                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2231                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2232                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2233                         hci_dev_do_close(hdev);
2234         } else {
2235                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2236         }
2237
2238         return 0;
2239 }
2240
2241 static const struct rfkill_ops hci_rfkill_ops = {
2242         .set_block = hci_rfkill_set_block,
2243 };
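
/* These ops get wired up when a controller is registered; roughly (a
 * sketch of what hci_register_dev() does elsewhere in this file):
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 *	if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
 *		rfkill_destroy(hdev->rfkill);
 *		hdev->rfkill = NULL;
 *	}
 */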
2244
2245 static void hci_power_on(struct work_struct *work)
2246 {
2247         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2248         int err;
2249
2250         BT_DBG("%s", hdev->name);
2251
2252         if (test_bit(HCI_UP, &hdev->flags) &&
2253             hci_dev_test_flag(hdev, HCI_MGMT) &&
2254             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2255                 cancel_delayed_work(&hdev->power_off);
2256                 hci_req_sync_lock(hdev);
2257                 err = __hci_req_hci_power_on(hdev);
2258                 hci_req_sync_unlock(hdev);
2259                 mgmt_power_on(hdev, err);
2260                 return;
2261         }
2262
2263         err = hci_dev_do_open(hdev);
2264         if (err < 0) {
2265                 hci_dev_lock(hdev);
2266                 mgmt_set_powered_failed(hdev, err);
2267                 hci_dev_unlock(hdev);
2268                 return;
2269         }
2270
2271         /* During the HCI setup phase, a few error conditions are
2272          * ignored and they need to be checked now. If they are still
2273          * valid, it is important to turn the device back off.
2274          */
2275         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2276             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2277             (hdev->dev_type == HCI_PRIMARY &&
2278              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2279              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2280                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2281                 hci_dev_do_close(hdev);
2282         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2283                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2284                                    HCI_AUTO_OFF_TIMEOUT);
2285         }
2286
2287         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2288                 /* For unconfigured devices, set the HCI_RAW flag
2289                  * so that userspace can easily identify them.
2290                  */
2291                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2292                         set_bit(HCI_RAW, &hdev->flags);
2293
2294                 /* For fully configured devices, this will send
2295                  * the Index Added event. For unconfigured devices,
2296                  * it will send the Unconfigured Index Added event.
2297                  *
2298                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2299                  * and no event will be sent.
2300                  */
2301                 mgmt_index_added(hdev);
2302         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2303                 /* When the controller is now configured, then it
2304                  * is important to clear the HCI_RAW flag.
2305                  */
2306                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2307                         clear_bit(HCI_RAW, &hdev->flags);
2308
2309                 /* Powering on the controller with HCI_CONFIG set only
2310                  * happens with the transition from unconfigured to
2311                  * configured. This will send the Index Added event.
2312                  */
2313                 mgmt_index_added(hdev);
2314         }
2315 }
2316
2317 static void hci_power_off(struct work_struct *work)
2318 {
2319         struct hci_dev *hdev = container_of(work, struct hci_dev,
2320                                             power_off.work);
2321
2322         BT_DBG("%s", hdev->name);
2323
2324         hci_dev_do_close(hdev);
2325 }
2326
2327 static void hci_error_reset(struct work_struct *work)
2328 {
2329         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2330
2331         BT_DBG("%s", hdev->name);
2332
2333         if (hdev->hw_error)
2334                 hdev->hw_error(hdev, hdev->hw_error_code);
2335         else
2336                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2337
2338         if (hci_dev_do_close(hdev))
2339                 return;
2340
2341         hci_dev_do_open(hdev);
2342 }
2343
2344 void hci_uuids_clear(struct hci_dev *hdev)
2345 {
2346         struct bt_uuid *uuid, *tmp;
2347
2348         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2349                 list_del(&uuid->list);
2350                 kfree(uuid);
2351         }
2352 }
2353
2354 void hci_link_keys_clear(struct hci_dev *hdev)
2355 {
2356         struct link_key *key;
2357
2358         list_for_each_entry(key, &hdev->link_keys, list) {
2359                 list_del_rcu(&key->list);
2360                 kfree_rcu(key, rcu);
2361         }
2362 }
2363
2364 void hci_smp_ltks_clear(struct hci_dev *hdev)
2365 {
2366         struct smp_ltk *k;
2367
2368         list_for_each_entry(k, &hdev->long_term_keys, list) {
2369                 list_del_rcu(&k->list);
2370                 kfree_rcu(k, rcu);
2371         }
2372 }
2373
2374 void hci_smp_irks_clear(struct hci_dev *hdev)
2375 {
2376         struct smp_irk *k;
2377
2378         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2379                 list_del_rcu(&k->list);
2380                 kfree_rcu(k, rcu);
2381         }
2382 }
2383
2384 void hci_blocked_keys_clear(struct hci_dev *hdev)
2385 {
2386         struct blocked_key *b;
2387
2388         list_for_each_entry(b, &hdev->blocked_keys, list) {
2389                 list_del_rcu(&b->list);
2390                 kfree_rcu(b, rcu);
2391         }
2392 }
2393
2394 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2395 {
2396         bool blocked = false;
2397         struct blocked_key *b;
2398
2399         rcu_read_lock();
2400         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2401                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2402                         blocked = true;
2403                         break;
2404                 }
2405         }
2406
2407         rcu_read_unlock();
2408         return blocked;
2409 }
2410
2411 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2412 {
2413         struct link_key *k;
2414
2415         rcu_read_lock();
2416         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2417                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2418                         rcu_read_unlock();
2419
2420                         if (hci_is_blocked_key(hdev,
2421                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2422                                                k->val)) {
2423                                 bt_dev_warn_ratelimited(hdev,
2424                                                         "Link key blocked for %pMR",
2425                                                         &k->bdaddr);
2426                                 return NULL;
2427                         }
2428
2429                         return k;
2430                 }
2431         }
2432         rcu_read_unlock();
2433
2434         return NULL;
2435 }
2436
2437 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2438                                u8 key_type, u8 old_key_type)
2439 {
2440         /* Legacy key */
2441         if (key_type < 0x03)
2442                 return true;
2443
2444         /* Debug keys are insecure so don't store them persistently */
2445         if (key_type == HCI_LK_DEBUG_COMBINATION)
2446                 return false;
2447
2448         /* Changed combination key and there's no previous one */
2449         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2450                 return false;
2451
2452         /* Security mode 3 case */
2453         if (!conn)
2454                 return true;
2455
2456         /* BR/EDR key derived using SC from an LE link */
2457         if (conn->type == LE_LINK)
2458                 return true;
2459
2460         /* Neither the local nor the remote side requested no-bonding */
2461         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2462                 return true;
2463
2464         /* Local side had dedicated bonding as requirement */
2465         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2466                 return true;
2467
2468         /* Remote side had dedicated bonding as requirement */
2469         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2470                 return true;
2471
2472         /* If none of the above criteria match, then don't store the key
2473          * persistently */
2474         return false;
2475 }
2476
2477 static u8 ltk_role(u8 type)
2478 {
2479         if (type == SMP_LTK)
2480                 return HCI_ROLE_MASTER;
2481
2482         return HCI_ROLE_SLAVE;
2483 }
2484
2485 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2486                              u8 addr_type, u8 role)
2487 {
2488         struct smp_ltk *k;
2489
2490         rcu_read_lock();
2491         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2492                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2493                         continue;
2494
2495                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2496                         rcu_read_unlock();
2497
2498                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2499                                                k->val)) {
2500                                 bt_dev_warn_ratelimited(hdev,
2501                                                         "LTK blocked for %pMR",
2502                                                         &k->bdaddr);
2503                                 return NULL;
2504                         }
2505
2506                         return k;
2507                 }
2508         }
2509         rcu_read_unlock();
2510
2511         return NULL;
2512 }
2513
2514 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2515 {
2516         struct smp_irk *irk_to_return = NULL;
2517         struct smp_irk *irk;
2518
2519         rcu_read_lock();
2520         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2521                 if (!bacmp(&irk->rpa, rpa)) {
2522                         irk_to_return = irk;
2523                         goto done;
2524                 }
2525         }
2526
2527         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2528                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2529                         bacpy(&irk->rpa, rpa);
2530                         irk_to_return = irk;
2531                         goto done;
2532                 }
2533         }
2534
2535 done:
2536         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2537                                                 irk_to_return->val)) {
2538                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2539                                         &irk_to_return->bdaddr);
2540                 irk_to_return = NULL;
2541         }
2542
2543         rcu_read_unlock();
2544
2545         return irk_to_return;
2546 }
2547
2548 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2549                                      u8 addr_type)
2550 {
2551         struct smp_irk *irk_to_return = NULL;
2552         struct smp_irk *irk;
2553
2554         /* Identity Address must be public or static random (two MSBs set) */
2555         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2556                 return NULL;
2557
2558         rcu_read_lock();
2559         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2560                 if (addr_type == irk->addr_type &&
2561                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2562                         irk_to_return = irk;
2563                         goto done;
2564                 }
2565         }
2566
2567 done:
2568
2569         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2570                                                 irk_to_return->val)) {
2571                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2572                                         &irk_to_return->bdaddr);
2573                 irk_to_return = NULL;
2574         }
2575
2576         rcu_read_unlock();
2577
2578         return irk_to_return;
2579 }
2580
2581 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2582                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2583                                   u8 pin_len, bool *persistent)
2584 {
2585         struct link_key *key, *old_key;
2586         u8 old_key_type;
2587
2588         old_key = hci_find_link_key(hdev, bdaddr);
2589         if (old_key) {
2590                 old_key_type = old_key->type;
2591                 key = old_key;
2592         } else {
2593                 old_key_type = conn ? conn->key_type : 0xff;
2594                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2595                 if (!key)
2596                         return NULL;
2597                 list_add_rcu(&key->list, &hdev->link_keys);
2598         }
2599
2600         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2601
2602         /* Some buggy controller combinations generate a changed
2603          * combination key for legacy pairing even when there's no
2604          * previous key */
2605         if (type == HCI_LK_CHANGED_COMBINATION &&
2606             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2607                 type = HCI_LK_COMBINATION;
2608                 if (conn)
2609                         conn->key_type = type;
2610         }
2611
2612         bacpy(&key->bdaddr, bdaddr);
2613         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2614         key->pin_len = pin_len;
2615
2616         if (type == HCI_LK_CHANGED_COMBINATION)
2617                 key->type = old_key_type;
2618         else
2619                 key->type = type;
2620
2621         if (persistent)
2622                 *persistent = hci_persistent_key(hdev, conn, type,
2623                                                  old_key_type);
2624
2625         return key;
2626 }
2627
2628 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2629                             u8 addr_type, u8 type, u8 authenticated,
2630                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2631 {
2632         struct smp_ltk *key, *old_key;
2633         u8 role = ltk_role(type);
2634
2635         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2636         if (old_key) {
2637                 key = old_key;
2638         } else {
2639                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2640                 if (!key)
2641                         return NULL;
2642                 list_add_rcu(&key->list, &hdev->long_term_keys);
2643         }
2644
2645         bacpy(&key->bdaddr, bdaddr);
2646         key->bdaddr_type = addr_type;
2647         memcpy(key->val, tk, sizeof(key->val));
2648         key->authenticated = authenticated;
2649         key->ediv = ediv;
2650         key->rand = rand;
2651         key->enc_size = enc_size;
2652         key->type = type;
2653
2654         return key;
2655 }
2656
2657 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2658                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2659 {
2660         struct smp_irk *irk;
2661
2662         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2663         if (!irk) {
2664                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2665                 if (!irk)
2666                         return NULL;
2667
2668                 bacpy(&irk->bdaddr, bdaddr);
2669                 irk->addr_type = addr_type;
2670
2671                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2672         }
2673
2674         memcpy(irk->val, val, 16);
2675         bacpy(&irk->rpa, rpa);
2676
2677         return irk;
2678 }
2679
2680 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2681 {
2682         struct link_key *key;
2683
2684         key = hci_find_link_key(hdev, bdaddr);
2685         if (!key)
2686                 return -ENOENT;
2687
2688         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2689
2690         list_del_rcu(&key->list);
2691         kfree_rcu(key, rcu);
2692
2693         return 0;
2694 }
2695
2696 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2697 {
2698         struct smp_ltk *k;
2699         int removed = 0;
2700
2701         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2702                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2703                         continue;
2704
2705                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2706
2707                 list_del_rcu(&k->list);
2708                 kfree_rcu(k, rcu);
2709                 removed++;
2710         }
2711
2712         return removed ? 0 : -ENOENT;
2713 }
2714
2715 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2716 {
2717         struct smp_irk *k;
2718
2719         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2720                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2721                         continue;
2722
2723                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2724
2725                 list_del_rcu(&k->list);
2726                 kfree_rcu(k, rcu);
2727         }
2728 }
2729
2730 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2731 {
2732         struct smp_ltk *k;
2733         struct smp_irk *irk;
2734         u8 addr_type;
2735
2736         if (type == BDADDR_BREDR) {
2737                 if (hci_find_link_key(hdev, bdaddr))
2738                         return true;
2739                 return false;
2740         }
2741
2742         /* Convert to HCI addr type which struct smp_ltk uses */
2743         if (type == BDADDR_LE_PUBLIC)
2744                 addr_type = ADDR_LE_DEV_PUBLIC;
2745         else
2746                 addr_type = ADDR_LE_DEV_RANDOM;
2747
2748         irk = hci_get_irk(hdev, bdaddr, addr_type);
2749         if (irk) {
2750                 bdaddr = &irk->bdaddr;
2751                 addr_type = irk->addr_type;
2752         }
2753
2754         rcu_read_lock();
2755         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2756                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2757                         rcu_read_unlock();
2758                         return true;
2759                 }
2760         }
2761         rcu_read_unlock();
2762
2763         return false;
2764 }
2765
2766 /* HCI command timer function */
2767 static void hci_cmd_timeout(struct work_struct *work)
2768 {
2769         struct hci_dev *hdev = container_of(work, struct hci_dev,
2770                                             cmd_timer.work);
2771
2772         if (hdev->sent_cmd) {
2773                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2774                 u16 opcode = __le16_to_cpu(sent->opcode);
2775
2776                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2777         } else {
2778                 bt_dev_err(hdev, "command tx timeout");
2779         }
2780
2781         if (hdev->cmd_timeout)
2782                 hdev->cmd_timeout(hdev);
2783
2784         atomic_set(&hdev->cmd_cnt, 1);
2785         queue_work(hdev->workqueue, &hdev->cmd_work);
2786 }
2787
2788 /* HCI ncmd timer function */
2789 static void hci_ncmd_timeout(struct work_struct *work)
2790 {
2791         struct hci_dev *hdev = container_of(work, struct hci_dev,
2792                                             ncmd_timer.work);
2793
2794         bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
2795
2796         /* During HCI_INIT phase no events can be injected if the ncmd timer
2797          * triggers since the procedure has its own timeout handling.
2798          */
2799         if (test_bit(HCI_INIT, &hdev->flags))
2800                 return;
2801
2802         /* This is an irrecoverable state, inject hardware error event */
2803         hci_reset_dev(hdev);
2804 }
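
/* hci_reset_dev() recovers by injecting a synthetic HCI Hardware Error
 * event into the RX path; the event handler then schedules the
 * error_reset work, which lands in hci_error_reset() above and performs
 * a full close/open cycle of the controller.
 */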
2805
2806 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2807                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2808 {
2809         struct oob_data *data;
2810
2811         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2812                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2813                         continue;
2814                 if (data->bdaddr_type != bdaddr_type)
2815                         continue;
2816                 return data;
2817         }
2818
2819         return NULL;
2820 }
2821
2822 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2823                                u8 bdaddr_type)
2824 {
2825         struct oob_data *data;
2826
2827         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2828         if (!data)
2829                 return -ENOENT;
2830
2831         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2832
2833         list_del(&data->list);
2834         kfree(data);
2835
2836         return 0;
2837 }
2838
2839 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2840 {
2841         struct oob_data *data, *n;
2842
2843         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2844                 list_del(&data->list);
2845                 kfree(data);
2846         }
2847 }
2848
2849 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2850                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2851                             u8 *hash256, u8 *rand256)
2852 {
2853         struct oob_data *data;
2854
2855         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2856         if (!data) {
2857                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2858                 if (!data)
2859                         return -ENOMEM;
2860
2861                 bacpy(&data->bdaddr, bdaddr);
2862                 data->bdaddr_type = bdaddr_type;
2863                 list_add(&data->list, &hdev->remote_oob_data);
2864         }
2865
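        /* data->present ends up as a small bitmask consumed by the pairing
         * code: bit 0 signals valid P-192 data and bit 1 valid P-256 data,
         * so the value 0x03 below means both sets were provided.
         */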
2866         if (hash192 && rand192) {
2867                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2868                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2869                 if (hash256 && rand256)
2870                         data->present = 0x03;
2871         } else {
2872                 memset(data->hash192, 0, sizeof(data->hash192));
2873                 memset(data->rand192, 0, sizeof(data->rand192));
2874                 if (hash256 && rand256)
2875                         data->present = 0x02;
2876                 else
2877                         data->present = 0x00;
2878         }
2879
2880         if (hash256 && rand256) {
2881                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2882                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2883         } else {
2884                 memset(data->hash256, 0, sizeof(data->hash256));
2885                 memset(data->rand256, 0, sizeof(data->rand256));
2886                 if (hash192 && rand192)
2887                         data->present = 0x01;
2888         }
2889
2890         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2891
2892         return 0;
2893 }
2894
2895 /* This function requires the caller holds hdev->lock */
2896 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2897 {
2898         struct adv_info *adv_instance;
2899
2900         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2901                 if (adv_instance->instance == instance)
2902                         return adv_instance;
2903         }
2904
2905         return NULL;
2906 }
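
/* Typical (illustrative) usage pattern for the lookup helpers here, given
 * the hdev->lock requirement:
 *
 *	hci_dev_lock(hdev);
 *	adv = hci_find_adv_instance(hdev, instance);
 *	if (adv)
 *		...use adv while the lock is held...
 *	hci_dev_unlock(hdev);
 */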
2907
2908 /* This function requires the caller holds hdev->lock */
2909 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2910 {
2911         struct adv_info *cur_instance;
2912
2913         cur_instance = hci_find_adv_instance(hdev, instance);
2914         if (!cur_instance)
2915                 return NULL;
2916
2917         if (cur_instance == list_last_entry(&hdev->adv_instances,
2918                                             struct adv_info, list))
2919                 return list_first_entry(&hdev->adv_instances,
2920                                         struct adv_info, list);
2921         else
2922                 return list_next_entry(cur_instance, list);
2923 }
2924
2925 /* This function requires the caller holds hdev->lock */
2926 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2927 {
2928         struct adv_info *adv_instance;
2929
2930         adv_instance = hci_find_adv_instance(hdev, instance);
2931         if (!adv_instance)
2932                 return -ENOENT;
2933
2934         BT_DBG("%s removing %dMR", hdev->name, instance);
2935
2936         if (hdev->cur_adv_instance == instance) {
2937                 if (hdev->adv_instance_timeout) {
2938                         cancel_delayed_work(&hdev->adv_instance_expire);
2939                         hdev->adv_instance_timeout = 0;
2940                 }
2941                 hdev->cur_adv_instance = 0x00;
2942         }
2943
2944         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2945
2946         list_del(&adv_instance->list);
2947         kfree(adv_instance);
2948
2949         hdev->adv_instance_cnt--;
2950
2951         return 0;
2952 }
2953
2954 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2955 {
2956         struct adv_info *adv_instance, *n;
2957
2958         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2959                 adv_instance->rpa_expired = rpa_expired;
2960 }
2961
2962 /* This function requires the caller holds hdev->lock */
2963 void hci_adv_instances_clear(struct hci_dev *hdev)
2964 {
2965         struct adv_info *adv_instance, *n;
2966
2967         if (hdev->adv_instance_timeout) {
2968                 cancel_delayed_work(&hdev->adv_instance_expire);
2969                 hdev->adv_instance_timeout = 0;
2970         }
2971
2972         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2973                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2974                 list_del(&adv_instance->list);
2975                 kfree(adv_instance);
2976         }
2977
2978         hdev->adv_instance_cnt = 0;
2979         hdev->cur_adv_instance = 0x00;
2980 }
2981
2982 static void adv_instance_rpa_expired(struct work_struct *work)
2983 {
2984         struct adv_info *adv_instance = container_of(work, struct adv_info,
2985                                                      rpa_expired_cb.work);
2986
2987         BT_DBG("");
2988
2989         adv_instance->rpa_expired = true;
2990 }
2991
2992 /* This function requires the caller holds hdev->lock */
2993 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2994                          u16 adv_data_len, u8 *adv_data,
2995                          u16 scan_rsp_len, u8 *scan_rsp_data,
2996                          u16 timeout, u16 duration, s8 tx_power,
2997                          u32 min_interval, u32 max_interval)
2998 {
2999         struct adv_info *adv_instance;
3000
3001         adv_instance = hci_find_adv_instance(hdev, instance);
3002         if (adv_instance) {
3003                 memset(adv_instance->adv_data, 0,
3004                        sizeof(adv_instance->adv_data));
3005                 memset(adv_instance->scan_rsp_data, 0,
3006                        sizeof(adv_instance->scan_rsp_data));
3007         } else {
3008                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
3009                     instance < 1 || instance > hdev->le_num_of_adv_sets)
3010                         return -EOVERFLOW;
3011
3012                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
3013                 if (!adv_instance)
3014                         return -ENOMEM;
3015
3016                 adv_instance->pending = true;
3017                 adv_instance->instance = instance;
3018                 list_add(&adv_instance->list, &hdev->adv_instances);
3019                 hdev->adv_instance_cnt++;
3020         }
3021
3022         adv_instance->flags = flags;
3023         adv_instance->adv_data_len = adv_data_len;
3024         adv_instance->scan_rsp_len = scan_rsp_len;
3025         adv_instance->min_interval = min_interval;
3026         adv_instance->max_interval = max_interval;
3027         adv_instance->tx_power = tx_power;
3028
3029         if (adv_data_len)
3030                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3031
3032         if (scan_rsp_len)
3033                 memcpy(adv_instance->scan_rsp_data,
3034                        scan_rsp_data, scan_rsp_len);
3035
3036         adv_instance->timeout = timeout;
3037         adv_instance->remaining_time = timeout;
3038
3039         if (duration == 0)
3040                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3041         else
3042                 adv_instance->duration = duration;
3043
3044         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3045                           adv_instance_rpa_expired);
3046
3047         BT_DBG("%s for %d", hdev->name, instance);
3048
3049         return 0;
3050 }
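
/* Usage sketch (illustrative): registering advertising instance 1 with a
 * single Flags AD field. The caller must hold hdev->lock; all values and
 * names below are example choices only.
 */
#if 0
static int example_add_adv_instance(struct hci_dev *hdev)
{
        /* AD structure: len 0x02, type 0x01 (Flags), LE General Discoverable */
        u8 adv_data[] = { 0x02, 0x01, 0x06 };
        int err;

        hci_dev_lock(hdev);
        err = hci_add_adv_instance(hdev, 0x01, 0,
                                   sizeof(adv_data), adv_data,
                                   0, NULL,     /* no scan response data */
                                   0,           /* timeout: never expire */
                                   0,           /* duration: default rotation */
                                   HCI_TX_POWER_INVALID,
                                   hdev->le_adv_min_interval,
                                   hdev->le_adv_max_interval);
        hci_dev_unlock(hdev);

        return err;
}
#endif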
3051
3052 /* This function requires the caller holds hdev->lock */
3053 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3054                               u16 adv_data_len, u8 *adv_data,
3055                               u16 scan_rsp_len, u8 *scan_rsp_data)
3056 {
3057         struct adv_info *adv_instance;
3058
3059         adv_instance = hci_find_adv_instance(hdev, instance);
3060
3061         /* If advertisement doesn't exist, we can't modify its data */
3062         if (!adv_instance)
3063                 return -ENOENT;
3064
3065         if (adv_data_len) {
3066                 memset(adv_instance->adv_data, 0,
3067                        sizeof(adv_instance->adv_data));
3068                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3069                 adv_instance->adv_data_len = adv_data_len;
3070         }
3071
3072         if (scan_rsp_len) {
3073                 memset(adv_instance->scan_rsp_data, 0,
3074                        sizeof(adv_instance->scan_rsp_data));
3075                 memcpy(adv_instance->scan_rsp_data,
3076                        scan_rsp_data, scan_rsp_len);
3077                 adv_instance->scan_rsp_len = scan_rsp_len;
3078         }
3079
3080         return 0;
3081 }
3082
3083 /* This function requires the caller holds hdev->lock */
3084 void hci_adv_monitors_clear(struct hci_dev *hdev)
3085 {
3086         struct adv_monitor *monitor;
3087         int handle;
3088
3089         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3090                 hci_free_adv_monitor(hdev, monitor);
3091
3092         idr_destroy(&hdev->adv_monitors_idr);
3093 }
3094
3095 /* Frees the monitor structure and does some bookkeeping.
3096  * This function requires the caller holds hdev->lock.
3097  */
3098 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3099 {
3100         struct adv_pattern *pattern;
3101         struct adv_pattern *tmp;
3102
3103         if (!monitor)
3104                 return;
3105
3106         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3107                 list_del(&pattern->list);
3108                 kfree(pattern);
3109         }
3110
3111         if (monitor->handle)
3112                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3113
3114         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3115                 hdev->adv_monitors_cnt--;
3116                 mgmt_adv_monitor_removed(hdev, monitor->handle);
3117         }
3118
3119         kfree(monitor);
3120 }
3121
3122 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3123 {
3124         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3125 }
3126
3127 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3128 {
3129         return mgmt_remove_adv_monitor_complete(hdev, status);
3130 }
3131
3132 /* Assigns a handle to the monitor and, if offloading is supported and the
3133  * controller is powered, also attempts to forward the request to it.
3134  * Returns true if the request was forwarded (result pending), false otherwise.
3135  * This function requires the caller holds hdev->lock.
3136  */
3137 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3138                          int *err)
3139 {
3140         int min, max, handle;
3141
3142         *err = 0;
3143
3144         if (!monitor) {
3145                 *err = -EINVAL;
3146                 return false;
3147         }
3148
3149         min = HCI_MIN_ADV_MONITOR_HANDLE;
3150         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3151         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3152                            GFP_KERNEL);
3153         if (handle < 0) {
3154                 *err = handle;
3155                 return false;
3156         }
3157
3158         monitor->handle = handle;
3159
3160         if (!hdev_is_powered(hdev))
3161                 return false;
3162
3163         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3164         case HCI_ADV_MONITOR_EXT_NONE:
3165                 hci_update_background_scan(hdev);
3166                 bt_dev_dbg(hdev, "add monitor status %d", *err);
3167                 /* Message was not forwarded to the controller - not an error */
3168                 return false;
3169         case HCI_ADV_MONITOR_EXT_MSFT:
3170                 *err = msft_add_monitor_pattern(hdev, monitor);
3171                 bt_dev_dbg(hdev, "add monitor msft status %d", *err);
3173                 break;
3174         }
3175
3176         return (*err == 0);
3177 }
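
/* Usage sketch (illustrative): how a caller interprets the forwarding
 * semantics documented above. A true return means the result arrives
 * asynchronously via hci_add_adv_patterns_monitor_complete(); false with
 * *err == 0 means the monitor is handled purely in host software.
 */
#if 0
static void example_add_monitor(struct hci_dev *hdev,
                                struct adv_monitor *monitor)
{
        int err;

        hci_dev_lock(hdev);
        if (hci_add_adv_monitor(hdev, monitor, &err)) {
                /* Forwarded to the controller; completion is pending. */
        } else if (!err) {
                /* Accepted, software-based filtering only. */
        } else {
                /* Failed; the monitor was not registered. */
        }
        hci_dev_unlock(hdev);
}
#endif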
3178
3179 /* Attempts to tell the controller to remove the monitor and then frees it.
3180  * If the controller has no corresponding handle, the monitor is freed anyway.
3181  * Returns true if request is forwarded (result is pending), false otherwise.
3182  * This function requires the caller holds hdev->lock.
3183  */
3184 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3185                                    struct adv_monitor *monitor,
3186                                    u16 handle, int *err)
3187 {
3188         *err = 0;
3189
3190         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3191         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3192                 goto free_monitor;
3193         case HCI_ADV_MONITOR_EXT_MSFT:
3194                 *err = msft_remove_monitor(hdev, monitor, handle);
3195                 break;
3196         }
3197
3198         /* If no matching handle is registered, just free the monitor */
3199         if (*err == -ENOENT)
3200                 goto free_monitor;
3201
3202         return (*err == 0);
3203
3204 free_monitor:
3205         if (*err == -ENOENT)
3206                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3207                             monitor->handle);
3208         hci_free_adv_monitor(hdev, monitor);
3209
3210         *err = 0;
3211         return false;
3212 }
3213
3214 /* Returns true if request is forwarded (result is pending), false otherwise.
3215  * This function requires the caller holds hdev->lock.
3216  */
3217 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3218 {
3219         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3220         bool pending;
3221
3222         if (!monitor) {
3223                 *err = -EINVAL;
3224                 return false;
3225         }
3226
3227         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3228         if (!*err && !pending)
3229                 hci_update_background_scan(hdev);
3230
3231         bt_dev_dbg(hdev, "remove monitor handle %d, status %d, %spending",
3232                    handle, *err, pending ? "" : "not ");
3233
3234         return pending;
3235 }
3236
3237 /* Returns true if request is forwarded (result is pending), false otherwise.
3238  * This function requires the caller holds hdev->lock.
3239  */
3240 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3241 {
3242         struct adv_monitor *monitor;
3243         int idr_next_id = 0;
3244         bool pending = false;
3245         bool update = false;
3246
3247         *err = 0;
3248
3249         while (!*err && !pending) {
3250                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3251                 if (!monitor)
3252                         break;
3253
3254                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3255
3256                 if (!*err && !pending)
3257                         update = true;
3258         }
3259
3260         if (update)
3261                 hci_update_background_scan(hdev);
3262
3263         bt_dev_dbg(hdev, "remove all monitors status %d, %spending",
3264                    *err, pending ? "" : "not ");
3265
3266         return pending;
3267 }
3268
3269 /* This function requires the caller holds hdev->lock */
3270 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3271 {
3272         return !idr_is_empty(&hdev->adv_monitors_idr);
3273 }
3274
3275 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3276 {
3277         if (msft_monitor_supported(hdev))
3278                 return HCI_ADV_MONITOR_EXT_MSFT;
3279
3280         return HCI_ADV_MONITOR_EXT_NONE;
3281 }
3282
3283 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3284                                          bdaddr_t *bdaddr, u8 type)
3285 {
3286         struct bdaddr_list *b;
3287
3288         list_for_each_entry(b, bdaddr_list, list) {
3289                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3290                         return b;
3291         }
3292
3293         return NULL;
3294 }
3295
3296 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3297                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3298                                 u8 type)
3299 {
3300         struct bdaddr_list_with_irk *b;
3301
3302         list_for_each_entry(b, bdaddr_list, list) {
3303                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3304                         return b;
3305         }
3306
3307         return NULL;
3308 }
3309
3310 struct bdaddr_list_with_flags *
3311 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3312                                   bdaddr_t *bdaddr, u8 type)
3313 {
3314         struct bdaddr_list_with_flags *b;
3315
3316         list_for_each_entry(b, bdaddr_list, list) {
3317                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3318                         return b;
3319         }
3320
3321         return NULL;
3322 }
3323
3324 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3325 {
3326         struct bdaddr_list *b, *n;
3327
3328         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3329                 list_del(&b->list);
3330                 kfree(b);
3331         }
3332 }
3333
3334 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3335 {
3336         struct bdaddr_list *entry;
3337
3338         if (!bacmp(bdaddr, BDADDR_ANY))
3339                 return -EBADF;
3340
3341         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3342                 return -EEXIST;
3343
3344         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3345         if (!entry)
3346                 return -ENOMEM;
3347
3348         bacpy(&entry->bdaddr, bdaddr);
3349         entry->bdaddr_type = type;
3350
3351         list_add(&entry->list, list);
3352
3353         return 0;
3354 }
3355
3356 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3357                                         u8 type, u8 *peer_irk, u8 *local_irk)
3358 {
3359         struct bdaddr_list_with_irk *entry;
3360
3361         if (!bacmp(bdaddr, BDADDR_ANY))
3362                 return -EBADF;
3363
3364         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3365                 return -EEXIST;
3366
3367         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3368         if (!entry)
3369                 return -ENOMEM;
3370
3371         bacpy(&entry->bdaddr, bdaddr);
3372         entry->bdaddr_type = type;
3373
3374         if (peer_irk)
3375                 memcpy(entry->peer_irk, peer_irk, 16);
3376
3377         if (local_irk)
3378                 memcpy(entry->local_irk, local_irk, 16);
3379
3380         list_add(&entry->list, list);
3381
3382         return 0;
3383 }
3384
3385 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3386                                    u8 type, u32 flags)
3387 {
3388         struct bdaddr_list_with_flags *entry;
3389
3390         if (!bacmp(bdaddr, BDADDR_ANY))
3391                 return -EBADF;
3392
3393         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3394                 return -EEXIST;
3395
3396         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3397         if (!entry)
3398                 return -ENOMEM;
3399
3400         bacpy(&entry->bdaddr, bdaddr);
3401         entry->bdaddr_type = type;
3402         entry->current_flags = flags;
3403
3404         list_add(&entry->list, list);
3405
3406         return 0;
3407 }
3408
3409 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3410 {
3411         struct bdaddr_list *entry;
3412
3413         if (!bacmp(bdaddr, BDADDR_ANY)) {
3414                 hci_bdaddr_list_clear(list);
3415                 return 0;
3416         }
3417
3418         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3419         if (!entry)
3420                 return -ENOENT;
3421
3422         list_del(&entry->list);
3423         kfree(entry);
3424
3425         return 0;
3426 }
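
/* Usage sketch (illustrative): a round trip through the plain bdaddr list
 * helpers on the BR/EDR accept list. BDADDR_ANY is rejected by the add
 * helper and acts as a "clear the whole list" wildcard for the del helper.
 */
#if 0
static void example_bdaddr_list(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        hci_dev_lock(hdev);

        if (!hci_bdaddr_list_add(&hdev->accept_list, bdaddr, BDADDR_BREDR)) {
                /* Entry is now on the list and can be looked up again */
                if (hci_bdaddr_list_lookup(&hdev->accept_list, bdaddr,
                                           BDADDR_BREDR))
                        hci_bdaddr_list_del(&hdev->accept_list, bdaddr,
                                            BDADDR_BREDR);
        }

        hci_dev_unlock(hdev);
}
#endif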
3427
3428 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3429                                                         u8 type)
3430 {
3431         struct bdaddr_list_with_irk *entry;
3432
3433         if (!bacmp(bdaddr, BDADDR_ANY)) {
3434                 hci_bdaddr_list_clear(list);
3435                 return 0;
3436         }
3437
3438         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3439         if (!entry)
3440                 return -ENOENT;
3441
3442         list_del(&entry->list);
3443         kfree(entry);
3444
3445         return 0;
3446 }
3447
3448 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3449                                    u8 type)
3450 {
3451         struct bdaddr_list_with_flags *entry;
3452
3453         if (!bacmp(bdaddr, BDADDR_ANY)) {
3454                 hci_bdaddr_list_clear(list);
3455                 return 0;
3456         }
3457
3458         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3459         if (!entry)
3460                 return -ENOENT;
3461
3462         list_del(&entry->list);
3463         kfree(entry);
3464
3465         return 0;
3466 }
3467
3468 /* This function requires the caller holds hdev->lock */
3469 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3470                                                bdaddr_t *addr, u8 addr_type)
3471 {
3472         struct hci_conn_params *params;
3473
3474         list_for_each_entry(params, &hdev->le_conn_params, list) {
3475                 if (bacmp(&params->addr, addr) == 0 &&
3476                     params->addr_type == addr_type) {
3477                         return params;
3478                 }
3479         }
3480
3481         return NULL;
3482 }
3483
3484 /* This function requires the caller holds hdev->lock */
3485 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3486                                                   bdaddr_t *addr, u8 addr_type)
3487 {
3488         struct hci_conn_params *param;
3489
3490         switch (addr_type) {
3491         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3492                 addr_type = ADDR_LE_DEV_PUBLIC;
3493                 break;
3494         case ADDR_LE_DEV_RANDOM_RESOLVED:
3495                 addr_type = ADDR_LE_DEV_RANDOM;
3496                 break;
3497         }
3498
3499         list_for_each_entry(param, list, action) {
3500                 if (bacmp(&param->addr, addr) == 0 &&
3501                     param->addr_type == addr_type)
3502                         return param;
3503         }
3504
3505         return NULL;
3506 }
3507
3508 /* This function requires the caller holds hdev->lock */
3509 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3510                                             bdaddr_t *addr, u8 addr_type)
3511 {
3512         struct hci_conn_params *params;
3513
3514         params = hci_conn_params_lookup(hdev, addr, addr_type);
3515         if (params)
3516                 return params;
3517
3518         params = kzalloc(sizeof(*params), GFP_KERNEL);
3519         if (!params) {
3520                 bt_dev_err(hdev, "out of memory");
3521                 return NULL;
3522         }
3523
3524         bacpy(&params->addr, addr);
3525         params->addr_type = addr_type;
3526
3527         list_add(&params->list, &hdev->le_conn_params);
3528         INIT_LIST_HEAD(&params->action);
3529
3530         params->conn_min_interval = hdev->le_conn_min_interval;
3531         params->conn_max_interval = hdev->le_conn_max_interval;
3532         params->conn_latency = hdev->le_conn_latency;
3533         params->supervision_timeout = hdev->le_supv_timeout;
3534         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3535
3536         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3537
3538         return params;
3539 }
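
/* Usage sketch (illustrative): fetching or creating connection parameters
 * for an LE peer and enabling autoconnect. A new entry inherits the
 * controller-wide defaults initialized in hci_alloc_dev_priv().
 */
#if 0
static int example_enable_autoconnect(struct hci_dev *hdev, bdaddr_t *addr)
{
        struct hci_conn_params *params;

        hci_dev_lock(hdev);
        params = hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC);
        if (params)
                params->auto_connect = HCI_AUTO_CONN_ALWAYS;
        hci_dev_unlock(hdev);

        return params ? 0 : -ENOMEM;
}
#endif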
3540
3541 static void hci_conn_params_free(struct hci_conn_params *params)
3542 {
3543         if (params->conn) {
3544                 hci_conn_drop(params->conn);
3545                 hci_conn_put(params->conn);
3546         }
3547
3548         list_del(&params->action);
3549         list_del(&params->list);
3550         kfree(params);
3551 }
3552
3553 /* This function requires the caller holds hdev->lock */
3554 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3555 {
3556         struct hci_conn_params *params;
3557
3558         params = hci_conn_params_lookup(hdev, addr, addr_type);
3559         if (!params)
3560                 return;
3561
3562         hci_conn_params_free(params);
3563
3564         hci_update_background_scan(hdev);
3565
3566         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3567 }
3568
3569 /* This function requires the caller holds hdev->lock */
3570 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3571 {
3572         struct hci_conn_params *params, *tmp;
3573
3574         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3575                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3576                         continue;
3577
3578                 /* If we are trying a one-time connection to a disabled
3579                  * device, keep the params but mark them explicit-only.
3580                  */
3581                 if (params->explicit_connect) {
3582                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3583                         continue;
3584                 }
3585
3586                 list_del(&params->list);
3587                 kfree(params);
3588         }
3589
3590         BT_DBG("All LE disabled connection parameters were removed");
3591 }
3592
3593 /* This function requires the caller holds hdev->lock */
3594 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3595 {
3596         struct hci_conn_params *params, *tmp;
3597
3598         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3599                 hci_conn_params_free(params);
3600
3601         BT_DBG("All LE connection parameters were removed");
3602 }
3603
3604 /* Copy the Identity Address of the controller.
3605  *
3606  * If the controller has a public BD_ADDR, then by default use that one.
3607  * If this is an LE-only controller without a public address, default to
3608  * the static random address.
3609  *
3610  * For debugging purposes it is possible to force controllers with a
3611  * public address to use the static random address instead.
3612  *
3613  * In case BR/EDR has been disabled on a dual-mode controller and
3614  * userspace has configured a static address, then that address
3615  * becomes the identity address instead of the public BR/EDR address.
3616  */
3617 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3618                                u8 *bdaddr_type)
3619 {
3620         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3621             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3622             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3623              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3624                 bacpy(bdaddr, &hdev->static_addr);
3625                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3626         } else {
3627                 bacpy(bdaddr, &hdev->bdaddr);
3628                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3629         }
3630 }
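
/* Usage sketch (illustrative): resolving the identity address before
 * building a payload that must carry it, e.g. for pairing.
 */
#if 0
static void example_identity(struct hci_dev *hdev)
{
        bdaddr_t bdaddr;
        u8 bdaddr_type;

        hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
        /* bdaddr_type is now ADDR_LE_DEV_RANDOM for a static random
         * identity and ADDR_LE_DEV_PUBLIC otherwise.
         */
}
#endif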
3631
3632 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3633 {
3634         int i;
3635
3636         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3637                 clear_bit(i, hdev->suspend_tasks);
3638
3639         wake_up(&hdev->suspend_wait_q);
3640 }
3641
3642 static int hci_suspend_wait_event(struct hci_dev *hdev)
3643 {
3644 #define WAKE_COND                                                              \
3645         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3646          __SUSPEND_NUM_TASKS)
3647
3648         int i;
3649         int ret = wait_event_timeout(hdev->suspend_wait_q,
3650                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3651
3652         if (ret == 0) {
3653                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3654                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3655                         if (test_bit(i, hdev->suspend_tasks))
3656                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3657                         clear_bit(i, hdev->suspend_tasks);
3658                 }
3659
3660                 ret = -ETIMEDOUT;
3661         } else {
3662                 ret = 0;
3663         }
3664
3665         return ret;
3666 }
3667
3668 static void hci_prepare_suspend(struct work_struct *work)
3669 {
3670         struct hci_dev *hdev =
3671                 container_of(work, struct hci_dev, suspend_prepare);
3672
3673         hci_dev_lock(hdev);
3674         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3675         hci_dev_unlock(hdev);
3676 }
3677
3678 static int hci_change_suspend_state(struct hci_dev *hdev,
3679                                     enum suspended_state next)
3680 {
3681         hdev->suspend_state_next = next;
3682         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3683         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3684         return hci_suspend_wait_event(hdev);
3685 }
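
/* Sketch (illustrative): how an event handler is expected to signal one of
 * the suspend tasks so that hci_suspend_wait_event() can make progress;
 * hci_suspend_clear_tasks() above does the same for all bits at once.
 */
#if 0
static void example_signal_suspend_task(struct hci_dev *hdev, int task)
{
        if (test_and_clear_bit(task, hdev->suspend_tasks))
                wake_up(&hdev->suspend_wait_q);
}
#endif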
3686
3687 static void hci_clear_wake_reason(struct hci_dev *hdev)
3688 {
3689         hci_dev_lock(hdev);
3690
3691         hdev->wake_reason = 0;
3692         bacpy(&hdev->wake_addr, BDADDR_ANY);
3693         hdev->wake_addr_type = 0;
3694
3695         hci_dev_unlock(hdev);
3696 }
3697
3698 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3699                                 void *data)
3700 {
3701         struct hci_dev *hdev =
3702                 container_of(nb, struct hci_dev, suspend_notifier);
3703         int ret = 0;
3704         u8 state = BT_RUNNING;
3705
3706         /* If powering down, wait for completion. */
3707         if (mgmt_powering_down(hdev)) {
3708                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3709                 ret = hci_suspend_wait_event(hdev);
3710                 if (ret)
3711                         goto done;
3712         }
3713
3714         /* Suspend notifier should only act on events when powered. */
3715         if (!hdev_is_powered(hdev) ||
3716             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3717                 goto done;
3718
3719         if (action == PM_SUSPEND_PREPARE) {
3720                 /* Suspend consists of two actions:
3721                  *  - First, disconnect everything and make the controller not
3722                  *    connectable (disabling scanning)
3723                  *  - Second, program event filter/accept list and enable scan
3724                  */
3725                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3726                 if (!ret)
3727                         state = BT_SUSPEND_DISCONNECT;
3728
3729                 /* Only configure accept list if disconnect succeeded and wake
3730                  * isn't being prevented.
3731                  */
3732                 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3733                         ret = hci_change_suspend_state(hdev,
3734                                                 BT_SUSPEND_CONFIGURE_WAKE);
3735                         if (!ret)
3736                                 state = BT_SUSPEND_CONFIGURE_WAKE;
3737                 }
3738
3739                 hci_clear_wake_reason(hdev);
3740                 mgmt_suspending(hdev, state);
3741
3742         } else if (action == PM_POST_SUSPEND) {
3743                 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3744
3745                 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3746                               hdev->wake_addr_type);
3747         }
3748
3749 done:
3750         /* We always allow the suspend to proceed even if preparation
3751          * failed, and attempt to recover in resume.
3752          */
3753         if (ret)
3754                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3755                            action, ret);
3756
3757         return NOTIFY_DONE;
3758 }
3759
3760 /* Alloc HCI device */
3761 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
3762 {
3763         struct hci_dev *hdev;
3764         unsigned int alloc_size;
3765
3766         alloc_size = sizeof(*hdev);
3767         if (sizeof_priv) {
3768                 /* FIXME: the private area may need alignment */
3769                 alloc_size += sizeof_priv;
3770         }
3771
3772         hdev = kzalloc(alloc_size, GFP_KERNEL);
3773         if (!hdev)
3774                 return NULL;
3775
3776         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3777         hdev->esco_type = (ESCO_HV1);
3778         hdev->link_mode = (HCI_LM_ACCEPT);
3779         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3780         hdev->io_capability = 0x03;     /* No Input No Output */
3781         hdev->manufacturer = 0xffff;    /* Default to internal use */
3782         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3783         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3784         hdev->adv_instance_cnt = 0;
3785         hdev->cur_adv_instance = 0x00;
3786         hdev->adv_instance_timeout = 0;
3787
3788         hdev->advmon_allowlist_duration = 300;
3789         hdev->advmon_no_filter_duration = 500;
3790         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
3791
3792         hdev->sniff_max_interval = 800;
3793         hdev->sniff_min_interval = 80;
3794
3795         hdev->le_adv_channel_map = 0x07;
3796         hdev->le_adv_min_interval = 0x0800;
3797         hdev->le_adv_max_interval = 0x0800;
3798         hdev->le_scan_interval = 0x0060;
3799         hdev->le_scan_window = 0x0030;
3800         hdev->le_scan_int_suspend = 0x0400;
3801         hdev->le_scan_window_suspend = 0x0012;
3802         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3803         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3804         hdev->le_scan_int_adv_monitor = 0x0060;
3805         hdev->le_scan_window_adv_monitor = 0x0030;
3806         hdev->le_scan_int_connect = 0x0060;
3807         hdev->le_scan_window_connect = 0x0060;
3808         hdev->le_conn_min_interval = 0x0018;
3809         hdev->le_conn_max_interval = 0x0028;
3810         hdev->le_conn_latency = 0x0000;
3811         hdev->le_supv_timeout = 0x002a;
3812         hdev->le_def_tx_len = 0x001b;
3813         hdev->le_def_tx_time = 0x0148;
3814         hdev->le_max_tx_len = 0x001b;
3815         hdev->le_max_tx_time = 0x0148;
3816         hdev->le_max_rx_len = 0x001b;
3817         hdev->le_max_rx_time = 0x0148;
3818         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3819         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3820         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3821         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3822         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3823         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3824         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3825         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3826         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3827
3828         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3829         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3830         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3831         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3832         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3833         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3834
3835         /* default 1.28 sec page scan */
3836         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3837         hdev->def_page_scan_int = 0x0800;
3838         hdev->def_page_scan_window = 0x0012;
3839
3840         mutex_init(&hdev->lock);
3841         mutex_init(&hdev->req_lock);
3842
3843         INIT_LIST_HEAD(&hdev->mgmt_pending);
3844         INIT_LIST_HEAD(&hdev->reject_list);
3845         INIT_LIST_HEAD(&hdev->accept_list);
3846         INIT_LIST_HEAD(&hdev->uuids);
3847         INIT_LIST_HEAD(&hdev->link_keys);
3848         INIT_LIST_HEAD(&hdev->long_term_keys);
3849         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3850         INIT_LIST_HEAD(&hdev->remote_oob_data);
3851         INIT_LIST_HEAD(&hdev->le_accept_list);
3852         INIT_LIST_HEAD(&hdev->le_resolv_list);
3853         INIT_LIST_HEAD(&hdev->le_conn_params);
3854         INIT_LIST_HEAD(&hdev->pend_le_conns);
3855         INIT_LIST_HEAD(&hdev->pend_le_reports);
3856         INIT_LIST_HEAD(&hdev->conn_hash.list);
3857         INIT_LIST_HEAD(&hdev->adv_instances);
3858         INIT_LIST_HEAD(&hdev->blocked_keys);
3859
3860         INIT_WORK(&hdev->rx_work, hci_rx_work);
3861         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3862         INIT_WORK(&hdev->tx_work, hci_tx_work);
3863         INIT_WORK(&hdev->power_on, hci_power_on);
3864         INIT_WORK(&hdev->error_reset, hci_error_reset);
3865         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3866
3867         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3868
3869         skb_queue_head_init(&hdev->rx_q);
3870         skb_queue_head_init(&hdev->cmd_q);
3871         skb_queue_head_init(&hdev->raw_q);
3872
3873         init_waitqueue_head(&hdev->req_wait_q);
3874         init_waitqueue_head(&hdev->suspend_wait_q);
3875
3876         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3877         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
3878
3879         hci_request_setup(hdev);
3880
3881         hci_init_sysfs(hdev);
3882         discovery_init(hdev);
3883
3884         return hdev;
3885 }
3886 EXPORT_SYMBOL(hci_alloc_dev_priv);
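
/* Usage sketch (hypothetical driver code): allocating an hdev with a
 * driver-private area. This assumes the hci_get_priv() helper from
 * hci_core.h, which returns the area following struct hci_dev.
 */
#if 0
struct example_drv_data {
        int irq;
};

static struct hci_dev *example_alloc(void)
{
        struct hci_dev *hdev;
        struct example_drv_data *priv;

        hdev = hci_alloc_dev_priv(sizeof(struct example_drv_data));
        if (!hdev)
                return NULL;

        priv = hci_get_priv(hdev);
        priv->irq = -1;

        return hdev;
}
#endif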
3887
3888 /* Free HCI device */
3889 void hci_free_dev(struct hci_dev *hdev)
3890 {
3891         /* Will be freed via the device release callback. */
3892         put_device(&hdev->dev);
3893 }
3894 EXPORT_SYMBOL(hci_free_dev);
3895
3896 /* Register HCI device */
3897 int hci_register_dev(struct hci_dev *hdev)
3898 {
3899         int id, error;
3900
3901         if (!hdev->open || !hdev->close || !hdev->send)
3902                 return -EINVAL;
3903
3904         /* Do not allow HCI_AMP devices to register at index 0,
3905          * so the index can be used as the AMP controller ID.
3906          */
3907         switch (hdev->dev_type) {
3908         case HCI_PRIMARY:
3909                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3910                 break;
3911         case HCI_AMP:
3912                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3913                 break;
3914         default:
3915                 return -EINVAL;
3916         }
3917
3918         if (id < 0)
3919                 return id;
3920
3921         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3922         hdev->id = id;
3923
3924         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3925
3926         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3927         if (!hdev->workqueue) {
3928                 error = -ENOMEM;
3929                 goto err;
3930         }
3931
3932         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3933                                                       hdev->name);
3934         if (!hdev->req_workqueue) {
3935                 destroy_workqueue(hdev->workqueue);
3936                 error = -ENOMEM;
3937                 goto err;
3938         }
3939
3940         if (!IS_ERR_OR_NULL(bt_debugfs))
3941                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3942
3943         dev_set_name(&hdev->dev, "%s", hdev->name);
3944
3945         error = device_add(&hdev->dev);
3946         if (error < 0)
3947                 goto err_wqueue;
3948
3949         hci_leds_init(hdev);
3950
3951         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3952                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3953                                     hdev);
3954         if (hdev->rfkill) {
3955                 if (rfkill_register(hdev->rfkill) < 0) {
3956                         rfkill_destroy(hdev->rfkill);
3957                         hdev->rfkill = NULL;
3958                 }
3959         }
3960
3961         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3962                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3963
3964         hci_dev_set_flag(hdev, HCI_SETUP);
3965         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3966
3967         if (hdev->dev_type == HCI_PRIMARY) {
3968                 /* Assume BR/EDR support until proven otherwise (such as
3969                  * through reading supported features during init).
3970                  */
3971                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3972         }
3973
3974         write_lock(&hci_dev_list_lock);
3975         list_add(&hdev->list, &hci_dev_list);
3976         write_unlock(&hci_dev_list_lock);
3977
3978         /* Devices that are marked for raw-only usage are unconfigured
3979          * and should not be included in normal operation.
3980          */
3981         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3982                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3983
3984         hci_sock_dev_event(hdev, HCI_DEV_REG);
3985         hci_dev_hold(hdev);
3986
3987         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3988                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3989                 error = register_pm_notifier(&hdev->suspend_notifier);
3990                 if (error)
3991                         goto err_wqueue;
3992         }
3993
3994         queue_work(hdev->req_workqueue, &hdev->power_on);
3995
3996         idr_init(&hdev->adv_monitors_idr);
3997
3998         return id;
3999
4000 err_wqueue:
4001         destroy_workqueue(hdev->workqueue);
4002         destroy_workqueue(hdev->req_workqueue);
4003 err:
4004         ida_simple_remove(&hci_index_ida, hdev->id);
4005
4006         return error;
4007 }
4008 EXPORT_SYMBOL(hci_register_dev);
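
/* Usage sketch (hypothetical driver): the minimum a transport driver must
 * provide before hci_register_dev() will accept the device. The callback
 * bodies are stubs for illustration only.
 */
#if 0
static int example_open(struct hci_dev *hdev)
{
        return 0;
}

static int example_close(struct hci_dev *hdev)
{
        return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        kfree_skb(skb);         /* a real driver hands skb to the transport */
        return 0;
}

static int example_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_VIRTUAL;
        hdev->open = example_open;
        hdev->close = example_close;
        hdev->send = example_send;

        err = hci_register_dev(hdev);
        if (err < 0)
                hci_free_dev(hdev);

        return err;
}
#endif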
4009
4010 /* Unregister HCI device */
4011 void hci_unregister_dev(struct hci_dev *hdev)
4012 {
4013         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4014
4015         hci_dev_set_flag(hdev, HCI_UNREGISTER);
4016
4017         write_lock(&hci_dev_list_lock);
4018         list_del(&hdev->list);
4019         write_unlock(&hci_dev_list_lock);
4020
4021         cancel_work_sync(&hdev->power_on);
4022
4023         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
4024                 hci_suspend_clear_tasks(hdev);
4025                 unregister_pm_notifier(&hdev->suspend_notifier);
4026                 cancel_work_sync(&hdev->suspend_prepare);
4027         }
4028
4029         hci_dev_do_close(hdev);
4030
4031         if (!test_bit(HCI_INIT, &hdev->flags) &&
4032             !hci_dev_test_flag(hdev, HCI_SETUP) &&
4033             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4034                 hci_dev_lock(hdev);
4035                 mgmt_index_removed(hdev);
4036                 hci_dev_unlock(hdev);
4037         }
4038
4039         /* mgmt_index_removed should take care of emptying the
4040          * pending list */
4041         BUG_ON(!list_empty(&hdev->mgmt_pending));
4042
4043         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4044
4045         if (hdev->rfkill) {
4046                 rfkill_unregister(hdev->rfkill);
4047                 rfkill_destroy(hdev->rfkill);
4048         }
4049
4050         device_del(&hdev->dev);
4051         /* Actual cleanup is deferred until hci_release_dev(). */
4052         hci_dev_put(hdev);
4053 }
4054 EXPORT_SYMBOL(hci_unregister_dev);
4055
4056 /* Release HCI device */
4057 void hci_release_dev(struct hci_dev *hdev)
4058 {
4059         debugfs_remove_recursive(hdev->debugfs);
4060         kfree_const(hdev->hw_info);
4061         kfree_const(hdev->fw_info);
4062
4063         destroy_workqueue(hdev->workqueue);
4064         destroy_workqueue(hdev->req_workqueue);
4065
4066         hci_dev_lock(hdev);
4067         hci_bdaddr_list_clear(&hdev->reject_list);
4068         hci_bdaddr_list_clear(&hdev->accept_list);
4069         hci_uuids_clear(hdev);
4070         hci_link_keys_clear(hdev);
4071         hci_smp_ltks_clear(hdev);
4072         hci_smp_irks_clear(hdev);
4073         hci_remote_oob_data_clear(hdev);
4074         hci_adv_instances_clear(hdev);
4075         hci_adv_monitors_clear(hdev);
4076         hci_bdaddr_list_clear(&hdev->le_accept_list);
4077         hci_bdaddr_list_clear(&hdev->le_resolv_list);
4078         hci_conn_params_clear_all(hdev);
4079         hci_discovery_filter_clear(hdev);
4080         hci_blocked_keys_clear(hdev);
4081         hci_dev_unlock(hdev);
4082
4083         ida_simple_remove(&hci_index_ida, hdev->id);
4084         kfree(hdev);
4085 }
4086 EXPORT_SYMBOL(hci_release_dev);
4087
4088 /* Suspend HCI device */
4089 int hci_suspend_dev(struct hci_dev *hdev)
4090 {
4091         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4092         return 0;
4093 }
4094 EXPORT_SYMBOL(hci_suspend_dev);
4095
4096 /* Resume HCI device */
4097 int hci_resume_dev(struct hci_dev *hdev)
4098 {
4099         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4100         return 0;
4101 }
4102 EXPORT_SYMBOL(hci_resume_dev);
4103
4104 /* Reset HCI device */
4105 int hci_reset_dev(struct hci_dev *hdev)
4106 {
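        /* Hardware Error event: event code, parameter length 1, code 0x00 */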
4107         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4108         struct sk_buff *skb;
4109
4110         skb = bt_skb_alloc(3, GFP_ATOMIC);
4111         if (!skb)
4112                 return -ENOMEM;
4113
4114         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4115         skb_put_data(skb, hw_err, 3);
4116
4117         bt_dev_err(hdev, "Injecting HCI hardware error event");
4118
4119         /* Send Hardware Error to upper stack */
4120         return hci_recv_frame(hdev, skb);
4121 }
4122 EXPORT_SYMBOL(hci_reset_dev);
4123
4124 /* Receive frame from HCI drivers */
4125 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4126 {
4127         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4128                       !test_bit(HCI_INIT, &hdev->flags))) {
4129                 kfree_skb(skb);
4130                 return -ENXIO;
4131         }
4132
4133         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4134             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4135             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4136             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4137                 kfree_skb(skb);
4138                 return -EINVAL;
4139         }
4140
4141         /* Incoming skb */
4142         bt_cb(skb)->incoming = 1;
4143
4144         /* Time stamp */
4145         __net_timestamp(skb);
4146
4147         skb_queue_tail(&hdev->rx_q, skb);
4148         queue_work(hdev->workqueue, &hdev->rx_work);
4149
4150         return 0;
4151 }
4152 EXPORT_SYMBOL(hci_recv_frame);
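
/* Usage sketch (hypothetical driver RX path): wrapping raw bytes from the
 * transport into an skb and handing it to the core. The packet type must
 * be one of the four types accepted by hci_recv_frame().
 */
#if 0
static int example_rx_event(struct hci_dev *hdev, const void *buf, int len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
        skb_put_data(skb, buf, len);

        return hci_recv_frame(hdev, skb);
}
#endif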
4153
4154 /* Receive diagnostic message from HCI drivers */
4155 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4156 {
4157         /* Mark as diagnostic packet */
4158         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4159
4160         /* Time stamp */
4161         __net_timestamp(skb);
4162
4163         skb_queue_tail(&hdev->rx_q, skb);
4164         queue_work(hdev->workqueue, &hdev->rx_work);
4165
4166         return 0;
4167 }
4168 EXPORT_SYMBOL(hci_recv_diag);
4169
4170 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4171 {
4172         va_list vargs;
4173
4174         va_start(vargs, fmt);
4175         kfree_const(hdev->hw_info);
4176         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4177         va_end(vargs);
4178 }
4179 EXPORT_SYMBOL(hci_set_hw_info);
4180
4181 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4182 {
4183         va_list vargs;
4184
4185         va_start(vargs, fmt);
4186         kfree_const(hdev->fw_info);
4187         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4188         va_end(vargs);
4189 }
4190 EXPORT_SYMBOL(hci_set_fw_info);
4191
4192 /* ---- Interface to upper protocols ---- */
4193
4194 int hci_register_cb(struct hci_cb *cb)
4195 {
4196         BT_DBG("%p name %s", cb, cb->name);
4197
4198         mutex_lock(&hci_cb_list_lock);
4199         list_add_tail(&cb->list, &hci_cb_list);
4200         mutex_unlock(&hci_cb_list_lock);
4201
4202         return 0;
4203 }
4204 EXPORT_SYMBOL(hci_register_cb);
4205
4206 int hci_unregister_cb(struct hci_cb *cb)
4207 {
4208         BT_DBG("%p name %s", cb, cb->name);
4209
4210         mutex_lock(&hci_cb_list_lock);
4211         list_del(&cb->list);
4212         mutex_unlock(&hci_cb_list_lock);
4213
4214         return 0;
4215 }
4216 EXPORT_SYMBOL(hci_unregister_cb);
4217
4218 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4219 {
4220         int err;
4221
4222         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4223                skb->len);
4224
4225         /* Time stamp */
4226         __net_timestamp(skb);
4227
4228         /* Send copy to monitor */
4229         hci_send_to_monitor(hdev, skb);
4230
4231         if (atomic_read(&hdev->promisc)) {
4232                 /* Send copy to the sockets */
4233                 hci_send_to_sock(hdev, skb);
4234         }
4235
4236         /* Get rid of skb owner, prior to sending to the driver. */
4237         skb_orphan(skb);
4238
4239         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4240                 kfree_skb(skb);
4241                 return;
4242         }
4243
4244         err = hdev->send(hdev, skb);
4245         if (err < 0) {
4246                 bt_dev_err(hdev, "sending frame failed (%d)", err);
4247                 kfree_skb(skb);
4248         }
4249 }
4250
4251 /* Send HCI command */
4252 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4253                  const void *param)
4254 {
4255         struct sk_buff *skb;
4256
4257         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4258
4259         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4260         if (!skb) {
4261                 bt_dev_err(hdev, "no memory for command");
4262                 return -ENOMEM;
4263         }
4264
4265         /* Stand-alone HCI commands must be flagged as
4266          * single-command requests.
4267          */
4268         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4269
4270         skb_queue_tail(&hdev->cmd_q, skb);
4271         queue_work(hdev->workqueue, &hdev->cmd_work);
4272
4273         return 0;
4274 }
4275
4276 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4277                    const void *param)
4278 {
4279         struct sk_buff *skb;
4280
4281         if (hci_opcode_ogf(opcode) != 0x3f) {
4282                 /* A controller receiving a command shall respond with either
4283                  * a Command Status Event or a Command Complete Event.
4284                  * Therefore, all standard HCI commands must be sent via the
4285                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4286                  * Some vendors do not comply with this rule for vendor-specific
4287                  * commands and do not return any event. We want to support
4288                  * unresponded commands for such cases only.
4289                  */
4290                 bt_dev_err(hdev, "unresponded command not supported");
4291                 return -EINVAL;
4292         }
4293
4294         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4295         if (!skb) {
4296                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4297                            opcode);
4298                 return -ENOMEM;
4299         }
4300
4301         hci_send_frame(hdev, skb);
4302
4303         return 0;
4304 }
4305 EXPORT_SYMBOL(__hci_cmd_send);
4306
4307 /* Get data from the previously sent command */
4308 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4309 {
4310         struct hci_command_hdr *hdr;
4311
4312         if (!hdev->sent_cmd)
4313                 return NULL;
4314
4315         hdr = (void *) hdev->sent_cmd->data;
4316
4317         if (hdr->opcode != cpu_to_le16(opcode))
4318                 return NULL;
4319
4320         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4321
4322         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4323 }
4324
4325 /* Send HCI command and wait for command complete event */
4326 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4327                              const void *param, u32 timeout)
4328 {
4329         struct sk_buff *skb;
4330
4331         if (!test_bit(HCI_UP, &hdev->flags))
4332                 return ERR_PTR(-ENETDOWN);
4333
4334         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4335
4336         hci_req_sync_lock(hdev);
4337         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4338         hci_req_sync_unlock(hdev);
4339
4340         return skb;
4341 }
4342 EXPORT_SYMBOL(hci_cmd_sync);
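
/* Usage sketch (illustrative): issuing a command with hci_cmd_sync() and
 * consuming the Command Complete parameters. On failure an ERR_PTR is
 * returned; on success the caller owns and must free the skb.
 */
#if 0
static int example_read_local_version(struct hci_dev *hdev)
{
        struct hci_rp_read_local_version *rp;
        struct sk_buff *skb;

        skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                           HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        rp = (struct hci_rp_read_local_version *)skb->data;
        bt_dev_dbg(hdev, "HCI version %u", rp->hci_ver);

        kfree_skb(skb);
        return 0;
}
#endif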
4343
4344 /* Send ACL data */
4345 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4346 {
4347         struct hci_acl_hdr *hdr;
4348         int len = skb->len;
4349
4350         skb_push(skb, HCI_ACL_HDR_SIZE);
4351         skb_reset_transport_header(skb);
4352         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4353         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4354         hdr->dlen   = cpu_to_le16(len);
4355 }
4356
4357 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4358                           struct sk_buff *skb, __u16 flags)
4359 {
4360         struct hci_conn *conn = chan->conn;
4361         struct hci_dev *hdev = conn->hdev;
4362         struct sk_buff *list;
4363
4364         skb->len = skb_headlen(skb);
4365         skb->data_len = 0;
4366
4367         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4368
4369         switch (hdev->dev_type) {
4370         case HCI_PRIMARY:
4371                 hci_add_acl_hdr(skb, conn->handle, flags);
4372                 break;
4373         case HCI_AMP:
4374                 hci_add_acl_hdr(skb, chan->handle, flags);
4375                 break;
4376         default:
4377                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4378                 return;
4379         }
4380
4381         list = skb_shinfo(skb)->frag_list;
4382         if (!list) {
4383                 /* Non-fragmented */
4384                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4385
4386                 skb_queue_tail(queue, skb);
4387         } else {
4388                 /* Fragmented */
4389                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4390
4391                 skb_shinfo(skb)->frag_list = NULL;
4392
4393                 /* Queue all fragments atomically. We need to use spin_lock_bh
4394                  * here because of 6LoWPAN links, as there this function is
4395                  * called from softirq and using normal spin lock could cause
4396                  * deadlocks.
4397                  */
4398                 spin_lock_bh(&queue->lock);
4399
4400                 __skb_queue_tail(queue, skb);
4401
4402                 flags &= ~ACL_START;
4403                 flags |= ACL_CONT;
4404                 do {
4405                         skb = list; list = list->next;
4406
4407                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4408                         hci_add_acl_hdr(skb, conn->handle, flags);
4409
4410                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4411
4412                         __skb_queue_tail(queue, skb);
4413                 } while (list);
4414
4415                 spin_unlock_bh(&queue->lock);
4416         }
4417 }
4418
4419 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4420 {
4421         struct hci_dev *hdev = chan->conn->hdev;
4422
4423         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4424
4425         hci_queue_acl(chan, &chan->data_q, skb, flags);
4426
4427         queue_work(hdev->workqueue, &hdev->tx_work);
4428 }
4429
4430 /* Send SCO data */
4431 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4432 {
4433         struct hci_dev *hdev = conn->hdev;
4434         struct hci_sco_hdr hdr;
4435
4436         BT_DBG("%s len %d", hdev->name, skb->len);
4437
4438         hdr.handle = cpu_to_le16(conn->handle);
4439         hdr.dlen   = skb->len;
4440
4441         skb_push(skb, HCI_SCO_HDR_SIZE);
4442         skb_reset_transport_header(skb);
4443         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4444
4445         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4446
4447         skb_queue_tail(&conn->data_q, skb);
4448         queue_work(hdev->workqueue, &hdev->tx_work);
4449 }
4450
4451 /* ---- HCI TX task (outgoing data) ---- */
4452
4453 /* HCI Connection scheduler */
4454 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4455                                      int *quote)
4456 {
4457         struct hci_conn_hash *h = &hdev->conn_hash;
4458         struct hci_conn *conn = NULL, *c;
4459         unsigned int num = 0, min = ~0;
4460
4461         /* We don't have to lock the device here. Connections are always
4462          * added and removed with the TX task disabled. */
4463
4464         rcu_read_lock();
4465
4466         list_for_each_entry_rcu(c, &h->list, list) {
4467                 if (c->type != type || skb_queue_empty(&c->data_q))
4468                         continue;
4469
4470                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4471                         continue;
4472
4473                 num++;
4474
4475                 if (c->sent < min) {
4476                         min  = c->sent;
4477                         conn = c;
4478                 }
4479
4480                 if (hci_conn_num(hdev, type) == num)
4481                         break;
4482         }
4483
4484         rcu_read_unlock();
4485
4486         if (conn) {
4487                 int cnt, q;
4488
4489                 switch (conn->type) {
4490                 case ACL_LINK:
4491                         cnt = hdev->acl_cnt;
4492                         break;
4493                 case SCO_LINK:
4494                 case ESCO_LINK:
4495                         cnt = hdev->sco_cnt;
4496                         break;
4497                 case LE_LINK:
4498                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4499                         break;
4500                 default:
4501                         cnt = 0;
4502                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4503                 }
4504
4505                 q = cnt / num;
4506                 *quote = q ? q : 1;
4507         } else
4508                 *quote = 0;
4509
4510         BT_DBG("conn %p quote %d", conn, *quote);
4511         return conn;
4512 }
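
/* Worked example (illustrative): with hdev->acl_cnt == 8 free ACL buffers
 * and num == 3 ACL connections with queued data, the connection with the
 * smallest ->sent count is picked and granted a quote of 8 / 3 == 2
 * packets for this pass; any non-zero cnt yields a quote of at least 1.
 */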
4513
4514 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4515 {
4516         struct hci_conn_hash *h = &hdev->conn_hash;
4517         struct hci_conn *c;
4518
4519         bt_dev_err(hdev, "link tx timeout");
4520
4521         rcu_read_lock();
4522
4523         /* Kill stalled connections */
4524         list_for_each_entry_rcu(c, &h->list, list) {
4525                 if (c->type == type && c->sent) {
4526                         bt_dev_err(hdev, "killing stalled connection %pMR",
4527                                    &c->dst);
4528                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4529                 }
4530         }
4531
4532         rcu_read_unlock();
4533 }
4534
4535 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4536                                       int *quote)
4537 {
4538         struct hci_conn_hash *h = &hdev->conn_hash;
4539         struct hci_chan *chan = NULL;
4540         unsigned int num = 0, min = ~0, cur_prio = 0;
4541         struct hci_conn *conn;
4542         int cnt, q, conn_num = 0;
4543
4544         BT_DBG("%s", hdev->name);
4545
4546         rcu_read_lock();
4547
4548         list_for_each_entry_rcu(conn, &h->list, list) {
4549                 struct hci_chan *tmp;
4550
4551                 if (conn->type != type)
4552                         continue;
4553
4554                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4555                         continue;
4556
4557                 conn_num++;
4558
4559                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4560                         struct sk_buff *skb;
4561
4562                         if (skb_queue_empty(&tmp->data_q))
4563                                 continue;
4564
4565                         skb = skb_peek(&tmp->data_q);
4566                         if (skb->priority < cur_prio)
4567                                 continue;
4568
4569                         if (skb->priority > cur_prio) {
4570                                 num = 0;
4571                                 min = ~0;
4572                                 cur_prio = skb->priority;
4573                         }
4574
4575                         num++;
4576
4577                         if (conn->sent < min) {
4578                                 min  = conn->sent;
4579                                 chan = tmp;
4580                         }
4581                 }
4582
4583                 if (hci_conn_num(hdev, type) == conn_num)
4584                         break;
4585         }
4586
4587         rcu_read_unlock();
4588
4589         if (!chan)
4590                 return NULL;
4591
4592         switch (chan->conn->type) {
4593         case ACL_LINK:
4594                 cnt = hdev->acl_cnt;
4595                 break;
4596         case AMP_LINK:
4597                 cnt = hdev->block_cnt;
4598                 break;
4599         case SCO_LINK:
4600         case ESCO_LINK:
4601                 cnt = hdev->sco_cnt;
4602                 break;
4603         case LE_LINK:
4604                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4605                 break;
4606         default:
4607                 cnt = 0;
4608                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4609         }
4610
4611         q = cnt / num;
4612         *quote = q ? q : 1;
4613         BT_DBG("chan %p quote %d", chan, *quote);
4614         return chan;
4615 }
4616
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

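/* For block-based flow control the controller accounts buffer usage in
 * fixed-size blocks of block_len bytes rather than in whole packets.
 */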
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

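/* If the controller has returned no buffer credits for @type for longer
 * than HCI_ACL_TX_TIMEOUT, assume the link is stalled and tear down its
 * connections.  The timeout must be longer than the maximum link
 * supervision timeout (40.9 seconds).  Note that each scheduler must
 * check the last-tx timestamp of its own link type; in particular the
 * LE scheduler must not look at acl_last_tx, or a stalled LE queue
 * would get healthy ACL links killed.
 */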
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
	unsigned long last_tx;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return;

	switch (type) {
	case LE_LINK:
		last_tx = hdev->le_last_tx;
		break;
	default:
		last_tx = hdev->acl_last_tx;
		break;
	}

	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
		hci_link_tx_to(hdev, type);
}

/* Schedule SCO */
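/* Drain queued SCO frames while controller credits (sco_cnt) remain,
 * always giving the next slot to the connection with the fewest packets
 * in flight (hci_low_sent) so that multiple SCO links are served fairly.
 */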
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

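/* Schedule eSCO: identical to SCO scheduling, but for ESCO_LINK
 * connections, which draw from the same sco_cnt budget.
 */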
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

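/* Packet-based ACL scheduling: send up to the quote computed by
 * hci_chan_sent() from each chosen channel, stopping early when the
 * head of the queue drops below the priority the round started with.
 * SCO/eSCO are rescheduled after every ACL frame so synchronous
 * traffic is never starved, and priorities are recalculated whenever
 * at least one packet was sent.
 */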
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt, ACL_LINK);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

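/* Block-based ACL scheduling: the same algorithm as the packet-based
 * path, but the budget (block_cnt) and the per-channel quote are
 * accounted in buffer blocks via __get_blocks().  On an AMP controller
 * the data-bearing links are AMP_LINK.
 */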
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	__check_timeout(hdev, cnt, type);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

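/* Dispatch ACL scheduling according to the controller's flow control
 * mode: packet-based on BR/EDR, block-based on AMP.
 */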
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL links to schedule on a BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP links to schedule on an AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

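/* LE scheduling mirrors the packet-based ACL path.  Controllers that
 * report no dedicated LE buffers (le_pkts == 0) share the ACL pool, so
 * the remaining budget is written back to acl_cnt in that case.
 */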
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt, LE_LINK);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

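/* TX work: run all schedulers unless userspace has exclusive access
 * through HCI_USER_CHANNEL, then flush any raw packets queued by user
 * sockets.
 */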
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
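/* Strip the ACL header, look up the owning connection by handle and
 * pass the payload on to L2CAP; packets for unknown handles are logged
 * and dropped.
 */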
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	}

	bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
		   handle);

	kfree_skb(skb);
}

/* SCO data packet */
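/* Strip the SCO header and deliver the payload to the SCO socket
 * layer; the two low flag bits carry the packet status used for
 * erroneous data reporting.
 */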
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		bt_cb(skb)->sco.pkt_status = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	}

	bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
		   handle);

	kfree_skb(skb);
}

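/* A request is complete when the command queue is empty or when the
 * next queued command starts a new request.
 */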
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

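/* Requeue a clone of the last sent command so it gets sent again.
 * HCI_OP_RESET is deliberately excluded: a spurious reset complete is
 * what triggers the resend in the first place.
 */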
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

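/* Match a command complete/status event against the pending request
 * and, once the request has finished, hand back its completion
 * callback through @req_complete or @req_complete_skb.
 */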
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to handle it specially.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point, this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback is found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

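/* RX work: drain the receive queue, mirroring every packet to the
 * monitor and, in promiscuous mode, to HCI sockets, before dispatching
 * it by packet type.
 */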
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * While the device is in HCI_INIT, we still need to
		 * process the packets so that the driver can complete
		 * its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

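/* CMD work: while the controller has command credits (cmd_cnt), send
 * the next queued command, keeping a clone in sent_cmd for completion
 * matching, and arm the command timeout unless a reset is in flight.
 * If cloning fails, the command is requeued and the work rescheduled.
 */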
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}