/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

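/* The "dut_mode" attribute exposes Device Under Test mode. Reading it
 * reports 'Y' or 'N'; writing a boolean toggles the mode by sending
 * either HCI_OP_ENABLE_DUT_MODE or HCI_OP_RESET to the controller.
 * Assuming debugfs is mounted in the usual place and the controller is
 * hci0, it can be exercised from user space with, e.g.:
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */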
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

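/* The "vendor_diag" attribute drives the transport driver's set_diag()
 * callback to switch vendor-specific HCI diagnostic reporting on or off.
 * It is only created when the driver provides that callback (see
 * hci_debugfs_create_basic() below).
 */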
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or is in user channel operation, there is no need
         * to call the vendor callback. Instead just store the desired
         * value; the setting will be programmed when the controller gets
         * powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

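/* ---- HCI init requests ----
 *
 * Controller bring-up is split into staged requests run synchronously
 * from __hci_init() further down: hci_init1_req() resets the controller
 * and reads the basic identity, hci_init2_req() performs BR/EDR and LE
 * setup, and hci_init3_req()/hci_init4_req() program event masks and
 * optional features based on what the earlier stages reported. AMP
 * controllers only run the first two stages.
 */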
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

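/* The Set Event Mask command takes a 64-bit bitfield transmitted as
 * eight little-endian bytes; events[n] below therefore holds mask bits
 * (n * 8) through (n * 8 + 7), with the per-bit event assignments
 * defined by the Core specification.
 */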
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However, some controllers list
                 * max_page as 0 as long as SSP has not been enabled. To
                 * get proper debugging output, force max_page to at
                 * least 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for the Set
         * Event Mask Page 2 command, but then actually do not support it.
         * Since the default value is all bits set to zero, the command is
         * only required if the event mask has to be changed. In case no
         * change to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        if (hdev->commands[18] & 0x04 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports LL Privacy feature, enable
                 * the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;        /* LE PHY Update Complete */

                /* If the controller supports LE Set Extended Scan Parameters
                 * and LE Set Extended Scan Enable commands, enable the
                 * corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * command, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* The HCI spec forbids mixing legacy and extended
                         * advertising commands, and Read Advertising
                         * Channel TX Power is a legacy command. Do not
                         * issue it when extended advertising is supported,
                         * otherwise the controller will return
                         * COMMAND_DISALLOWED for extended commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[38] & 0x80) {
                        /* Read LE Min/Max Tx Power */
                        hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->commands[35] & 0x04) {
                        __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

                        /* Set RPA timeout */
                        hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
                                    &rpa_timeout);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They clearly indicate its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only send the command if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Read local pairing options if the HCI command is supported */
        if (hdev->commands[41] & 0x08)
                hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* If supported, set erroneous data reporting to match the
         * wideband speech setting value.
         */
        if (hdev->commands[18] & 0x08 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
                bool enabled = hci_dev_test_flag(hdev,
                                                 HCI_WIDEBAND_SPEECH_ENABLED);

                if (enabled !=
                    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
                        struct hci_cp_write_def_err_data_reporting cp;

                        cp.err_data_reporting = enabled ?
                                                ERR_DATA_REPORTING_ENABLED :
                                                ERR_DATA_REPORTING_DISABLED;

                        hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
                                    sizeof(cp), &cp);
                }
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * This means a controller can run through its setup phase and then
         * discover missing settings. In that case this function is not
         * called at that point; it will only be called later, during the
         * config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

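/* Minimal init used for unconfigured controllers: reset (unless quirked)
 * and read just enough identity information to finish configuration.
 * Note that the Read BD Address command is only queued when the driver
 * provides a set_bdaddr() callback.
 */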
static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

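/* Small request builders run via hci_req_sync() on behalf of the
 * HCISETSCAN, HCISETAUTH, HCISETENCRYPT and HCISETLINKPOL device ioctls;
 * each one simply queues the single corresponding HCI command.
 */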
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return; callers must drop the reference with
 * hci_dev_put() when done.
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

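/* Re-insert @ie into the resolve list, keeping the list sorted by
 * ascending |RSSI| so that devices with the strongest signal get their
 * names resolved first; entries with a name request already pending are
 * not displaced.
 */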
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

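/* Add or refresh an inquiry cache entry for @data. Returns MGMT_DEV_FOUND_*
 * flags describing the result: MGMT_DEV_FOUND_LEGACY_PAIRING when the
 * device does not support SSP, and MGMT_DEV_FOUND_CONFIRM_NAME when the
 * remote name is still unknown (or the entry could not be allocated).
 */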
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

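/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info and return the number of entries copied. The
 * caller must hold hdev->lock; this is the worker used from
 * hci_inquiry() below.
 */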
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

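/* Handler for the HCIINQUIRY ioctl. Flushes a stale inquiry cache and
 * runs a fresh inquiry if needed, waits for it to finish, then copies
 * the cached results back to user space. Note the HCI Inquiry length
 * parameter is specified in units of 1.28 seconds; the code below
 * over-approximates this as 2000 ms per unit for the wait timeout.
 */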
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary
         * buffer first and copy it to user space afterwards.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *                                     (BD_ADDR) for an HCI device from
 *                                     a firmware node property.
 * @hdev:       The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables but were never updated by the
 * firmware. For example, the DTS could define 'local-bd-address' with an
 * all-zero value.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
        struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
        bdaddr_t ba;
        int ret;

        ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
                                            (u8 *)&ba, sizeof(ba));
        if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
                return;

        bacpy(&hdev->public_addr, &ba);
}

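/* Power on and initialize the controller. This runs the transport
 * open() callback and the optional driver setup() callback, and then
 * (for configured controllers) the staged __hci_init() sequence, all
 * under the request sync lock. Callers reach this via the device open
 * and power-on paths.
 */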
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP) ||
            test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
                bool invalid_bdaddr;

                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set the quirk to mark the
                 * BD_ADDR invalid before creating the HCI device or in
                 * its setup callback.
                 */
                invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
                                          &hdev->quirks);

                if (ret)
                        goto setup_failed;

                if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
                        if (!bacmp(&hdev->public_addr, BDADDR_ANY))
                                hci_dev_get_bd_addr_from_property(hdev);

                        if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                            hdev->set_bdaddr) {
                                ret = hdev->set_bdaddr(hdev,
                                                       &hdev->public_addr);

                                /* If setting of the BD_ADDR from the device
                                 * property succeeds, then treat the address
                                 * as valid even if the invalid BD_ADDR
                                 * quirk indicates otherwise.
                                 */
                                if (!ret)
                                        invalid_bdaddr = false;
                        }
                }

setup_failed:
                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * For the invalid BD_ADDR quirk it is possible that
                 * it becomes a valid address if the bootloader does
                 * provide it (see above).
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    invalid_bdaddr)
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
1551                  * will be read using the Read BD Address command.
1552                  */
1553                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1554                         ret = __hci_unconf_init(hdev);
1555         }
1556
1557         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1558                 /* If public address change is configured, ensure that
1559                  * the address gets programmed. If the driver does not
1560                  * support changing the public address, fail the power
1561                  * on procedure.
1562                  */
1563                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1564                     hdev->set_bdaddr)
1565                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1566                 else
1567                         ret = -EADDRNOTAVAIL;
1568         }
1569
1570         if (!ret) {
1571                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1572                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1573                         ret = __hci_init(hdev);
1574                         if (!ret && hdev->post_init)
1575                                 ret = hdev->post_init(hdev);
1576                 }
1577         }
1578
1579         /* If the HCI Reset command is clearing all diagnostic settings,
1580          * then they need to be reprogrammed after the init procedure
1581          * completed.
1582          */
1583         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1584             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1585             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1586                 ret = hdev->set_diag(hdev, true);
1587
1588         msft_do_open(hdev);
1589
1590         clear_bit(HCI_INIT, &hdev->flags);
1591
1592         if (!ret) {
1593                 hci_dev_hold(hdev);
1594                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1595                 hci_adv_instances_set_rpa_expired(hdev, true);
1596                 set_bit(HCI_UP, &hdev->flags);
1597                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1598                 hci_leds_update_powered(hdev, true);
1599                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1600                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1601                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1602                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1603                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1604                     hdev->dev_type == HCI_PRIMARY) {
1605                         ret = __hci_req_hci_power_on(hdev);
1606                         mgmt_power_on(hdev, ret);
1607                 }
1608         } else {
1609                 /* Init failed, cleanup */
1610                 flush_work(&hdev->tx_work);
1611                 flush_work(&hdev->cmd_work);
1612                 flush_work(&hdev->rx_work);
1613
1614                 skb_queue_purge(&hdev->cmd_q);
1615                 skb_queue_purge(&hdev->rx_q);
1616
1617                 if (hdev->flush)
1618                         hdev->flush(hdev);
1619
1620                 if (hdev->sent_cmd) {
1621                         kfree_skb(hdev->sent_cmd);
1622                         hdev->sent_cmd = NULL;
1623                 }
1624
1625                 clear_bit(HCI_RUNNING, &hdev->flags);
1626                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1627
1628                 hdev->close(hdev);
1629                 hdev->flags &= BIT(HCI_RAW);
1630         }
1631
1632 done:
1633         hci_req_sync_unlock(hdev);
1634         return ret;
1635 }
1636
1637 /* ---- HCI ioctl helpers ---- */
1638
1639 int hci_dev_open(__u16 dev)
1640 {
1641         struct hci_dev *hdev;
1642         int err;
1643
1644         hdev = hci_dev_get(dev);
1645         if (!hdev)
1646                 return -ENODEV;
1647
1648         /* Devices that are marked as unconfigured can only be powered
1649          * up as user channel. Trying to bring them up as normal devices
1650          * will result in a failure. Only user channel operation is
1651          * possible.
1652          *
1653          * When this function is called for a user channel, the flag
1654          * HCI_USER_CHANNEL will be set first before attempting to
1655          * open the device.
1656          */
1657         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1658             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1659                 err = -EOPNOTSUPP;
1660                 goto done;
1661         }
1662
1663         /* We need to ensure that no other power on/off work is pending
1664          * before proceeding to call hci_dev_do_open. This is
1665          * particularly important if the setup procedure has not yet
1666          * completed.
1667          */
1668         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1669                 cancel_delayed_work(&hdev->power_off);
1670
1671         /* After this call it is guaranteed that the setup procedure
1672          * has finished. This means that error conditions like RFKILL
1673          * or no valid public or static random address apply.
1674          */
1675         flush_workqueue(hdev->req_workqueue);
1676
1677         /* For controllers not using the management interface and that
1678          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1679          * so that pairing works for them. Once the management interface
1680          * is in use this bit will be cleared again and userspace has
1681          * to explicitly enable it.
1682          */
1683         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1684             !hci_dev_test_flag(hdev, HCI_MGMT))
1685                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1686
1687         err = hci_dev_do_open(hdev);
1688
1689 done:
1690         hci_dev_put(hdev);
1691         return err;
1692 }
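
/* Editorial usage sketch (not part of this file): hci_dev_open() above
 * backs the HCIDEVUP ioctl on an HCI control socket. A minimal
 * userspace caller built against the BlueZ libbluetooth headers might
 * look like this; the device id 0 is an assumption and error handling
 * is trimmed.
 */
#if 0
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int bring_up_hci0(void)
{
	/* Control socket used for HCI device ioctls */
	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ctl < 0)
		return -errno;

	/* Ends up in hci_dev_open(0); EALREADY means it was already up */
	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY) {
		int err = -errno;

		close(ctl);
		return err;
	}

	return ctl;
}
#endif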
1693
1694 /* This function requires the caller holds hdev->lock */
1695 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1696 {
1697         struct hci_conn_params *p;
1698
1699         list_for_each_entry(p, &hdev->le_conn_params, list) {
1700                 if (p->conn) {
1701                         hci_conn_drop(p->conn);
1702                         hci_conn_put(p->conn);
1703                         p->conn = NULL;
1704                 }
1705                 list_del_init(&p->action);
1706         }
1707
1708         BT_DBG("All LE pending actions cleared");
1709 }
1710
1711 int hci_dev_do_close(struct hci_dev *hdev)
1712 {
1713         bool auto_off;
1714
1715         BT_DBG("%s %p", hdev->name, hdev);
1716
1717         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1718             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1719             test_bit(HCI_UP, &hdev->flags)) {
1720                 /* Execute vendor specific shutdown routine */
1721                 if (hdev->shutdown)
1722                         hdev->shutdown(hdev);
1723         }
1724
1725         cancel_delayed_work(&hdev->power_off);
1726
1727         hci_request_cancel_all(hdev);
1728         hci_req_sync_lock(hdev);
1729
1730         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1731                 cancel_delayed_work_sync(&hdev->cmd_timer);
1732                 hci_req_sync_unlock(hdev);
1733                 return 0;
1734         }
1735
1736         hci_leds_update_powered(hdev, false);
1737
1738         /* Flush RX and TX works */
1739         flush_work(&hdev->tx_work);
1740         flush_work(&hdev->rx_work);
1741
1742         if (hdev->discov_timeout > 0) {
1743                 hdev->discov_timeout = 0;
1744                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1745                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1746         }
1747
1748         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1749                 cancel_delayed_work(&hdev->service_cache);
1750
1751         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1752                 struct adv_info *adv_instance;
1753
1754                 cancel_delayed_work_sync(&hdev->rpa_expired);
1755
1756                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1757                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1758         }
1759
1760         /* Avoid potential lockdep warnings from the *_flush() calls by
1761          * ensuring the workqueue is empty up front.
1762          */
1763         drain_workqueue(hdev->workqueue);
1764
1765         hci_dev_lock(hdev);
1766
1767         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1768
1769         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1770
1771         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1772             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1773             hci_dev_test_flag(hdev, HCI_MGMT))
1774                 __mgmt_power_off(hdev);
1775
1776         hci_inquiry_cache_flush(hdev);
1777         hci_pend_le_actions_clear(hdev);
1778         hci_conn_hash_flush(hdev);
1779         hci_dev_unlock(hdev);
1780
1781         smp_unregister(hdev);
1782
1783         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1784
1785         msft_do_close(hdev);
1786
1787         if (hdev->flush)
1788                 hdev->flush(hdev);
1789
1790         /* Reset device */
1791         skb_queue_purge(&hdev->cmd_q);
1792         atomic_set(&hdev->cmd_cnt, 1);
1793         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1794             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1795                 set_bit(HCI_INIT, &hdev->flags);
1796                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1797                 clear_bit(HCI_INIT, &hdev->flags);
1798         }
1799
1800         /* Flush cmd work */
1801         flush_work(&hdev->cmd_work);
1802
1803         /* Drop queues */
1804         skb_queue_purge(&hdev->rx_q);
1805         skb_queue_purge(&hdev->cmd_q);
1806         skb_queue_purge(&hdev->raw_q);
1807
1808         /* Drop last sent command */
1809         if (hdev->sent_cmd) {
1810                 cancel_delayed_work_sync(&hdev->cmd_timer);
1811                 kfree_skb(hdev->sent_cmd);
1812                 hdev->sent_cmd = NULL;
1813         }
1814
1815         clear_bit(HCI_RUNNING, &hdev->flags);
1816         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1817
1818         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1819                 wake_up(&hdev->suspend_wait_q);
1820
1821         /* After this point our queues are empty
1822          * and no tasks are scheduled. */
1823         hdev->close(hdev);
1824
1825         /* Clear flags */
1826         hdev->flags &= BIT(HCI_RAW);
1827         hci_dev_clear_volatile_flags(hdev);
1828
1829         /* Controller radio is available but is currently powered down */
1830         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1831
1832         memset(hdev->eir, 0, sizeof(hdev->eir));
1833         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1834         bacpy(&hdev->random_addr, BDADDR_ANY);
1835
1836         hci_req_sync_unlock(hdev);
1837
1838         hci_dev_put(hdev);
1839         return 0;
1840 }
1841
1842 int hci_dev_close(__u16 dev)
1843 {
1844         struct hci_dev *hdev;
1845         int err;
1846
1847         hdev = hci_dev_get(dev);
1848         if (!hdev)
1849                 return -ENODEV;
1850
1851         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1852                 err = -EBUSY;
1853                 goto done;
1854         }
1855
1856         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1857                 cancel_delayed_work(&hdev->power_off);
1858
1859         err = hci_dev_do_close(hdev);
1860
1861 done:
1862         hci_dev_put(hdev);
1863         return err;
1864 }
1865
1866 static int hci_dev_do_reset(struct hci_dev *hdev)
1867 {
1868         int ret;
1869
1870         BT_DBG("%s %p", hdev->name, hdev);
1871
1872         hci_req_sync_lock(hdev);
1873
1874         /* Drop queues */
1875         skb_queue_purge(&hdev->rx_q);
1876         skb_queue_purge(&hdev->cmd_q);
1877
1878         /* Avoid potential lockdep warnings from the *_flush() calls by
1879          * ensuring the workqueue is empty up front.
1880          */
1881         drain_workqueue(hdev->workqueue);
1882
1883         hci_dev_lock(hdev);
1884         hci_inquiry_cache_flush(hdev);
1885         hci_conn_hash_flush(hdev);
1886         hci_dev_unlock(hdev);
1887
1888         if (hdev->flush)
1889                 hdev->flush(hdev);
1890
1891         atomic_set(&hdev->cmd_cnt, 1);
1892         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1893
1894         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1895
1896         hci_req_sync_unlock(hdev);
1897         return ret;
1898 }
1899
1900 int hci_dev_reset(__u16 dev)
1901 {
1902         struct hci_dev *hdev;
1903         int err;
1904
1905         hdev = hci_dev_get(dev);
1906         if (!hdev)
1907                 return -ENODEV;
1908
1909         if (!test_bit(HCI_UP, &hdev->flags)) {
1910                 err = -ENETDOWN;
1911                 goto done;
1912         }
1913
1914         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1915                 err = -EBUSY;
1916                 goto done;
1917         }
1918
1919         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1920                 err = -EOPNOTSUPP;
1921                 goto done;
1922         }
1923
1924         err = hci_dev_do_reset(hdev);
1925
1926 done:
1927         hci_dev_put(hdev);
1928         return err;
1929 }
1930
1931 int hci_dev_reset_stat(__u16 dev)
1932 {
1933         struct hci_dev *hdev;
1934         int ret = 0;
1935
1936         hdev = hci_dev_get(dev);
1937         if (!hdev)
1938                 return -ENODEV;
1939
1940         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1941                 ret = -EBUSY;
1942                 goto done;
1943         }
1944
1945         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1946                 ret = -EOPNOTSUPP;
1947                 goto done;
1948         }
1949
1950         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1951
1952 done:
1953         hci_dev_put(hdev);
1954         return ret;
1955 }
1956
1957 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1958 {
1959         bool conn_changed, discov_changed;
1960
1961         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1962
1963         if ((scan & SCAN_PAGE))
1964                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1965                                                           HCI_CONNECTABLE);
1966         else
1967                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1968                                                            HCI_CONNECTABLE);
1969
1970         if ((scan & SCAN_INQUIRY)) {
1971                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1972                                                             HCI_DISCOVERABLE);
1973         } else {
1974                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1975                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1976                                                              HCI_DISCOVERABLE);
1977         }
1978
1979         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1980                 return;
1981
1982         if (conn_changed || discov_changed) {
1983                 /* In case this was disabled through mgmt */
1984                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1985
1986                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1987                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1988
1989                 mgmt_new_settings(hdev);
1990         }
1991 }
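
/* Editorial usage sketch: the scan bits interpreted above arrive via
 * the HCISETSCAN ioctl handled in hci_dev_cmd() below. A hypothetical
 * userspace fragment enabling both page and inquiry scan ("piscan" in
 * hciconfig terms), where ctl is an assumed AF_BLUETOOTH/BTPROTO_HCI
 * control socket:
 */
#if 0
	struct hci_dev_req dr = {
		.dev_id  = 0,                        /* assumed hci0 */
		.dev_opt = SCAN_PAGE | SCAN_INQUIRY, /* connectable + discoverable */
	};

	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
		perror("HCISETSCAN");
#endif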
1992
1993 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1994 {
1995         struct hci_dev *hdev;
1996         struct hci_dev_req dr;
1997         int err = 0;
1998
1999         if (copy_from_user(&dr, arg, sizeof(dr)))
2000                 return -EFAULT;
2001
2002         hdev = hci_dev_get(dr.dev_id);
2003         if (!hdev)
2004                 return -ENODEV;
2005
2006         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2007                 err = -EBUSY;
2008                 goto done;
2009         }
2010
2011         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2012                 err = -EOPNOTSUPP;
2013                 goto done;
2014         }
2015
2016         if (hdev->dev_type != HCI_PRIMARY) {
2017                 err = -EOPNOTSUPP;
2018                 goto done;
2019         }
2020
2021         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2022                 err = -EOPNOTSUPP;
2023                 goto done;
2024         }
2025
2026         switch (cmd) {
2027         case HCISETAUTH:
2028                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2029                                    HCI_INIT_TIMEOUT, NULL);
2030                 break;
2031
2032         case HCISETENCRYPT:
2033                 if (!lmp_encrypt_capable(hdev)) {
2034                         err = -EOPNOTSUPP;
2035                         break;
2036                 }
2037
2038                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2039                         /* Auth must be enabled first */
2040                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2041                                            HCI_INIT_TIMEOUT, NULL);
2042                         if (err)
2043                                 break;
2044                 }
2045
2046                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2047                                    HCI_INIT_TIMEOUT, NULL);
2048                 break;
2049
2050         case HCISETSCAN:
2051                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2052                                    HCI_INIT_TIMEOUT, NULL);
2053
2054                 /* Ensure that the connectable and discoverable states
2055                  * get correctly modified as this was a non-mgmt change.
2056                  */
2057                 if (!err)
2058                         hci_update_scan_state(hdev, dr.dev_opt);
2059                 break;
2060
2061         case HCISETLINKPOL:
2062                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2063                                    HCI_INIT_TIMEOUT, NULL);
2064                 break;
2065
2066         case HCISETLINKMODE:
2067                 hdev->link_mode = ((__u16) dr.dev_opt) &
2068                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2069                 break;
2070
2071         case HCISETPTYPE:
2072                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2073                         break;
2074
2075                 hdev->pkt_type = (__u16) dr.dev_opt;
2076                 mgmt_phy_configuration_changed(hdev, NULL);
2077                 break;
2078
2079         case HCISETACLMTU:
2080                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2081                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2082                 break;
2083
2084         case HCISETSCOMTU:
2085                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2086                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2087                 break;
2088
2089         default:
2090                 err = -EINVAL;
2091                 break;
2092         }
2093
2094 done:
2095         hci_dev_put(hdev);
2096         return err;
2097 }
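
/* Editorial note with a worked example: for HCISETACLMTU and
 * HCISETSCOMTU above, dev_opt carries two 16-bit values. On a
 * little-endian host the low half is the packet count and the high
 * half the MTU, which is what the pointer arithmetic on &dr.dev_opt
 * extracts. A caller would pack them like this (the values and the
 * ctl socket are assumptions):
 */
#if 0
	uint16_t mtu = 1021, pkts = 8;  /* assumed example values */
	struct hci_dev_req dr;

	dr.dev_id  = 0;                 /* assumed hci0 */
	dr.dev_opt = pkts | ((uint32_t) mtu << 16);

	if (ioctl(ctl, HCISETACLMTU, (unsigned long) &dr) < 0)
		perror("HCISETACLMTU");
#endif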
2098
2099 int hci_get_dev_list(void __user *arg)
2100 {
2101         struct hci_dev *hdev;
2102         struct hci_dev_list_req *dl;
2103         struct hci_dev_req *dr;
2104         int n = 0, size, err;
2105         __u16 dev_num;
2106
2107         if (get_user(dev_num, (__u16 __user *) arg))
2108                 return -EFAULT;
2109
2110         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2111                 return -EINVAL;
2112
2113         size = sizeof(*dl) + dev_num * sizeof(*dr);
2114
2115         dl = kzalloc(size, GFP_KERNEL);
2116         if (!dl)
2117                 return -ENOMEM;
2118
2119         dr = dl->dev_req;
2120
2121         read_lock(&hci_dev_list_lock);
2122         list_for_each_entry(hdev, &hci_dev_list, list) {
2123                 unsigned long flags = hdev->flags;
2124
2125                 /* When auto-off is configured the transport is actually
2126                  * running, but the device should still be reported as
2127                  * being down.
2128                  */
2129                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2130                         flags &= ~BIT(HCI_UP);
2131
2132                 (dr + n)->dev_id  = hdev->id;
2133                 (dr + n)->dev_opt = flags;
2134
2135                 if (++n >= dev_num)
2136                         break;
2137         }
2138         read_unlock(&hci_dev_list_lock);
2139
2140         dl->dev_num = n;
2141         size = sizeof(*dl) + n * sizeof(*dr);
2142
2143         err = copy_to_user(arg, dl, size);
2144         kfree(dl);
2145
2146         return err ? -EFAULT : 0;
2147 }
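
/* Editorial usage sketch for the ioctl implemented above: userspace
 * passes a buffer that starts with the number of available slots and
 * reads back dev_num filled entries. Error handling is trimmed and
 * ctl is again an assumed HCI control socket.
 */
#if 0
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int i;

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
	if (!dl)
		return -ENOMEM;

	dl->dev_num = HCI_MAX_DEV;
	dr = dl->dev_req;

	if (ioctl(ctl, HCIGETDEVLIST, (void *) dl) == 0)
		for (i = 0; i < dl->dev_num; i++)
			printf("hci%u flags 0x%x\n",
			       (unsigned) (dr + i)->dev_id,
			       (unsigned) (dr + i)->dev_opt);

	free(dl);
#endif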
2148
2149 int hci_get_dev_info(void __user *arg)
2150 {
2151         struct hci_dev *hdev;
2152         struct hci_dev_info di;
2153         unsigned long flags;
2154         int err = 0;
2155
2156         if (copy_from_user(&di, arg, sizeof(di)))
2157                 return -EFAULT;
2158
2159         hdev = hci_dev_get(di.dev_id);
2160         if (!hdev)
2161                 return -ENODEV;
2162
2163         /* When auto-off is configured the transport is actually
2164          * running, but the device should still be reported as
2165          * being down.
2166          */
2167         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2168                 flags = hdev->flags & ~BIT(HCI_UP);
2169         else
2170                 flags = hdev->flags;
2171
2172         strcpy(di.name, hdev->name);
2173         di.bdaddr   = hdev->bdaddr;
2174         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2175         di.flags    = flags;
2176         di.pkt_type = hdev->pkt_type;
2177         if (lmp_bredr_capable(hdev)) {
2178                 di.acl_mtu  = hdev->acl_mtu;
2179                 di.acl_pkts = hdev->acl_pkts;
2180                 di.sco_mtu  = hdev->sco_mtu;
2181                 di.sco_pkts = hdev->sco_pkts;
2182         } else {
2183                 di.acl_mtu  = hdev->le_mtu;
2184                 di.acl_pkts = hdev->le_pkts;
2185                 di.sco_mtu  = 0;
2186                 di.sco_pkts = 0;
2187         }
2188         di.link_policy = hdev->link_policy;
2189         di.link_mode   = hdev->link_mode;
2190
2191         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2192         memcpy(&di.features, &hdev->features, sizeof(di.features));
2193
2194         if (copy_to_user(arg, &di, sizeof(di)))
2195                 err = -EFAULT;
2196
2197         hci_dev_put(hdev);
2198
2199         return err;
2200 }
2201
2202 /* ---- Interface to HCI drivers ---- */
2203
2204 static int hci_rfkill_set_block(void *data, bool blocked)
2205 {
2206         struct hci_dev *hdev = data;
2207
2208         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2209
2210         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2211                 return -EBUSY;
2212
2213         if (blocked) {
2214                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2215                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2216                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2217                         hci_dev_do_close(hdev);
2218         } else {
2219                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2220         }
2221
2222         return 0;
2223 }
2224
2225 static const struct rfkill_ops hci_rfkill_ops = {
2226         .set_block = hci_rfkill_set_block,
2227 };
2228
2229 static void hci_power_on(struct work_struct *work)
2230 {
2231         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2232         int err;
2233
2234         BT_DBG("%s", hdev->name);
2235
2236         if (test_bit(HCI_UP, &hdev->flags) &&
2237             hci_dev_test_flag(hdev, HCI_MGMT) &&
2238             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2239                 cancel_delayed_work(&hdev->power_off);
2240                 hci_req_sync_lock(hdev);
2241                 err = __hci_req_hci_power_on(hdev);
2242                 hci_req_sync_unlock(hdev);
2243                 mgmt_power_on(hdev, err);
2244                 return;
2245         }
2246
2247         err = hci_dev_do_open(hdev);
2248         if (err < 0) {
2249                 hci_dev_lock(hdev);
2250                 mgmt_set_powered_failed(hdev, err);
2251                 hci_dev_unlock(hdev);
2252                 return;
2253         }
2254
2255         /* During the HCI setup phase, a few error conditions are
2256          * ignored and they need to be checked now. If they are still
2257          * valid, it is important to turn the device back off.
2258          */
2259         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2260             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2261             (hdev->dev_type == HCI_PRIMARY &&
2262              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2263              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2264                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2265                 hci_dev_do_close(hdev);
2266         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2267                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2268                                    HCI_AUTO_OFF_TIMEOUT);
2269         }
2270
2271         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2272                 /* For unconfigured devices, set the HCI_RAW flag
2273                  * so that userspace can easily identify them.
2274                  */
2275                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2276                         set_bit(HCI_RAW, &hdev->flags);
2277
2278                 /* For fully configured devices, this will send
2279                  * the Index Added event. For unconfigured devices,
2280                  * it will send the Unconfigured Index Added event.
2281                  *
2282                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2283                  * and no event will be sent.
2284                  */
2285                 mgmt_index_added(hdev);
2286         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2287                 /* When the controller is now configured, then it
2288                  * is important to clear the HCI_RAW flag.
2289                  */
2290                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2291                         clear_bit(HCI_RAW, &hdev->flags);
2292
2293                 /* Powering on the controller with HCI_CONFIG set only
2294                  * happens with the transition from unconfigured to
2295                  * configured. This will send the Index Added event.
2296                  */
2297                 mgmt_index_added(hdev);
2298         }
2299 }
2300
2301 static void hci_power_off(struct work_struct *work)
2302 {
2303         struct hci_dev *hdev = container_of(work, struct hci_dev,
2304                                             power_off.work);
2305
2306         BT_DBG("%s", hdev->name);
2307
2308         hci_dev_do_close(hdev);
2309 }
2310
2311 static void hci_error_reset(struct work_struct *work)
2312 {
2313         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2314
2315         BT_DBG("%s", hdev->name);
2316
2317         if (hdev->hw_error)
2318                 hdev->hw_error(hdev, hdev->hw_error_code);
2319         else
2320                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2321
2322         if (hci_dev_do_close(hdev))
2323                 return;
2324
2325         hci_dev_do_open(hdev);
2326 }
2327
2328 void hci_uuids_clear(struct hci_dev *hdev)
2329 {
2330         struct bt_uuid *uuid, *tmp;
2331
2332         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2333                 list_del(&uuid->list);
2334                 kfree(uuid);
2335         }
2336 }
2337
2338 void hci_link_keys_clear(struct hci_dev *hdev)
2339 {
2340         struct link_key *key;
2341
2342         list_for_each_entry(key, &hdev->link_keys, list) {
2343                 list_del_rcu(&key->list);
2344                 kfree_rcu(key, rcu);
2345         }
2346 }
2347
2348 void hci_smp_ltks_clear(struct hci_dev *hdev)
2349 {
2350         struct smp_ltk *k;
2351
2352         list_for_each_entry(k, &hdev->long_term_keys, list) {
2353                 list_del_rcu(&k->list);
2354                 kfree_rcu(k, rcu);
2355         }
2356 }
2357
2358 void hci_smp_irks_clear(struct hci_dev *hdev)
2359 {
2360         struct smp_irk *k;
2361
2362         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2363                 list_del_rcu(&k->list);
2364                 kfree_rcu(k, rcu);
2365         }
2366 }
2367
2368 void hci_blocked_keys_clear(struct hci_dev *hdev)
2369 {
2370         struct blocked_key *b;
2371
2372         list_for_each_entry(b, &hdev->blocked_keys, list) {
2373                 list_del_rcu(&b->list);
2374                 kfree_rcu(b, rcu);
2375         }
2376 }
2377
2378 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2379 {
2380         bool blocked = false;
2381         struct blocked_key *b;
2382
2383         rcu_read_lock();
2384         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2385                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2386                         blocked = true;
2387                         break;
2388                 }
2389         }
2390
2391         rcu_read_unlock();
2392         return blocked;
2393 }
2394
2395 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2396 {
2397         struct link_key *k;
2398
2399         rcu_read_lock();
2400         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2401                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2402                         rcu_read_unlock();
2403
2404                         if (hci_is_blocked_key(hdev,
2405                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2406                                                k->val)) {
2407                                 bt_dev_warn_ratelimited(hdev,
2408                                                         "Link key blocked for %pMR",
2409                                                         &k->bdaddr);
2410                                 return NULL;
2411                         }
2412
2413                         return k;
2414                 }
2415         }
2416         rcu_read_unlock();
2417
2418         return NULL;
2419 }
2420
2421 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2422                                u8 key_type, u8 old_key_type)
2423 {
2424         /* Legacy key */
2425         if (key_type < 0x03)
2426                 return true;
2427
2428         /* Debug keys are insecure so don't store them persistently */
2429         if (key_type == HCI_LK_DEBUG_COMBINATION)
2430                 return false;
2431
2432         /* Changed combination key and there's no previous one */
2433         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2434                 return false;
2435
2436         /* Security mode 3 case */
2437         if (!conn)
2438                 return true;
2439
2440         /* BR/EDR key derived using SC from an LE link */
2441         if (conn->type == LE_LINK)
2442                 return true;
2443
2444         /* Neither the local nor the remote side requested no-bonding */
2445         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2446                 return true;
2447
2448         /* Local side had dedicated bonding as requirement */
2449         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2450                 return true;
2451
2452         /* Remote side had dedicated bonding as requirement */
2453         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2454                 return true;
2455
2456         /* If none of the above criteria match, then don't store the key
2457          * persistently */
2458         return false;
2459 }
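
/* Worked example for the decision above (authentication requirement
 * values from the Bluetooth core spec: 0x00/0x01 no bonding, 0x02/0x03
 * dedicated bonding, 0x04/0x05 general bonding): an SSP pairing where
 * both sides requested general bonding uses auth_type and remote_auth
 * 0x04, so the "> 0x01" test passes and even an unauthenticated
 * combination key is stored persistently. If both sides requested
 * 0x00 or 0x01 (no bonding), none of the checks match and the key is
 * kept only for the lifetime of the connection.
 */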
2460
2461 static u8 ltk_role(u8 type)
2462 {
2463         if (type == SMP_LTK)
2464                 return HCI_ROLE_MASTER;
2465
2466         return HCI_ROLE_SLAVE;
2467 }
2468
2469 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2470                              u8 addr_type, u8 role)
2471 {
2472         struct smp_ltk *k;
2473
2474         rcu_read_lock();
2475         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2476                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2477                         continue;
2478
2479                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2480                         rcu_read_unlock();
2481
2482                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2483                                                k->val)) {
2484                                 bt_dev_warn_ratelimited(hdev,
2485                                                         "LTK blocked for %pMR",
2486                                                         &k->bdaddr);
2487                                 return NULL;
2488                         }
2489
2490                         return k;
2491                 }
2492         }
2493         rcu_read_unlock();
2494
2495         return NULL;
2496 }
2497
2498 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2499 {
2500         struct smp_irk *irk_to_return = NULL;
2501         struct smp_irk *irk;
2502
2503         rcu_read_lock();
2504         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2505                 if (!bacmp(&irk->rpa, rpa)) {
2506                         irk_to_return = irk;
2507                         goto done;
2508                 }
2509         }
2510
2511         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2512                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2513                         bacpy(&irk->rpa, rpa);
2514                         irk_to_return = irk;
2515                         goto done;
2516                 }
2517         }
2518
2519 done:
2520         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2521                                                 irk_to_return->val)) {
2522                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2523                                         &irk_to_return->bdaddr);
2524                 irk_to_return = NULL;
2525         }
2526
2527         rcu_read_unlock();
2528
2529         return irk_to_return;
2530 }
2531
2532 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2533                                      u8 addr_type)
2534 {
2535         struct smp_irk *irk_to_return = NULL;
2536         struct smp_irk *irk;
2537
2538         /* Identity Address must be public or static random */
2539         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2540                 return NULL;
2541
2542         rcu_read_lock();
2543         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2544                 if (addr_type == irk->addr_type &&
2545                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2546                         irk_to_return = irk;
2547                         goto done;
2548                 }
2549         }
2550
2551 done:
2552
2553         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2554                                                 irk_to_return->val)) {
2555                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2556                                         &irk_to_return->bdaddr);
2557                 irk_to_return = NULL;
2558         }
2559
2560         rcu_read_unlock();
2561
2562         return irk_to_return;
2563 }
2564
2565 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2566                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2567                                   u8 pin_len, bool *persistent)
2568 {
2569         struct link_key *key, *old_key;
2570         u8 old_key_type;
2571
2572         old_key = hci_find_link_key(hdev, bdaddr);
2573         if (old_key) {
2574                 old_key_type = old_key->type;
2575                 key = old_key;
2576         } else {
2577                 old_key_type = conn ? conn->key_type : 0xff;
2578                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2579                 if (!key)
2580                         return NULL;
2581                 list_add_rcu(&key->list, &hdev->link_keys);
2582         }
2583
2584         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2585
2586         /* Some buggy controller combinations generate a changed
2587          * combination key for legacy pairing even when there's no
2588          * previous key */
2589         if (type == HCI_LK_CHANGED_COMBINATION &&
2590             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2591                 type = HCI_LK_COMBINATION;
2592                 if (conn)
2593                         conn->key_type = type;
2594         }
2595
2596         bacpy(&key->bdaddr, bdaddr);
2597         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2598         key->pin_len = pin_len;
2599
2600         if (type == HCI_LK_CHANGED_COMBINATION)
2601                 key->type = old_key_type;
2602         else
2603                 key->type = type;
2604
2605         if (persistent)
2606                 *persistent = hci_persistent_key(hdev, conn, type,
2607                                                  old_key_type);
2608
2609         return key;
2610 }
2611
2612 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2613                             u8 addr_type, u8 type, u8 authenticated,
2614                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2615 {
2616         struct smp_ltk *key, *old_key;
2617         u8 role = ltk_role(type);
2618
2619         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2620         if (old_key)
2621                 key = old_key;
2622         else {
2623                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2624                 if (!key)
2625                         return NULL;
2626                 list_add_rcu(&key->list, &hdev->long_term_keys);
2627         }
2628
2629         bacpy(&key->bdaddr, bdaddr);
2630         key->bdaddr_type = addr_type;
2631         memcpy(key->val, tk, sizeof(key->val));
2632         key->authenticated = authenticated;
2633         key->ediv = ediv;
2634         key->rand = rand;
2635         key->enc_size = enc_size;
2636         key->type = type;
2637
2638         return key;
2639 }
2640
2641 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2642                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2643 {
2644         struct smp_irk *irk;
2645
2646         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2647         if (!irk) {
2648                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2649                 if (!irk)
2650                         return NULL;
2651
2652                 bacpy(&irk->bdaddr, bdaddr);
2653                 irk->addr_type = addr_type;
2654
2655                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2656         }
2657
2658         memcpy(irk->val, val, 16);
2659         bacpy(&irk->rpa, rpa);
2660
2661         return irk;
2662 }
2663
2664 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2665 {
2666         struct link_key *key;
2667
2668         key = hci_find_link_key(hdev, bdaddr);
2669         if (!key)
2670                 return -ENOENT;
2671
2672         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2673
2674         list_del_rcu(&key->list);
2675         kfree_rcu(key, rcu);
2676
2677         return 0;
2678 }
2679
2680 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2681 {
2682         struct smp_ltk *k;
2683         int removed = 0;
2684
2685         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2686                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2687                         continue;
2688
2689                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2690
2691                 list_del_rcu(&k->list);
2692                 kfree_rcu(k, rcu);
2693                 removed++;
2694         }
2695
2696         return removed ? 0 : -ENOENT;
2697 }
2698
2699 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2700 {
2701         struct smp_irk *k;
2702
2703         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2704                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2705                         continue;
2706
2707                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2708
2709                 list_del_rcu(&k->list);
2710                 kfree_rcu(k, rcu);
2711         }
2712 }
2713
2714 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2715 {
2716         struct smp_ltk *k;
2717         struct smp_irk *irk;
2718         u8 addr_type;
2719
2720         if (type == BDADDR_BREDR) {
2721                 if (hci_find_link_key(hdev, bdaddr))
2722                         return true;
2723                 return false;
2724         }
2725
2726         /* Convert to HCI addr type which struct smp_ltk uses */
2727         if (type == BDADDR_LE_PUBLIC)
2728                 addr_type = ADDR_LE_DEV_PUBLIC;
2729         else
2730                 addr_type = ADDR_LE_DEV_RANDOM;
2731
2732         irk = hci_get_irk(hdev, bdaddr, addr_type);
2733         if (irk) {
2734                 bdaddr = &irk->bdaddr;
2735                 addr_type = irk->addr_type;
2736         }
2737
2738         rcu_read_lock();
2739         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2740                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2741                         rcu_read_unlock();
2742                         return true;
2743                 }
2744         }
2745         rcu_read_unlock();
2746
2747         return false;
2748 }
2749
2750 /* HCI command timer function */
2751 static void hci_cmd_timeout(struct work_struct *work)
2752 {
2753         struct hci_dev *hdev = container_of(work, struct hci_dev,
2754                                             cmd_timer.work);
2755
2756         if (hdev->sent_cmd) {
2757                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2758                 u16 opcode = __le16_to_cpu(sent->opcode);
2759
2760                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2761         } else {
2762                 bt_dev_err(hdev, "command tx timeout");
2763         }
2764
2765         if (hdev->cmd_timeout)
2766                 hdev->cmd_timeout(hdev);
2767
2768         atomic_set(&hdev->cmd_cnt, 1);
2769         queue_work(hdev->workqueue, &hdev->cmd_work);
2770 }
2771
2772 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2773                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2774 {
2775         struct oob_data *data;
2776
2777         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2778                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2779                         continue;
2780                 if (data->bdaddr_type != bdaddr_type)
2781                         continue;
2782                 return data;
2783         }
2784
2785         return NULL;
2786 }
2787
2788 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2789                                u8 bdaddr_type)
2790 {
2791         struct oob_data *data;
2792
2793         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2794         if (!data)
2795                 return -ENOENT;
2796
2797         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2798
2799         list_del(&data->list);
2800         kfree(data);
2801
2802         return 0;
2803 }
2804
2805 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2806 {
2807         struct oob_data *data, *n;
2808
2809         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2810                 list_del(&data->list);
2811                 kfree(data);
2812         }
2813 }
2814
2815 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2816                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2817                             u8 *hash256, u8 *rand256)
2818 {
2819         struct oob_data *data;
2820
2821         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2822         if (!data) {
2823                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2824                 if (!data)
2825                         return -ENOMEM;
2826
2827                 bacpy(&data->bdaddr, bdaddr);
2828                 data->bdaddr_type = bdaddr_type;
2829                 list_add(&data->list, &hdev->remote_oob_data);
2830         }
2831
2832         if (hash192 && rand192) {
2833                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2834                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2835                 if (hash256 && rand256)
2836                         data->present = 0x03;
2837         } else {
2838                 memset(data->hash192, 0, sizeof(data->hash192));
2839                 memset(data->rand192, 0, sizeof(data->rand192));
2840                 if (hash256 && rand256)
2841                         data->present = 0x02;
2842                 else
2843                         data->present = 0x00;
2844         }
2845
2846         if (hash256 && rand256) {
2847                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2848                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2849         } else {
2850                 memset(data->hash256, 0, sizeof(data->hash256));
2851                 memset(data->rand256, 0, sizeof(data->rand256));
2852                 if (hash192 && rand192)
2853                         data->present = 0x01;
2854         }
2855
2856         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2857
2858         return 0;
2859 }
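
/* Editorial note on the "present" encoding built above: bit 0 means
 * the P-192 hash/randomizer pair is valid and bit 1 the P-256 pair,
 * so 0x01 is P-192 only, 0x02 is P-256 only, 0x03 is both and 0x00
 * is neither.
 */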
2860
2861 /* This function requires the caller holds hdev->lock */
2862 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2863 {
2864         struct adv_info *adv_instance;
2865
2866         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2867                 if (adv_instance->instance == instance)
2868                         return adv_instance;
2869         }
2870
2871         return NULL;
2872 }
2873
2874 /* This function requires the caller holds hdev->lock */
2875 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2876 {
2877         struct adv_info *cur_instance;
2878
2879         cur_instance = hci_find_adv_instance(hdev, instance);
2880         if (!cur_instance)
2881                 return NULL;
2882
2883         if (cur_instance == list_last_entry(&hdev->adv_instances,
2884                                             struct adv_info, list))
2885                 return list_first_entry(&hdev->adv_instances,
2886                                                  struct adv_info, list);
2887         else
2888                 return list_next_entry(cur_instance, list);
2889 }
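
/* Editorial note: the lookup above is circular; with instances
 * {0x01, 0x02, 0x03} registered, hci_get_next_instance(hdev, 0x03)
 * wraps around and returns the 0x01 entry.
 */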
2890
2891 /* This function requires the caller holds hdev->lock */
2892 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2893 {
2894         struct adv_info *adv_instance;
2895
2896         adv_instance = hci_find_adv_instance(hdev, instance);
2897         if (!adv_instance)
2898                 return -ENOENT;
2899
2900         BT_DBG("%s removing instance %d", hdev->name, instance);
2901
2902         if (hdev->cur_adv_instance == instance) {
2903                 if (hdev->adv_instance_timeout) {
2904                         cancel_delayed_work(&hdev->adv_instance_expire);
2905                         hdev->adv_instance_timeout = 0;
2906                 }
2907                 hdev->cur_adv_instance = 0x00;
2908         }
2909
2910         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2911
2912         list_del(&adv_instance->list);
2913         kfree(adv_instance);
2914
2915         hdev->adv_instance_cnt--;
2916
2917         return 0;
2918 }
2919
2920 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2921 {
2922         struct adv_info *adv_instance, *n;
2923
2924         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2925                 adv_instance->rpa_expired = rpa_expired;
2926 }
2927
2928 /* This function requires the caller holds hdev->lock */
2929 void hci_adv_instances_clear(struct hci_dev *hdev)
2930 {
2931         struct adv_info *adv_instance, *n;
2932
2933         if (hdev->adv_instance_timeout) {
2934                 cancel_delayed_work(&hdev->adv_instance_expire);
2935                 hdev->adv_instance_timeout = 0;
2936         }
2937
2938         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2939                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2940                 list_del(&adv_instance->list);
2941                 kfree(adv_instance);
2942         }
2943
2944         hdev->adv_instance_cnt = 0;
2945         hdev->cur_adv_instance = 0x00;
2946 }
2947
2948 static void adv_instance_rpa_expired(struct work_struct *work)
2949 {
2950         struct adv_info *adv_instance = container_of(work, struct adv_info,
2951                                                      rpa_expired_cb.work);
2952
2953         BT_DBG("");
2954
2955         adv_instance->rpa_expired = true;
2956 }
2957
2958 /* This function requires the caller holds hdev->lock */
2959 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2960                          u16 adv_data_len, u8 *adv_data,
2961                          u16 scan_rsp_len, u8 *scan_rsp_data,
2962                          u16 timeout, u16 duration, s8 tx_power,
2963                          u32 min_interval, u32 max_interval)
2964 {
2965         struct adv_info *adv_instance;
2966
2967         adv_instance = hci_find_adv_instance(hdev, instance);
2968         if (adv_instance) {
2969                 memset(adv_instance->adv_data, 0,
2970                        sizeof(adv_instance->adv_data));
2971                 memset(adv_instance->scan_rsp_data, 0,
2972                        sizeof(adv_instance->scan_rsp_data));
2973         } else {
2974                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2975                     instance < 1 || instance > hdev->le_num_of_adv_sets)
2976                         return -EOVERFLOW;
2977
2978                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2979                 if (!adv_instance)
2980                         return -ENOMEM;
2981
2982                 adv_instance->pending = true;
2983                 adv_instance->instance = instance;
2984                 list_add(&adv_instance->list, &hdev->adv_instances);
2985                 hdev->adv_instance_cnt++;
2986         }
2987
2988         adv_instance->flags = flags;
2989         adv_instance->adv_data_len = adv_data_len;
2990         adv_instance->scan_rsp_len = scan_rsp_len;
2991         adv_instance->min_interval = min_interval;
2992         adv_instance->max_interval = max_interval;
2993         adv_instance->tx_power = tx_power;
2994
2995         if (adv_data_len)
2996                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2997
2998         if (scan_rsp_len)
2999                 memcpy(adv_instance->scan_rsp_data,
3000                        scan_rsp_data, scan_rsp_len);
3001
3002         adv_instance->timeout = timeout;
3003         adv_instance->remaining_time = timeout;
3004
3005         if (duration == 0)
3006                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3007         else
3008                 adv_instance->duration = duration;
3009
3010         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3011                           adv_instance_rpa_expired);
3012
3013         BT_DBG("%s for instance %d", hdev->name, instance);
3014
3015         return 0;
3016 }
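
/* Editorial usage sketch: a hypothetical caller (holding hdev->lock)
 * registering a single connectable instance with the default rotation
 * duration; the flag and advertising data values are assumptions.
 */
#if 0
	u8 adv_data[] = { 0x02, 0x01, 0x06 };  /* AD: Flags, LE General Disc. */
	int err;

	err = hci_add_adv_instance(hdev, 0x01, MGMT_ADV_FLAG_CONNECTABLE,
				   sizeof(adv_data), adv_data, 0, NULL,
				   0 /* no timeout */, 0 /* default duration */,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
#endif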
3017
3018 /* This function requires the caller holds hdev->lock */
3019 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3020                               u16 adv_data_len, u8 *adv_data,
3021                               u16 scan_rsp_len, u8 *scan_rsp_data)
3022 {
3023         struct adv_info *adv_instance;
3024
3025         adv_instance = hci_find_adv_instance(hdev, instance);
3026
3027         /* If advertisement doesn't exist, we can't modify its data */
3028         if (!adv_instance)
3029                 return -ENOENT;
3030
3031         if (adv_data_len) {
3032                 memset(adv_instance->adv_data, 0,
3033                        sizeof(adv_instance->adv_data));
3034                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3035                 adv_instance->adv_data_len = adv_data_len;
3036         }
3037
3038         if (scan_rsp_len) {
3039                 memset(adv_instance->scan_rsp_data, 0,
3040                        sizeof(adv_instance->scan_rsp_data));
3041                 memcpy(adv_instance->scan_rsp_data,
3042                        scan_rsp_data, scan_rsp_len);
3043                 adv_instance->scan_rsp_len = scan_rsp_len;
3044         }
3045
3046         return 0;
3047 }
3048
3049 /* This function requires the caller holds hdev->lock */
3050 void hci_adv_monitors_clear(struct hci_dev *hdev)
3051 {
3052         struct adv_monitor *monitor;
3053         int handle;
3054
3055         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3056                 hci_free_adv_monitor(hdev, monitor);
3057
3058         idr_destroy(&hdev->adv_monitors_idr);
3059 }
3060
/* Frees the monitor structure and does some bookkeeping.
 * This function requires the caller holds hdev->lock.
 */
3064 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3065 {
3066         struct adv_pattern *pattern;
3067         struct adv_pattern *tmp;
3068
3069         if (!monitor)
3070                 return;
3071
3072         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3073                 list_del(&pattern->list);
3074                 kfree(pattern);
3075         }
3076
3077         if (monitor->handle)
3078                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3079
3080         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3081                 hdev->adv_monitors_cnt--;
3082                 mgmt_adv_monitor_removed(hdev, monitor->handle);
3083         }
3084
3085         kfree(monitor);
3086 }
3087
3088 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3089 {
3090         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3091 }
3092
3093 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3094 {
3095         return mgmt_remove_adv_monitor_complete(hdev, status);
3096 }
3097
/* Assigns a handle to a monitor and, if offloading is supported and power
 * is on, attempts to forward the request to the controller.
3100  * Returns true if request is forwarded (result is pending), false otherwise.
3101  * This function requires the caller holds hdev->lock.
3102  */
3103 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3104                          int *err)
3105 {
3106         int min, max, handle;
3107
3108         *err = 0;
3109
3110         if (!monitor) {
3111                 *err = -EINVAL;
3112                 return false;
3113         }
3114
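        /* idr_alloc() treats the maximum as exclusive, so valid handles
         * fall within [HCI_MIN_ADV_MONITOR_HANDLE,
         * HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES).
         */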
3115         min = HCI_MIN_ADV_MONITOR_HANDLE;
3116         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3117         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3118                            GFP_KERNEL);
3119         if (handle < 0) {
3120                 *err = handle;
3121                 return false;
3122         }
3123
3124         monitor->handle = handle;
3125
3126         if (!hdev_is_powered(hdev))
3127                 return false;
3128
3129         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3130         case HCI_ADV_MONITOR_EXT_NONE:
3131                 hci_update_background_scan(hdev);
                bt_dev_dbg(hdev, "add monitor status %d", *err);
3133                 /* Message was not forwarded to controller - not an error */
3134                 return false;
3135         case HCI_ADV_MONITOR_EXT_MSFT:
3136                 *err = msft_add_monitor_pattern(hdev, monitor);
                bt_dev_dbg(hdev, "add monitor msft status %d", *err);
3139                 break;
3140         }
3141
3142         return (*err == 0);
3143 }
3144
/* Attempts to tell the controller to remove the monitor and frees it. If
 * the controller has no corresponding handle, the monitor is freed anyway.
 * Returns true if the request is forwarded (result is pending), false
 * otherwise.
3148  * This function requires the caller holds hdev->lock.
3149  */
3150 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3151                                    struct adv_monitor *monitor,
3152                                    u16 handle, int *err)
3153 {
3154         *err = 0;
3155
3156         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3157         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3158                 goto free_monitor;
3159         case HCI_ADV_MONITOR_EXT_MSFT:
3160                 *err = msft_remove_monitor(hdev, monitor, handle);
3161                 break;
3162         }
3163
        /* If no matching handle is registered, just free the monitor */
3165         if (*err == -ENOENT)
3166                 goto free_monitor;
3167
3168         return (*err == 0);
3169
3170 free_monitor:
3171         if (*err == -ENOENT)
3172                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3173                             monitor->handle);
3174         hci_free_adv_monitor(hdev, monitor);
3175
3176         *err = 0;
3177         return false;
3178 }
3179
3180 /* Returns true if request is forwarded (result is pending), false otherwise.
3181  * This function requires the caller holds hdev->lock.
3182  */
3183 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3184 {
3185         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3186         bool pending;
3187
3188         if (!monitor) {
3189                 *err = -EINVAL;
3190                 return false;
3191         }
3192
3193         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3194         if (!*err && !pending)
3195                 hci_update_background_scan(hdev);
3196
        bt_dev_dbg(hdev, "remove monitor handle %d, status %d, %spending",
                   handle, *err, pending ? "" : "not ");
3199
3200         return pending;
3201 }
3202
3203 /* Returns true if request is forwarded (result is pending), false otherwise.
3204  * This function requires the caller holds hdev->lock.
3205  */
3206 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3207 {
3208         struct adv_monitor *monitor;
3209         int idr_next_id = 0;
3210         bool pending = false;
3211         bool update = false;
3212
3213         *err = 0;
3214
3215         while (!*err && !pending) {
3216                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3217                 if (!monitor)
3218                         break;
3219
3220                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3221
3222                 if (!*err && !pending)
3223                         update = true;
3224         }
3225
3226         if (update)
3227                 hci_update_background_scan(hdev);
3228
        bt_dev_dbg(hdev, "remove all monitors status %d, %spending",
                   *err, pending ? "" : "not ");
3231
3232         return pending;
3233 }
3234
3235 /* This function requires the caller holds hdev->lock */
3236 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3237 {
3238         return !idr_is_empty(&hdev->adv_monitors_idr);
3239 }
3240
3241 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3242 {
3243         if (msft_monitor_supported(hdev))
3244                 return HCI_ADV_MONITOR_EXT_MSFT;
3245
3246         return HCI_ADV_MONITOR_EXT_NONE;
3247 }
3248
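/* Look up an entry on a bdaddr list by address and address type. The
 * _with_irk and _with_flags variants below do the same for lists whose
 * entries also carry IRKs or flags.
 */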
3249 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3250                                          bdaddr_t *bdaddr, u8 type)
3251 {
3252         struct bdaddr_list *b;
3253
3254         list_for_each_entry(b, bdaddr_list, list) {
3255                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3256                         return b;
3257         }
3258
3259         return NULL;
3260 }
3261
3262 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3263                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3264                                 u8 type)
3265 {
3266         struct bdaddr_list_with_irk *b;
3267
3268         list_for_each_entry(b, bdaddr_list, list) {
3269                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3270                         return b;
3271         }
3272
3273         return NULL;
3274 }
3275
3276 struct bdaddr_list_with_flags *
3277 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3278                                   bdaddr_t *bdaddr, u8 type)
3279 {
3280         struct bdaddr_list_with_flags *b;
3281
3282         list_for_each_entry(b, bdaddr_list, list) {
3283                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3284                         return b;
3285         }
3286
3287         return NULL;
3288 }
3289
3290 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3291 {
3292         struct bdaddr_list *b, *n;
3293
3294         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3295                 list_del(&b->list);
3296                 kfree(b);
3297         }
3298 }
3299
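/* Add an entry to a bdaddr list. BDADDR_ANY is rejected with -EBADF and a
 * duplicate address/type pair with -EEXIST.
 */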
3300 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3301 {
3302         struct bdaddr_list *entry;
3303
3304         if (!bacmp(bdaddr, BDADDR_ANY))
3305                 return -EBADF;
3306
3307         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3308                 return -EEXIST;
3309
3310         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3311         if (!entry)
3312                 return -ENOMEM;
3313
3314         bacpy(&entry->bdaddr, bdaddr);
3315         entry->bdaddr_type = type;
3316
3317         list_add(&entry->list, list);
3318
3319         return 0;
3320 }
3321
3322 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3323                                         u8 type, u8 *peer_irk, u8 *local_irk)
3324 {
3325         struct bdaddr_list_with_irk *entry;
3326
3327         if (!bacmp(bdaddr, BDADDR_ANY))
3328                 return -EBADF;
3329
3330         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3331                 return -EEXIST;
3332
3333         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3334         if (!entry)
3335                 return -ENOMEM;
3336
3337         bacpy(&entry->bdaddr, bdaddr);
3338         entry->bdaddr_type = type;
3339
3340         if (peer_irk)
3341                 memcpy(entry->peer_irk, peer_irk, 16);
3342
3343         if (local_irk)
3344                 memcpy(entry->local_irk, local_irk, 16);
3345
3346         list_add(&entry->list, list);
3347
3348         return 0;
3349 }
3350
3351 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3352                                    u8 type, u32 flags)
3353 {
3354         struct bdaddr_list_with_flags *entry;
3355
3356         if (!bacmp(bdaddr, BDADDR_ANY))
3357                 return -EBADF;
3358
3359         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3360                 return -EEXIST;
3361
3362         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3363         if (!entry)
3364                 return -ENOMEM;
3365
3366         bacpy(&entry->bdaddr, bdaddr);
3367         entry->bdaddr_type = type;
3368         entry->current_flags = flags;
3369
3370         list_add(&entry->list, list);
3371
3372         return 0;
3373 }
3374
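/* Delete an entry from a bdaddr list. Passing BDADDR_ANY clears the whole
 * list; the _with_irk and _with_flags variants below behave the same way.
 */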
3375 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3376 {
3377         struct bdaddr_list *entry;
3378
3379         if (!bacmp(bdaddr, BDADDR_ANY)) {
3380                 hci_bdaddr_list_clear(list);
3381                 return 0;
3382         }
3383
3384         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3385         if (!entry)
3386                 return -ENOENT;
3387
3388         list_del(&entry->list);
3389         kfree(entry);
3390
3391         return 0;
3392 }
3393
3394 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3395                                                         u8 type)
3396 {
3397         struct bdaddr_list_with_irk *entry;
3398
3399         if (!bacmp(bdaddr, BDADDR_ANY)) {
3400                 hci_bdaddr_list_clear(list);
3401                 return 0;
3402         }
3403
3404         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3405         if (!entry)
3406                 return -ENOENT;
3407
3408         list_del(&entry->list);
3409         kfree(entry);
3410
3411         return 0;
3412 }
3413
3414 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3415                                    u8 type)
3416 {
3417         struct bdaddr_list_with_flags *entry;
3418
3419         if (!bacmp(bdaddr, BDADDR_ANY)) {
3420                 hci_bdaddr_list_clear(list);
3421                 return 0;
3422         }
3423
3424         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3425         if (!entry)
3426                 return -ENOENT;
3427
3428         list_del(&entry->list);
3429         kfree(entry);
3430
3431         return 0;
3432 }
3433
3434 /* This function requires the caller holds hdev->lock */
3435 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3436                                                bdaddr_t *addr, u8 addr_type)
3437 {
3438         struct hci_conn_params *params;
3439
3440         list_for_each_entry(params, &hdev->le_conn_params, list) {
3441                 if (bacmp(&params->addr, addr) == 0 &&
3442                     params->addr_type == addr_type) {
3443                         return params;
3444                 }
3445         }
3446
3447         return NULL;
3448 }
3449
3450 /* This function requires the caller holds hdev->lock */
3451 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3452                                                   bdaddr_t *addr, u8 addr_type)
3453 {
3454         struct hci_conn_params *param;
3455
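        /* Resolved RPAs are tracked under their identity address type, so
         * map the resolved types back before the lookup.
         */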
3456         switch (addr_type) {
3457         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3458                 addr_type = ADDR_LE_DEV_PUBLIC;
3459                 break;
3460         case ADDR_LE_DEV_RANDOM_RESOLVED:
3461                 addr_type = ADDR_LE_DEV_RANDOM;
3462                 break;
3463         }
3464
3465         list_for_each_entry(param, list, action) {
3466                 if (bacmp(&param->addr, addr) == 0 &&
3467                     param->addr_type == addr_type)
3468                         return param;
3469         }
3470
3471         return NULL;
3472 }
3473
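/* Return the connection parameters for addr/addr_type, allocating a new
 * entry initialized from the controller defaults if none exists yet.
 */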
3474 /* This function requires the caller holds hdev->lock */
3475 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3476                                             bdaddr_t *addr, u8 addr_type)
3477 {
3478         struct hci_conn_params *params;
3479
3480         params = hci_conn_params_lookup(hdev, addr, addr_type);
3481         if (params)
3482                 return params;
3483
3484         params = kzalloc(sizeof(*params), GFP_KERNEL);
3485         if (!params) {
3486                 bt_dev_err(hdev, "out of memory");
3487                 return NULL;
3488         }
3489
3490         bacpy(&params->addr, addr);
3491         params->addr_type = addr_type;
3492
3493         list_add(&params->list, &hdev->le_conn_params);
3494         INIT_LIST_HEAD(&params->action);
3495
3496         params->conn_min_interval = hdev->le_conn_min_interval;
3497         params->conn_max_interval = hdev->le_conn_max_interval;
3498         params->conn_latency = hdev->le_conn_latency;
3499         params->supervision_timeout = hdev->le_supv_timeout;
3500         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3501
3502         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3503
3504         return params;
3505 }
3506
3507 static void hci_conn_params_free(struct hci_conn_params *params)
3508 {
3509         if (params->conn) {
3510                 hci_conn_drop(params->conn);
3511                 hci_conn_put(params->conn);
3512         }
3513
3514         list_del(&params->action);
3515         list_del(&params->list);
3516         kfree(params);
3517 }
3518
3519 /* This function requires the caller holds hdev->lock */
3520 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3521 {
3522         struct hci_conn_params *params;
3523
3524         params = hci_conn_params_lookup(hdev, addr, addr_type);
3525         if (!params)
3526                 return;
3527
3528         hci_conn_params_free(params);
3529
3530         hci_update_background_scan(hdev);
3531
3532         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3533 }
3534
3535 /* This function requires the caller holds hdev->lock */
3536 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3537 {
3538         struct hci_conn_params *params, *tmp;
3539
3540         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3541                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3542                         continue;
3543
                /* If trying to establish a one-time connection to a
                 * disabled device, leave the params, but mark them as
                 * explicit-connect only.
                 */
3547                 if (params->explicit_connect) {
3548                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3549                         continue;
3550                 }
3551
3552                 list_del(&params->list);
3553                 kfree(params);
3554         }
3555
3556         BT_DBG("All LE disabled connection parameters were removed");
3557 }
3558
3559 /* This function requires the caller holds hdev->lock */
3560 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3561 {
3562         struct hci_conn_params *params, *tmp;
3563
3564         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3565                 hci_conn_params_free(params);
3566
3567         BT_DBG("All LE connection parameters were removed");
3568 }
3569
3570 /* Copy the Identity Address of the controller.
3571  *
3572  * If the controller has a public BD_ADDR, then by default use that one.
 * If this is an LE-only controller without a public address, default to
 * the static random address.
3575  *
3576  * For debugging purposes it is possible to force controllers with a
3577  * public address to use the static random address instead.
3578  *
3579  * In case BR/EDR has been disabled on a dual-mode controller and
3580  * userspace has configured a static address, then that address
3581  * becomes the identity address instead of the public BR/EDR address.
3582  */
3583 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3584                                u8 *bdaddr_type)
3585 {
3586         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3587             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3588             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3589              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3590                 bacpy(bdaddr, &hdev->static_addr);
3591                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3592         } else {
3593                 bacpy(bdaddr, &hdev->bdaddr);
3594                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3595         }
3596 }
3597
3598 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3599 {
3600         int i;
3601
3602         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3603                 clear_bit(i, hdev->suspend_tasks);
3604
3605         wake_up(&hdev->suspend_wait_q);
3606 }
3607
3608 static int hci_suspend_wait_event(struct hci_dev *hdev)
3609 {
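/* True once every suspend task bit has been cleared: find_first_bit()
 * returns the bitmap size when no bits are set.
 */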
3610 #define WAKE_COND                                                              \
3611         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3612          __SUSPEND_NUM_TASKS)
3613
3614         int i;
3615         int ret = wait_event_timeout(hdev->suspend_wait_q,
3616                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3617
3618         if (ret == 0) {
3619                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3620                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3621                         if (test_bit(i, hdev->suspend_tasks))
3622                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3623                         clear_bit(i, hdev->suspend_tasks);
3624                 }
3625
3626                 ret = -ETIMEDOUT;
3627         } else {
3628                 ret = 0;
3629         }
3630
3631         return ret;
3632 }
3633
3634 static void hci_prepare_suspend(struct work_struct *work)
3635 {
3636         struct hci_dev *hdev =
3637                 container_of(work, struct hci_dev, suspend_prepare);
3638
3639         hci_dev_lock(hdev);
3640         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3641         hci_dev_unlock(hdev);
3642 }
3643
3644 static int hci_change_suspend_state(struct hci_dev *hdev,
3645                                     enum suspended_state next)
3646 {
3647         hdev->suspend_state_next = next;
3648         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3649         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3650         return hci_suspend_wait_event(hdev);
3651 }
3652
3653 static void hci_clear_wake_reason(struct hci_dev *hdev)
3654 {
3655         hci_dev_lock(hdev);
3656
3657         hdev->wake_reason = 0;
3658         bacpy(&hdev->wake_addr, BDADDR_ANY);
3659         hdev->wake_addr_type = 0;
3660
3661         hci_dev_unlock(hdev);
3662 }
3663
3664 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3665                                 void *data)
3666 {
3667         struct hci_dev *hdev =
3668                 container_of(nb, struct hci_dev, suspend_notifier);
3669         int ret = 0;
3670         u8 state = BT_RUNNING;
3671
3672         /* If powering down, wait for completion. */
3673         if (mgmt_powering_down(hdev)) {
3674                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3675                 ret = hci_suspend_wait_event(hdev);
3676                 if (ret)
3677                         goto done;
3678         }
3679
3680         /* Suspend notifier should only act on events when powered. */
3681         if (!hdev_is_powered(hdev) ||
3682             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3683                 goto done;
3684
3685         if (action == PM_SUSPEND_PREPARE) {
3686                 /* Suspend consists of two actions:
3687                  *  - First, disconnect everything and make the controller not
3688                  *    connectable (disabling scanning)
3689                  *  - Second, program event filter/whitelist and enable scan
3690                  */
3691                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3692                 if (!ret)
3693                         state = BT_SUSPEND_DISCONNECT;
3694
3695                 /* Only configure whitelist if disconnect succeeded and wake
3696                  * isn't being prevented.
3697                  */
3698                 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3699                         ret = hci_change_suspend_state(hdev,
3700                                                 BT_SUSPEND_CONFIGURE_WAKE);
3701                         if (!ret)
3702                                 state = BT_SUSPEND_CONFIGURE_WAKE;
3703                 }
3704
3705                 hci_clear_wake_reason(hdev);
3706                 mgmt_suspending(hdev, state);
3707
3708         } else if (action == PM_POST_SUSPEND) {
3709                 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3710
3711                 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3712                               hdev->wake_addr_type);
3713         }
3714
3715 done:
3716         /* We always allow suspend even if suspend preparation failed and
3717          * attempt to recover in resume.
3718          */
3719         if (ret)
3720                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3721                            action, ret);
3722
3723         return NOTIFY_DONE;
3724 }
3725
3726 /* Alloc HCI device */
3727 struct hci_dev *hci_alloc_dev(void)
3728 {
3729         struct hci_dev *hdev;
3730
3731         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3732         if (!hdev)
3733                 return NULL;
3734
3735         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3736         hdev->esco_type = (ESCO_HV1);
3737         hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3739         hdev->io_capability = 0x03;     /* No Input No Output */
3740         hdev->manufacturer = 0xffff;    /* Default to internal use */
3741         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3742         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3743         hdev->adv_instance_cnt = 0;
3744         hdev->cur_adv_instance = 0x00;
3745         hdev->adv_instance_timeout = 0;
3746
3747         hdev->advmon_allowlist_duration = 300;
3748         hdev->advmon_no_filter_duration = 500;
3749         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
3750
3751         hdev->sniff_max_interval = 800;
3752         hdev->sniff_min_interval = 80;
3753
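        /* The LE timing defaults below are in controller units:
         * advertising and scan intervals/windows in 0.625 ms slots,
         * connection intervals in 1.25 ms slots and the supervision
         * timeout in 10 ms slots.
         */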
3754         hdev->le_adv_channel_map = 0x07;
3755         hdev->le_adv_min_interval = 0x0800;
3756         hdev->le_adv_max_interval = 0x0800;
3757         hdev->le_scan_interval = 0x0060;
3758         hdev->le_scan_window = 0x0030;
3759         hdev->le_scan_int_suspend = 0x0400;
3760         hdev->le_scan_window_suspend = 0x0012;
3761         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3762         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3763         hdev->le_scan_int_connect = 0x0060;
3764         hdev->le_scan_window_connect = 0x0060;
3765         hdev->le_conn_min_interval = 0x0018;
3766         hdev->le_conn_max_interval = 0x0028;
3767         hdev->le_conn_latency = 0x0000;
3768         hdev->le_supv_timeout = 0x002a;
3769         hdev->le_def_tx_len = 0x001b;
3770         hdev->le_def_tx_time = 0x0148;
3771         hdev->le_max_tx_len = 0x001b;
3772         hdev->le_max_tx_time = 0x0148;
3773         hdev->le_max_rx_len = 0x001b;
3774         hdev->le_max_rx_time = 0x0148;
3775         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3776         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3777         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3778         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3779         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3780         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3781         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3782         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3783         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3784
3785         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3786         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3787         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3788         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3789         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3790         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3791
3792         /* default 1.28 sec page scan */
3793         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3794         hdev->def_page_scan_int = 0x0800;
3795         hdev->def_page_scan_window = 0x0012;
3796
3797         mutex_init(&hdev->lock);
3798         mutex_init(&hdev->req_lock);
3799
3800         INIT_LIST_HEAD(&hdev->mgmt_pending);
3801         INIT_LIST_HEAD(&hdev->blacklist);
3802         INIT_LIST_HEAD(&hdev->whitelist);
3803         INIT_LIST_HEAD(&hdev->uuids);
3804         INIT_LIST_HEAD(&hdev->link_keys);
3805         INIT_LIST_HEAD(&hdev->long_term_keys);
3806         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3807         INIT_LIST_HEAD(&hdev->remote_oob_data);
3808         INIT_LIST_HEAD(&hdev->le_white_list);
3809         INIT_LIST_HEAD(&hdev->le_resolv_list);
3810         INIT_LIST_HEAD(&hdev->le_conn_params);
3811         INIT_LIST_HEAD(&hdev->pend_le_conns);
3812         INIT_LIST_HEAD(&hdev->pend_le_reports);
3813         INIT_LIST_HEAD(&hdev->conn_hash.list);
3814         INIT_LIST_HEAD(&hdev->adv_instances);
3815         INIT_LIST_HEAD(&hdev->blocked_keys);
3816
3817         INIT_WORK(&hdev->rx_work, hci_rx_work);
3818         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3819         INIT_WORK(&hdev->tx_work, hci_tx_work);
3820         INIT_WORK(&hdev->power_on, hci_power_on);
3821         INIT_WORK(&hdev->error_reset, hci_error_reset);
3822         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3823
3824         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3825
3826         skb_queue_head_init(&hdev->rx_q);
3827         skb_queue_head_init(&hdev->cmd_q);
3828         skb_queue_head_init(&hdev->raw_q);
3829
3830         init_waitqueue_head(&hdev->req_wait_q);
3831         init_waitqueue_head(&hdev->suspend_wait_q);
3832
3833         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3834
3835         hci_request_setup(hdev);
3836
3837         hci_init_sysfs(hdev);
3838         discovery_init(hdev);
3839
3840         return hdev;
3841 }
3842 EXPORT_SYMBOL(hci_alloc_dev);
3843
3844 /* Free HCI device */
3845 void hci_free_dev(struct hci_dev *hdev)
3846 {
3847         /* will free via device release */
3848         put_device(&hdev->dev);
3849 }
3850 EXPORT_SYMBOL(hci_free_dev);
3851
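/* A minimal driver-side registration sketch (my_open, my_close and
 * my_send are hypothetical driver callbacks; error handling elided):
 *
 *	hdev = hci_alloc_dev();
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 */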
3852 /* Register HCI device */
3853 int hci_register_dev(struct hci_dev *hdev)
3854 {
3855         int id, error;
3856
3857         if (!hdev->open || !hdev->close || !hdev->send)
3858                 return -EINVAL;
3859
3860         /* Do not allow HCI_AMP devices to register at index 0,
3861          * so the index can be used as the AMP controller ID.
3862          */
3863         switch (hdev->dev_type) {
3864         case HCI_PRIMARY:
3865                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3866                 break;
3867         case HCI_AMP:
3868                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3869                 break;
3870         default:
3871                 return -EINVAL;
3872         }
3873
3874         if (id < 0)
3875                 return id;
3876
        snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3878         hdev->id = id;
3879
3880         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3881
3882         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3883         if (!hdev->workqueue) {
3884                 error = -ENOMEM;
3885                 goto err;
3886         }
3887
3888         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3889                                                       hdev->name);
3890         if (!hdev->req_workqueue) {
3891                 destroy_workqueue(hdev->workqueue);
3892                 error = -ENOMEM;
3893                 goto err;
3894         }
3895
3896         if (!IS_ERR_OR_NULL(bt_debugfs))
3897                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3898
3899         dev_set_name(&hdev->dev, "%s", hdev->name);
3900
3901         error = device_add(&hdev->dev);
3902         if (error < 0)
3903                 goto err_wqueue;
3904
3905         hci_leds_init(hdev);
3906
3907         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3908                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3909                                     hdev);
3910         if (hdev->rfkill) {
3911                 if (rfkill_register(hdev->rfkill) < 0) {
3912                         rfkill_destroy(hdev->rfkill);
3913                         hdev->rfkill = NULL;
3914                 }
3915         }
3916
3917         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3918                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3919
3920         hci_dev_set_flag(hdev, HCI_SETUP);
3921         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3922
3923         if (hdev->dev_type == HCI_PRIMARY) {
3924                 /* Assume BR/EDR support until proven otherwise (such as
                 * through reading supported features during init).
3926                  */
3927                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3928         }
3929
3930         write_lock(&hci_dev_list_lock);
3931         list_add(&hdev->list, &hci_dev_list);
3932         write_unlock(&hci_dev_list_lock);
3933
3934         /* Devices that are marked for raw-only usage are unconfigured
3935          * and should not be included in normal operation.
3936          */
3937         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3938                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3939
3940         hci_sock_dev_event(hdev, HCI_DEV_REG);
3941         hci_dev_hold(hdev);
3942
3943         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3944                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3945                 error = register_pm_notifier(&hdev->suspend_notifier);
3946                 if (error)
3947                         goto err_wqueue;
3948         }
3949
3950         queue_work(hdev->req_workqueue, &hdev->power_on);
3951
3952         idr_init(&hdev->adv_monitors_idr);
3953
3954         return id;
3955
3956 err_wqueue:
3957         destroy_workqueue(hdev->workqueue);
3958         destroy_workqueue(hdev->req_workqueue);
3959 err:
3960         ida_simple_remove(&hci_index_ida, hdev->id);
3961
3962         return error;
3963 }
3964 EXPORT_SYMBOL(hci_register_dev);
3965
3966 /* Unregister HCI device */
3967 void hci_unregister_dev(struct hci_dev *hdev)
3968 {
3969         int id;
3970
3971         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3972
3973         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3974
3975         id = hdev->id;
3976
3977         write_lock(&hci_dev_list_lock);
3978         list_del(&hdev->list);
3979         write_unlock(&hci_dev_list_lock);
3980
3981         cancel_work_sync(&hdev->power_on);
3982
3983         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3984                 hci_suspend_clear_tasks(hdev);
3985                 unregister_pm_notifier(&hdev->suspend_notifier);
3986                 cancel_work_sync(&hdev->suspend_prepare);
3987         }
3988
3989         hci_dev_do_close(hdev);
3990
3991         if (!test_bit(HCI_INIT, &hdev->flags) &&
3992             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3993             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3994                 hci_dev_lock(hdev);
3995                 mgmt_index_removed(hdev);
3996                 hci_dev_unlock(hdev);
3997         }
3998
3999         /* mgmt_index_removed should take care of emptying the
         * pending list.
         */
4001         BUG_ON(!list_empty(&hdev->mgmt_pending));
4002
4003         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4004
4005         if (hdev->rfkill) {
4006                 rfkill_unregister(hdev->rfkill);
4007                 rfkill_destroy(hdev->rfkill);
4008         }
4009
4010         device_del(&hdev->dev);
4011
4012         debugfs_remove_recursive(hdev->debugfs);
4013         kfree_const(hdev->hw_info);
4014         kfree_const(hdev->fw_info);
4015
4016         destroy_workqueue(hdev->workqueue);
4017         destroy_workqueue(hdev->req_workqueue);
4018
4019         hci_dev_lock(hdev);
4020         hci_bdaddr_list_clear(&hdev->blacklist);
4021         hci_bdaddr_list_clear(&hdev->whitelist);
4022         hci_uuids_clear(hdev);
4023         hci_link_keys_clear(hdev);
4024         hci_smp_ltks_clear(hdev);
4025         hci_smp_irks_clear(hdev);
4026         hci_remote_oob_data_clear(hdev);
4027         hci_adv_instances_clear(hdev);
4028         hci_adv_monitors_clear(hdev);
4029         hci_bdaddr_list_clear(&hdev->le_white_list);
4030         hci_bdaddr_list_clear(&hdev->le_resolv_list);
4031         hci_conn_params_clear_all(hdev);
4032         hci_discovery_filter_clear(hdev);
4033         hci_blocked_keys_clear(hdev);
4034         hci_dev_unlock(hdev);
4035
4036         hci_dev_put(hdev);
4037
4038         ida_simple_remove(&hci_index_ida, id);
4039 }
4040 EXPORT_SYMBOL(hci_unregister_dev);
4041
4042 /* Suspend HCI device */
4043 int hci_suspend_dev(struct hci_dev *hdev)
4044 {
4045         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4046         return 0;
4047 }
4048 EXPORT_SYMBOL(hci_suspend_dev);
4049
4050 /* Resume HCI device */
4051 int hci_resume_dev(struct hci_dev *hdev)
4052 {
4053         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4054         return 0;
4055 }
4056 EXPORT_SYMBOL(hci_resume_dev);
4057
4058 /* Reset HCI device */
4059 int hci_reset_dev(struct hci_dev *hdev)
4060 {
4061         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4062         struct sk_buff *skb;
4063
4064         skb = bt_skb_alloc(3, GFP_ATOMIC);
4065         if (!skb)
4066                 return -ENOMEM;
4067
4068         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4069         skb_put_data(skb, hw_err, 3);
4070
4071         /* Send Hardware Error to upper stack */
4072         return hci_recv_frame(hdev, skb);
4073 }
4074 EXPORT_SYMBOL(hci_reset_dev);
4075
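/* Driver RX path sketch (assumes the driver set the packet type before
 * handing the skb over):
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 */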
4076 /* Receive frame from HCI drivers */
4077 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4078 {
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
                      !test_bit(HCI_INIT, &hdev->flags))) {
4081                 kfree_skb(skb);
4082                 return -ENXIO;
4083         }
4084
4085         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4086             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4087             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4088             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4089                 kfree_skb(skb);
4090                 return -EINVAL;
4091         }
4092
4093         /* Incoming skb */
4094         bt_cb(skb)->incoming = 1;
4095
4096         /* Time stamp */
4097         __net_timestamp(skb);
4098
4099         skb_queue_tail(&hdev->rx_q, skb);
4100         queue_work(hdev->workqueue, &hdev->rx_work);
4101
4102         return 0;
4103 }
4104 EXPORT_SYMBOL(hci_recv_frame);
4105
4106 /* Receive diagnostic message from HCI drivers */
4107 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4108 {
4109         /* Mark as diagnostic packet */
4110         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4111
4112         /* Time stamp */
4113         __net_timestamp(skb);
4114
4115         skb_queue_tail(&hdev->rx_q, skb);
4116         queue_work(hdev->workqueue, &hdev->rx_work);
4117
4118         return 0;
4119 }
4120 EXPORT_SYMBOL(hci_recv_diag);
4121
4122 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4123 {
4124         va_list vargs;
4125
4126         va_start(vargs, fmt);
4127         kfree_const(hdev->hw_info);
4128         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4129         va_end(vargs);
4130 }
4131 EXPORT_SYMBOL(hci_set_hw_info);
4132
4133 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4134 {
4135         va_list vargs;
4136
4137         va_start(vargs, fmt);
4138         kfree_const(hdev->fw_info);
4139         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4140         va_end(vargs);
4141 }
4142 EXPORT_SYMBOL(hci_set_fw_info);
4143
4144 /* ---- Interface to upper protocols ---- */
4145
4146 int hci_register_cb(struct hci_cb *cb)
4147 {
4148         BT_DBG("%p name %s", cb, cb->name);
4149
4150         mutex_lock(&hci_cb_list_lock);
4151         list_add_tail(&cb->list, &hci_cb_list);
4152         mutex_unlock(&hci_cb_list_lock);
4153
4154         return 0;
4155 }
4156 EXPORT_SYMBOL(hci_register_cb);
4157
4158 int hci_unregister_cb(struct hci_cb *cb)
4159 {
4160         BT_DBG("%p name %s", cb, cb->name);
4161
4162         mutex_lock(&hci_cb_list_lock);
4163         list_del(&cb->list);
4164         mutex_unlock(&hci_cb_list_lock);
4165
4166         return 0;
4167 }
4168 EXPORT_SYMBOL(hci_unregister_cb);
4169
4170 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4171 {
4172         int err;
4173
4174         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4175                skb->len);
4176
4177         /* Time stamp */
4178         __net_timestamp(skb);
4179
4180         /* Send copy to monitor */
4181         hci_send_to_monitor(hdev, skb);
4182
4183         if (atomic_read(&hdev->promisc)) {
4184                 /* Send copy to the sockets */
4185                 hci_send_to_sock(hdev, skb);
4186         }
4187
4188         /* Get rid of skb owner, prior to sending to the driver. */
4189         skb_orphan(skb);
4190
4191         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4192                 kfree_skb(skb);
4193                 return;
4194         }
4195
4196         err = hdev->send(hdev, skb);
4197         if (err < 0) {
4198                 bt_dev_err(hdev, "sending frame failed (%d)", err);
4199                 kfree_skb(skb);
4200         }
4201 }
4202
4203 /* Send HCI command */
4204 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4205                  const void *param)
4206 {
4207         struct sk_buff *skb;
4208
4209         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4210
4211         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4212         if (!skb) {
4213                 bt_dev_err(hdev, "no memory for command");
4214                 return -ENOMEM;
4215         }
4216
4217         /* Stand-alone HCI commands must be flagged as
4218          * single-command requests.
4219          */
4220         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4221
4222         skb_queue_tail(&hdev->cmd_q, skb);
4223         queue_work(hdev->workqueue, &hdev->cmd_work);
4224
4225         return 0;
4226 }
4227
4228 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4229                    const void *param)
4230 {
4231         struct sk_buff *skb;
4232
4233         if (hci_opcode_ogf(opcode) != 0x3f) {
4234                 /* A controller receiving a command shall respond with either
4235                  * a Command Status Event or a Command Complete Event.
4236                  * Therefore, all standard HCI commands must be sent via the
4237                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4238                  * Some vendors do not comply with this rule for vendor-specific
4239                  * commands and do not return any event. We want to support
4240                  * unresponded commands for such cases only.
4241                  */
4242                 bt_dev_err(hdev, "unresponded command not supported");
4243                 return -EINVAL;
4244         }
4245
4246         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4247         if (!skb) {
4248                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4249                            opcode);
4250                 return -ENOMEM;
4251         }
4252
4253         hci_send_frame(hdev, skb);
4254
4255         return 0;
4256 }
4257 EXPORT_SYMBOL(__hci_cmd_send);
4258
4259 /* Get data from the previously sent command */
4260 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4261 {
4262         struct hci_command_hdr *hdr;
4263
4264         if (!hdev->sent_cmd)
4265                 return NULL;
4266
4267         hdr = (void *) hdev->sent_cmd->data;
4268
4269         if (hdr->opcode != cpu_to_le16(opcode))
4270                 return NULL;
4271
4272         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4273
4274         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4275 }
4276
/* Send HCI command and wait for command complete event */
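/* Example (hypothetical caller): synchronously read the local version.
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */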
4278 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4279                              const void *param, u32 timeout)
4280 {
4281         struct sk_buff *skb;
4282
4283         if (!test_bit(HCI_UP, &hdev->flags))
4284                 return ERR_PTR(-ENETDOWN);
4285
4286         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4287
4288         hci_req_sync_lock(hdev);
4289         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4290         hci_req_sync_unlock(hdev);
4291
4292         return skb;
4293 }
4294 EXPORT_SYMBOL(hci_cmd_sync);
4295
4296 /* Send ACL data */
4297 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4298 {
4299         struct hci_acl_hdr *hdr;
4300         int len = skb->len;
4301
4302         skb_push(skb, HCI_ACL_HDR_SIZE);
4303         skb_reset_transport_header(skb);
4304         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4305         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4306         hdr->dlen   = cpu_to_le16(len);
4307 }
4308
4309 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4310                           struct sk_buff *skb, __u16 flags)
4311 {
4312         struct hci_conn *conn = chan->conn;
4313         struct hci_dev *hdev = conn->hdev;
4314         struct sk_buff *list;
4315
4316         skb->len = skb_headlen(skb);
4317         skb->data_len = 0;
4318
4319         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4320
4321         switch (hdev->dev_type) {
4322         case HCI_PRIMARY:
4323                 hci_add_acl_hdr(skb, conn->handle, flags);
4324                 break;
4325         case HCI_AMP:
4326                 hci_add_acl_hdr(skb, chan->handle, flags);
4327                 break;
4328         default:
4329                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4330                 return;
4331         }
4332
4333         list = skb_shinfo(skb)->frag_list;
4334         if (!list) {
                /* Non-fragmented */
4336                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4337
4338                 skb_queue_tail(queue, skb);
4339         } else {
4340                 /* Fragmented */
4341                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4342
4343                 skb_shinfo(skb)->frag_list = NULL;
4344
4345                 /* Queue all fragments atomically. We need to use spin_lock_bh
4346                  * here because of 6LoWPAN links, as there this function is
4347                  * called from softirq and using normal spin lock could cause
4348                  * deadlocks.
4349                  */
4350                 spin_lock_bh(&queue->lock);
4351
4352                 __skb_queue_tail(queue, skb);
4353
4354                 flags &= ~ACL_START;
4355                 flags |= ACL_CONT;
4356                 do {
                        skb = list;
                        list = list->next;
4358
4359                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4360                         hci_add_acl_hdr(skb, conn->handle, flags);
4361
4362                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4363
4364                         __skb_queue_tail(queue, skb);
4365                 } while (list);
4366
4367                 spin_unlock_bh(&queue->lock);
4368         }
4369 }
4370
4371 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4372 {
4373         struct hci_dev *hdev = chan->conn->hdev;
4374
4375         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4376
4377         hci_queue_acl(chan, &chan->data_q, skb, flags);
4378
4379         queue_work(hdev->workqueue, &hdev->tx_work);
4380 }
4381
4382 /* Send SCO data */
4383 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4384 {
4385         struct hci_dev *hdev = conn->hdev;
4386         struct hci_sco_hdr hdr;
4387
4388         BT_DBG("%s len %d", hdev->name, skb->len);
4389
4390         hdr.handle = cpu_to_le16(conn->handle);
4391         hdr.dlen   = skb->len;
4392
4393         skb_push(skb, HCI_SCO_HDR_SIZE);
4394         skb_reset_transport_header(skb);
4395         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4396
4397         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4398
4399         skb_queue_tail(&conn->data_q, skb);
4400         queue_work(hdev->workqueue, &hdev->tx_work);
4401 }
4402
4403 /* ---- HCI TX task (outgoing data) ---- */
4404
4405 /* HCI Connection scheduler */
4406 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4407                                      int *quote)
4408 {
4409         struct hci_conn_hash *h = &hdev->conn_hash;
4410         struct hci_conn *conn = NULL, *c;
4411         unsigned int num = 0, min = ~0;
4412
4413         /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled.
         */
4415
4416         rcu_read_lock();
4417
4418         list_for_each_entry_rcu(c, &h->list, list) {
4419                 if (c->type != type || skb_queue_empty(&c->data_q))
4420                         continue;
4421
4422                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4423                         continue;
4424
4425                 num++;
4426
4427                 if (c->sent < min) {
4428                         min  = c->sent;
4429                         conn = c;
4430                 }
4431
4432                 if (hci_conn_num(hdev, type) == num)
4433                         break;
4434         }
4435
4436         rcu_read_unlock();
4437
4438         if (conn) {
4439                 int cnt, q;
4440
4441                 switch (conn->type) {
4442                 case ACL_LINK:
4443                         cnt = hdev->acl_cnt;
4444                         break;
4445                 case SCO_LINK:
4446                 case ESCO_LINK:
4447                         cnt = hdev->sco_cnt;
4448                         break;
4449                 case LE_LINK:
4450                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4451                         break;
4452                 default:
4453                         cnt = 0;
4454                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4455                 }
4456
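                /* Split the free controller buffers evenly across the
                 * active connections, but guarantee at least one packet
                 * per round.
                 */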
4457                 q = cnt / num;
4458                 *quote = q ? q : 1;
        } else {
                *quote = 0;
        }
4461
4462         BT_DBG("conn %p quote %d", conn, *quote);
4463         return conn;
4464 }
4465
4466 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4467 {
4468         struct hci_conn_hash *h = &hdev->conn_hash;
4469         struct hci_conn *c;
4470
4471         bt_dev_err(hdev, "link tx timeout");
4472
4473         rcu_read_lock();
4474
4475         /* Kill stalled connections */
4476         list_for_each_entry_rcu(c, &h->list, list) {
4477                 if (c->type == type && c->sent) {
4478                         bt_dev_err(hdev, "killing stalled connection %pMR",
4479                                    &c->dst);
4480                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4481                 }
4482         }
4483
4484         rcu_read_unlock();
4485 }
4486
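/* Pick the channel whose head skb has the highest priority; among channels
 * at that priority, prefer the connection with the fewest packets in
 * flight.
 */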
4487 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4488                                       int *quote)
4489 {
4490         struct hci_conn_hash *h = &hdev->conn_hash;
4491         struct hci_chan *chan = NULL;
4492         unsigned int num = 0, min = ~0, cur_prio = 0;
4493         struct hci_conn *conn;
4494         int cnt, q, conn_num = 0;
4495
4496         BT_DBG("%s", hdev->name);
4497
4498         rcu_read_lock();
4499
4500         list_for_each_entry_rcu(conn, &h->list, list) {
4501                 struct hci_chan *tmp;
4502
4503                 if (conn->type != type)
4504                         continue;
4505
4506                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4507                         continue;
4508
4509                 conn_num++;
4510
4511                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4512                         struct sk_buff *skb;
4513
4514                         if (skb_queue_empty(&tmp->data_q))
4515                                 continue;
4516
4517                         skb = skb_peek(&tmp->data_q);
4518                         if (skb->priority < cur_prio)
4519                                 continue;
4520
4521                         if (skb->priority > cur_prio) {
4522                                 num = 0;
4523                                 min = ~0;
4524                                 cur_prio = skb->priority;
4525                         }
4526
4527                         num++;
4528
4529                         if (conn->sent < min) {
4530                                 min  = conn->sent;
4531                                 chan = tmp;
4532                         }
4533                 }
4534
4535                 if (hci_conn_num(hdev, type) == conn_num)
4536                         break;
4537         }
4538
4539         rcu_read_unlock();
4540
4541         if (!chan)
4542                 return NULL;
4543
4544         switch (chan->conn->type) {
4545         case ACL_LINK:
4546                 cnt = hdev->acl_cnt;
4547                 break;
4548         case AMP_LINK:
4549                 cnt = hdev->block_cnt;
4550                 break;
4551         case SCO_LINK:
4552         case ESCO_LINK:
4553                 cnt = hdev->sco_cnt;
4554                 break;
4555         case LE_LINK:
4556                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4557                 break;
4558         default:
4559                 cnt = 0;
4560                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4561         }
4562
4563         q = cnt / num;
4564         *quote = q ? q : 1;
4565         BT_DBG("chan %p quote %d", chan, *quote);
4566         return chan;
4567 }
4568
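/* Promote the head skb of any channel that was not serviced in the last
 * scheduling round to HCI_PRIO_MAX - 1 so that low-priority traffic is
 * not starved.
 */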
4569 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4570 {
4571         struct hci_conn_hash *h = &hdev->conn_hash;
4572         struct hci_conn *conn;
4573         int num = 0;
4574
4575         BT_DBG("%s", hdev->name);
4576
4577         rcu_read_lock();
4578
4579         list_for_each_entry_rcu(conn, &h->list, list) {
4580                 struct hci_chan *chan;
4581
4582                 if (conn->type != type)
4583                         continue;
4584
4585                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4586                         continue;
4587
4588                 num++;
4589
4590                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4591                         struct sk_buff *skb;
4592
4593                         if (chan->sent) {
4594                                 chan->sent = 0;
4595                                 continue;
4596                         }
4597
4598                         if (skb_queue_empty(&chan->data_q))
4599                                 continue;
4600
4601                         skb = skb_peek(&chan->data_q);
4602                         if (skb->priority >= HCI_PRIO_MAX - 1)
4603                                 continue;
4604
4605                         skb->priority = HCI_PRIO_MAX - 1;
4606
4607                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4608                                skb->priority);
4609                 }
4610
4611                 if (hci_conn_num(hdev, type) == num)
4612                         break;
4613         }
4614
4615         rcu_read_unlock();
4617 }
4618
4619 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4620 {
4621         /* Calculate count of blocks used by this packet */
4622         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4623 }
4624
4625 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4626 {
4627         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4628                 /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds)
                 */
4630                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4631                                        HCI_ACL_TX_TIMEOUT))
4632                         hci_link_tx_to(hdev, ACL_LINK);
4633         }
4634 }
4635
4636 /* Schedule SCO - serve the connection with the lowest sent count first */
4637 static void hci_sched_sco(struct hci_dev *hdev)
4638 {
4639         struct hci_conn *conn;
4640         struct sk_buff *skb;
4641         int quote;
4642
4643         BT_DBG("%s", hdev->name);
4644
4645         if (!hci_conn_num(hdev, SCO_LINK))
4646                 return;
4647
4648         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4649                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4650                         BT_DBG("skb %p len %d", skb, skb->len);
4651                         hci_send_frame(hdev, skb);
4652
4653                         conn->sent++;
4654                         if (conn->sent == ~0)
4655                                 conn->sent = 0;
4656                 }
4657         }
4658 }
4659
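/* Schedule eSCO - same logic as hci_sched_sco() above; eSCO is a distinct
 * link type but shares the controller's SCO buffer accounting (sco_cnt).
 */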
4660 static void hci_sched_esco(struct hci_dev *hdev)
4661 {
4662         struct hci_conn *conn;
4663         struct sk_buff *skb;
4664         int quote;
4665
4666         BT_DBG("%s", hdev->name);
4667
4668         if (!hci_conn_num(hdev, ESCO_LINK))
4669                 return;
4670
4671         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4672                                                      &quote))) {
4673                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4674                         BT_DBG("skb %p len %d", skb, skb->len);
4675                         hci_send_frame(hdev, skb);
4676
4677                         conn->sent++;
4678                         if (conn->sent == ~0)
4679                                 conn->sent = 0;
4680                 }
4681         }
4682 }
4683
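/* Packet-based ACL scheduling: each eligible channel gets a fair quote of
 * the free ACL buffers from hci_chan_sent(), and a channel's run ends
 * early if the priority at the head of its queue drops below the priority
 * the run started with. SCO/eSCO are rescheduled after every frame so
 * voice traffic is never starved behind bulk ACL data.
 */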
4684 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4685 {
4686         unsigned int cnt = hdev->acl_cnt;
4687         struct hci_chan *chan;
4688         struct sk_buff *skb;
4689         int quote;
4690
4691         __check_timeout(hdev, cnt);
4692
4693         while (hdev->acl_cnt &&
4694                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4695                 u32 priority = (skb_peek(&chan->data_q))->priority;
4696                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4697                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4698                                skb->len, skb->priority);
4699
4700                         /* Stop if priority has changed */
4701                         if (skb->priority < priority)
4702                                 break;
4703
4704                         skb = skb_dequeue(&chan->data_q);
4705
4706                         hci_conn_enter_active_mode(chan->conn,
4707                                                    bt_cb(skb)->force_active);
4708
4709                         hci_send_frame(hdev, skb);
4710                         hdev->acl_last_tx = jiffies;
4711
4712                         hdev->acl_cnt--;
4713                         chan->sent++;
4714                         chan->conn->sent++;
4715
4716                         /* Send pending SCO packets right away */
4717                         hci_sched_sco(hdev);
4718                         hci_sched_esco(hdev);
4719                 }
4720         }
4721
4722         if (cnt != hdev->acl_cnt)
4723                 hci_prio_recalculate(hdev, ACL_LINK);
4724 }
4725
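/* Block-based ACL scheduling, used when the controller advertises
 * HCI_FLOW_CTL_MODE_BLOCK_BASED: credits are spent in buffer blocks via
 * __get_blocks() rather than in packets, and on AMP controllers the
 * traffic is accounted against AMP_LINK instead of ACL_LINK.
 */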
4726 static void hci_sched_acl_blk(struct hci_dev *hdev)
4727 {
4728         unsigned int cnt = hdev->block_cnt;
4729         struct hci_chan *chan;
4730         struct sk_buff *skb;
4731         int quote;
4732         u8 type;
4733
4734         __check_timeout(hdev, cnt);
4735
4736         BT_DBG("%s", hdev->name);
4737
4738         if (hdev->dev_type == HCI_AMP)
4739                 type = AMP_LINK;
4740         else
4741                 type = ACL_LINK;
4742
4743         while (hdev->block_cnt > 0 &&
4744                (chan = hci_chan_sent(hdev, type, &quote))) {
4745                 u32 priority = (skb_peek(&chan->data_q))->priority;
4746                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4747                         int blocks;
4748
4749                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4750                                skb->len, skb->priority);
4751
4752                         /* Stop if priority has changed */
4753                         if (skb->priority < priority)
4754                                 break;
4755
4756                         skb = skb_dequeue(&chan->data_q);
4757
4758                         blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt) {
                                /* Not enough free block credits: requeue
                                 * rather than leak the frame.
                                 */
                                skb_queue_head(&chan->data_q, skb);
                                return;
                        }
4761
4762                         hci_conn_enter_active_mode(chan->conn,
4763                                                    bt_cb(skb)->force_active);
4764
4765                         hci_send_frame(hdev, skb);
4766                         hdev->acl_last_tx = jiffies;
4767
4768                         hdev->block_cnt -= blocks;
4769                         quote -= blocks;
4770
4771                         chan->sent += blocks;
4772                         chan->conn->sent += blocks;
4773                 }
4774         }
4775
4776         if (cnt != hdev->block_cnt)
4777                 hci_prio_recalculate(hdev, type);
4778 }
4779
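/* Dispatch ACL scheduling according to the controller's flow control
 * mode: packet-based on BR/EDR controllers, block-based on AMP.
 */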
4780 static void hci_sched_acl(struct hci_dev *hdev)
4781 {
4782         BT_DBG("%s", hdev->name);
4783
4784         /* Nothing to schedule if a BR/EDR controller has no ACL links */
4785         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4786                 return;
4787
4788         /* Nothing to schedule if an AMP controller has no AMP links */
4789         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4790                 return;
4791
4792         switch (hdev->flow_ctl_mode) {
4793         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4794                 hci_sched_acl_pkt(hdev);
4795                 break;
4796
4797         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4798                 hci_sched_acl_blk(hdev);
4799                 break;
4800         }
4801 }
4802
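/* LE scheduling. A controller that reports no dedicated LE buffer pool
 * (le_pkts == 0) shares the BR/EDR ACL buffers, which is why the credit
 * count falls back to acl_cnt below. Note that __check_timeout() still
 * keys off acl_last_tx even for LE traffic.
 */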
4803 static void hci_sched_le(struct hci_dev *hdev)
4804 {
4805         struct hci_chan *chan;
4806         struct sk_buff *skb;
4807         int quote, cnt, tmp;
4808
4809         BT_DBG("%s", hdev->name);
4810
4811         if (!hci_conn_num(hdev, LE_LINK))
4812                 return;
4813
4814         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4815
4816         __check_timeout(hdev, cnt);
4817
4818         tmp = cnt;
4819         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4820                 u32 priority = (skb_peek(&chan->data_q))->priority;
4821                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4822                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4823                                skb->len, skb->priority);
4824
4825                         /* Stop if priority has changed */
4826                         if (skb->priority < priority)
4827                                 break;
4828
4829                         skb = skb_dequeue(&chan->data_q);
4830
4831                         hci_send_frame(hdev, skb);
4832                         hdev->le_last_tx = jiffies;
4833
4834                         cnt--;
4835                         chan->sent++;
4836                         chan->conn->sent++;
4837
4838                         /* Send pending SCO packets right away */
4839                         hci_sched_sco(hdev);
4840                         hci_sched_esco(hdev);
4841                 }
4842         }
4843
4844         if (hdev->le_pkts)
4845                 hdev->le_cnt = cnt;
4846         else
4847                 hdev->acl_cnt = cnt;
4848
4849         if (cnt != tmp)
4850                 hci_prio_recalculate(hdev, LE_LINK);
4851 }
4852
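/* TX work handler: run the schedulers in strict order (SCO, eSCO, ACL,
 * LE), then flush the raw queue. In HCI_USER_CHANNEL mode only the raw
 * queue is serviced, since userspace owns the device and performs its
 * own scheduling.
 */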
4853 static void hci_tx_work(struct work_struct *work)
4854 {
4855         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4856         struct sk_buff *skb;
4857
4858         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4859                hdev->sco_cnt, hdev->le_cnt);
4860
4861         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4862                 /* Schedule queues and send stuff to HCI driver */
4863                 hci_sched_sco(hdev);
4864                 hci_sched_esco(hdev);
4865                 hci_sched_acl(hdev);
4866                 hci_sched_le(hdev);
4867         }
4868
4869         /* Drain the queue of raw (unknown packet type) frames */
4870         while ((skb = skb_dequeue(&hdev->raw_q)))
4871                 hci_send_frame(hdev, skb);
4872 }
4873
4874 /* ----- HCI RX task (incoming data processing) ----- */
4875
4876 /* ACL data packet */
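/* The 16-bit handle field of the ACL header packs a 12-bit connection
 * handle together with the Packet Boundary and Broadcast flags in the
 * upper four bits; hci_handle() and hci_flags() below split them apart.
 */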
4877 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4878 {
4879         struct hci_acl_hdr *hdr = (void *) skb->data;
4880         struct hci_conn *conn;
4881         __u16 handle, flags;
4882
4883         skb_pull(skb, HCI_ACL_HDR_SIZE);
4884
4885         handle = __le16_to_cpu(hdr->handle);
4886         flags  = hci_flags(handle);
4887         handle = hci_handle(handle);
4888
4889         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4890                handle, flags);
4891
4892         hdev->stat.acl_rx++;
4893
4894         hci_dev_lock(hdev);
4895         conn = hci_conn_hash_lookup_handle(hdev, handle);
4896         hci_dev_unlock(hdev);
4897
4898         if (conn) {
4899                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4900
4901                 /* Send to upper protocol */
4902                 l2cap_recv_acldata(conn, skb, flags);
4903                 return;
4904         }
4905
4906         bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4907                    handle);
4908
4909         kfree_skb(skb);
4910 }
4911
4912 /* SCO data packet */
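/* As with ACL, the handle field carries flag bits. For SCO packets they
 * hold the Packet_Status_Flag used for erroneous data reporting, which is
 * stashed in the skb control block for the SCO socket layer.
 */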
4913 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4914 {
4915         struct hci_sco_hdr *hdr = (void *) skb->data;
4916         struct hci_conn *conn;
4917         __u16 handle, flags;
4918
4919         skb_pull(skb, HCI_SCO_HDR_SIZE);
4920
4921         handle = __le16_to_cpu(hdr->handle);
4922         flags  = hci_flags(handle);
4923         handle = hci_handle(handle);
4924
4925         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4926                handle, flags);
4927
4928         hdev->stat.sco_rx++;
4929
4930         hci_dev_lock(hdev);
4931         conn = hci_conn_hash_lookup_handle(hdev, handle);
4932         hci_dev_unlock(hdev);
4933
4934         if (conn) {
4935                 /* Send to upper protocol */
4936                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
4937                 sco_recv_scodata(conn, skb);
4938                 return;
4939         }
4940
4941         bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4942                    handle);
4943
4944         kfree_skb(skb);
4945 }
4946
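/* A request is complete when the command queue is empty or when the next
 * queued command opens a new request (carries HCI_REQ_START).
 */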
4947 static bool hci_req_is_complete(struct hci_dev *hdev)
4948 {
4949         struct sk_buff *skb;
4950
4951         skb = skb_peek(&hdev->cmd_q);
4952         if (!skb)
4953                 return true;
4954
4955         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4956 }
4957
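/* Re-queue a clone of the last sent command. This recovers controllers
 * that emit a spontaneous Reset Command Complete during init, which would
 * otherwise leave the genuinely pending command unanswered. A pending
 * HCI_OP_RESET is deliberately never resent, to avoid a reset loop.
 */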
4958 static void hci_resend_last(struct hci_dev *hdev)
4959 {
4960         struct hci_command_hdr *sent;
4961         struct sk_buff *skb;
4962         u16 opcode;
4963
4964         if (!hdev->sent_cmd)
4965                 return;
4966
4967         sent = (void *) hdev->sent_cmd->data;
4968         opcode = __le16_to_cpu(sent->opcode);
4969         if (opcode == HCI_OP_RESET)
4970                 return;
4971
4972         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4973         if (!skb)
4974                 return;
4975
4976         skb_queue_head(&hdev->cmd_q, skb);
4977         queue_work(hdev->workqueue, &hdev->cmd_work);
4978 }
4979
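/* Called from the Command Complete/Status event handlers to look up the
 * completion callbacks of the request that the finished opcode belongs
 * to. The callers in hci_event.c use it roughly as follows (a sketch,
 * not the verbatim code there):
 *
 *	hci_req_cmd_complete(hdev, opcode, status,
 *			     &req_complete, &req_complete_skb);
 *	if (req_complete)
 *		req_complete(hdev, status, opcode);
 *	else if (req_complete_skb)
 *		req_complete_skb(hdev, status, opcode, skb);
 */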
4980 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4981                           hci_req_complete_t *req_complete,
4982                           hci_req_complete_skb_t *req_complete_skb)
4983 {
4984         struct sk_buff *skb;
4985         unsigned long flags;
4986
4987         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4988
4989         /* If the completed command doesn't match the last one that was
4990          * sent, we need to handle it specially.
4991          */
4992         if (!hci_sent_cmd_data(hdev, opcode)) {
4993                 /* Some CSR based controllers generate a spontaneous
4994                  * reset complete event during init and any pending
4995                  * command will never be completed. In such a case we
4996                  * need to resend whatever was the last sent
4997                  * command.
4998                  */
4999                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5000                         hci_resend_last(hdev);
5001
5002                 return;
5003         }
5004
5005         /* If we reach this point this event matches the last command sent */
5006         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5007
5008         /* If the command succeeded and there are still more commands in
5009          * this request, the request is not yet complete.
5010          */
5011         if (!status && !hci_req_is_complete(hdev))
5012                 return;
5013
5014         /* If this was the last command in a request, the complete
5015          * callback is found in hdev->sent_cmd instead of in the
5016          * command queue (hdev->cmd_q).
5017          */
5018         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5019                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5020                 return;
5021         }
5022
5023         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5024                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5025                 return;
5026         }
5027
5028         /* Remove all pending commands belonging to this request */
5029         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5030         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5031                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
5032                         __skb_queue_head(&hdev->cmd_q, skb);
5033                         break;
5034                 }
5035
5036                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5037                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5038                 else
5039                         *req_complete = bt_cb(skb)->hci.req_complete;
5040                 kfree_skb(skb);
5041         }
5042         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5043 }
5044
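/* RX work handler: drain rx_q, mirroring every packet to the monitor
 * socket and, in promiscuous mode, to the HCI sockets, then demultiplex
 * by packet type into the event, ACL and SCO handlers above.
 */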
5045 static void hci_rx_work(struct work_struct *work)
5046 {
5047         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5048         struct sk_buff *skb;
5049
5050         BT_DBG("%s", hdev->name);
5051
5052         while ((skb = skb_dequeue(&hdev->rx_q))) {
5053                 /* Send copy to monitor */
5054                 hci_send_to_monitor(hdev, skb);
5055
5056                 if (atomic_read(&hdev->promisc)) {
5057                         /* Send copy to the sockets */
5058                         hci_send_to_sock(hdev, skb);
5059                 }
5060
5061                 /* If the device has been opened in HCI_USER_CHANNEL,
5062                  * userspace has exclusive access to the device.
5063                  * While the device is in HCI_INIT state, however,
5064                  * incoming packets must still be processed so that
5065                  * the driver's setup() stage can complete.
5066                  */
5067                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5068                     !test_bit(HCI_INIT, &hdev->flags)) {
5069                         kfree_skb(skb);
5070                         continue;
5071                 }
5072
5073                 if (test_bit(HCI_INIT, &hdev->flags)) {
5074                         /* Don't process data packets in this state. */
5075                         switch (hci_skb_pkt_type(skb)) {
5076                         case HCI_ACLDATA_PKT:
5077                         case HCI_SCODATA_PKT:
5078                         case HCI_ISODATA_PKT:
5079                                 kfree_skb(skb);
5080                                 continue;
5081                         }
5082                 }
5083
5084                 /* Process frame */
5085                 switch (hci_skb_pkt_type(skb)) {
5086                 case HCI_EVENT_PKT:
5087                         BT_DBG("%s Event packet", hdev->name);
5088                         hci_event_packet(hdev, skb);
5089                         break;
5090
5091                 case HCI_ACLDATA_PKT:
5092                         BT_DBG("%s ACL data packet", hdev->name);
5093                         hci_acldata_packet(hdev, skb);
5094                         break;
5095
5096                 case HCI_SCODATA_PKT:
5097                         BT_DBG("%s SCO data packet", hdev->name);
5098                         hci_scodata_packet(hdev, skb);
5099                         break;
5100
5101                 default:
5102                         kfree_skb(skb);
5103                         break;
5104                 }
5105         }
5106 }
5107
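/* Command work handler: send queued commands while the controller has
 * command credits (cmd_cnt). A clone of the in-flight command is kept in
 * hdev->sent_cmd so its completion event can be matched, and cmd_timer
 * serves as a watchdog for commands the controller never answers.
 */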
5108 static void hci_cmd_work(struct work_struct *work)
5109 {
5110         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5111         struct sk_buff *skb;
5112
5113         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5114                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5115
5116         /* Send queued commands */
5117         if (atomic_read(&hdev->cmd_cnt)) {
5118                 skb = skb_dequeue(&hdev->cmd_q);
5119                 if (!skb)
5120                         return;
5121
5122                 kfree_skb(hdev->sent_cmd);
5123
5124                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5125                 if (hdev->sent_cmd) {
5126                         if (hci_req_status_pend(hdev))
5127                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5128                         atomic_dec(&hdev->cmd_cnt);
5129                         hci_send_frame(hdev, skb);
5130                         if (test_bit(HCI_RESET, &hdev->flags))
5131                                 cancel_delayed_work(&hdev->cmd_timer);
5132                         else
5133                                 schedule_delayed_work(&hdev->cmd_timer,
5134                                                       HCI_CMD_TIMEOUT);
5135                 } else {
5136                         skb_queue_head(&hdev->cmd_q, skb);
5137                         queue_work(hdev->workqueue, &hdev->cmd_work);
5138                 }
5139         }
5140 }