/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"

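/* Deferred work handlers for the receive, command and transmit paths.
 * They are scheduled on the hdev workqueues and defined later in this file.
 */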
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

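/* The dut_mode attribute exposes the Device Under Test mode as a boolean
 * debugfs file. As an illustration (assuming debugfs is mounted at
 * /sys/kernel/debug), it could be toggled from userspace with e.g.:
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 */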
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, then there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

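/* The init helpers below only queue HCI commands on a struct hci_request;
 * nothing goes on the wire until the request is executed synchronously via
 * __hci_req_sync() (see __hci_init() below), which runs the queued commands
 * in order and waits for their completions.
 */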
static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Block Size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
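        /* 0x7d00 is in 0.625 ms slots: 32000 * 0.625 ms = 20 s */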
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

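/* Event mask layout, for reference: event code N sets bit N - 1 of the
 * mask, i.e. byte (N - 1) / 8, bit (N - 1) % 8. For example, Disconnection
 * Complete (code 0x05) maps to bit 4 of byte 0, hence events[0] |= 0x10
 * below.
 */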
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for Set Event
         * Mask Page 2 command, but then actually do not support it. Since
         * the default value is all bits set to zero, the command is only
         * required if the event mask has to be changed. In case no change
         * to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        if (hdev->commands[18] & 0x04 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports LL Privacy feature, enable
                 * the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;      /* LE PHY Update Complete */

                /* If the controller supports LE Set Extended Scan Parameters
                 * and LE Set Extended Scan Enable commands, enable the
                 * corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * command, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* HCI TS spec forbids mixing of legacy and extended
                         * advertising commands wherein READ_ADV_TX_POWER is
                         * also included. So do not call it if extended adv
                         * is supported, otherwise the controller will return
                         * COMMAND_DISALLOWED for extended commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[38] & 0x80) {
                        /* Read LE Min/Max Tx Power */
                        hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->commands[35] & 0x04) {
                        __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

                        /* Set RPA timeout */
                        hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
                                    &rpa_timeout);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Read local pairing options if the HCI command is supported */
        if (hdev->commands[41] & 0x08)
                hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set erroneous data reporting if supported to the wideband speech
         * setting value
         */
        if (hdev->commands[18] & 0x08 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
                bool enabled = hci_dev_test_flag(hdev,
                                                 HCI_WIDEBAND_SPEECH_ENABLED);

                if (enabled !=
                    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
                        struct hci_cp_write_def_err_data_reporting cp;

                        cp.err_data_reporting = enabled ?
                                                ERR_DATA_REPORTING_ENABLED :
                                                ERR_DATA_REPORTING_DISABLED;

                        hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
                                    sizeof(cp), &cp);
                }
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

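/* Controller bring-up runs in up to four synchronous stages: stage 1 resets
 * the controller and reads basic device information, stage 2 performs
 * transport specific setup, and stages 3 and 4 (HCI_PRIMARY only) program
 * event masks and optional features based on what stages 1 and 2 reported.
 */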
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

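/* Re-insert @ie into the resolve list, which is kept sorted by signal
 * strength (smallest absolute RSSI, i.e. strongest signal, first) so that
 * name resolution is attempted for the closest devices first. Entries
 * whose name request is already pending keep their position at the front.
 */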
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

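/* Copy at most @num entries from the inquiry cache into @buf as an array
 * of struct inquiry_info, as consumed by the HCIINQUIRY ioctl. Returns the
 * number of entries copied. The caller is expected to hold the hdev lock.
 */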
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

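/* Handler for the HCIINQUIRY ioctl. Illustrative userspace usage (a sketch,
 * not taken from this file): the caller passes a struct hci_inquiry_req
 * immediately followed by room for the requested inquiry_info entries, e.g.:
 *
 *   struct {
 *           struct hci_inquiry_req ir;
 *           struct inquiry_info    info[255];
 *   } req = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *                     .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *   ioctl(hci_sock_fd, HCIINQUIRY, &req);
 */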
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

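        /* Inquiry_Length is in units of 1.28 s per the HCI specification;
         * allow 2 s per unit for the overall request timeout.
         */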
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *                                     (BD_ADDR) for an HCI device from
 *                                     a firmware node property.
 * @hdev:       The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware. For
 * example, the DTS could define 'local-bd-address', with zero BD addresses.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
        struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
        bdaddr_t ba;
        int ret;

        ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
                                            (u8 *)&ba, sizeof(ba));
        if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
                return;

        bacpy(&hdev->public_addr, &ba);
}
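
/* Illustrative only: such a property could appear in a devicetree source as
 *
 *   local-bd-address = [ 55 44 33 22 11 00 ];
 *
 * encoding BD_ADDR 00:11:22:33:44:55, with the least significant byte first
 * as the bluetooth devicetree binding specifies.
 */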

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP) ||
            test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
                bool invalid_bdaddr;

                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set the quirk to mark the
                 * BD_ADDR invalid before creating the HCI device or in
                 * its setup callback.
                 */
                invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
                                          &hdev->quirks);

                if (ret)
                        goto setup_failed;

                if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
                        if (!bacmp(&hdev->public_addr, BDADDR_ANY))
                                hci_dev_get_bd_addr_from_property(hdev);

                        if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                            hdev->set_bdaddr) {
                                ret = hdev->set_bdaddr(hdev,
                                                       &hdev->public_addr);

                                /* If setting of the BD_ADDR from the device
                                 * property succeeds, then treat the address
                                 * as valid even if the invalid BD_ADDR
                                 * quirk indicates otherwise.
                                 */
                                if (!ret)
                                        invalid_bdaddr = false;
                        }
                }

setup_failed:
                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * For the invalid BD_ADDR quirk it is possible that
                 * it becomes a valid address if the bootloader does
                 * provide it (see above).
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    invalid_bdaddr)
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
1551                  * also the original Bluetooth public device address
1552                  * will be read using the Read BD Address command.
1553                  */
1554                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1555                         ret = __hci_unconf_init(hdev);
1556         }
1557
1558         if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1559                 /* If public address change is configured, ensure that
1560                  * the address gets programmed. If the driver does not
1561                  * support changing the public address, fail the power
1562                  * on procedure.
1563                  */
1564                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1565                     hdev->set_bdaddr)
1566                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1567                 else
1568                         ret = -EADDRNOTAVAIL;
1569         }
1570
1571         if (!ret) {
1572                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1573                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1574                         ret = __hci_init(hdev);
1575                         if (!ret && hdev->post_init)
1576                                 ret = hdev->post_init(hdev);
1577                 }
1578         }
1579
1580         /* If the HCI Reset command is clearing all diagnostic settings,
1581          * then they need to be reprogrammed after the init procedure
1582          * has completed.
1583          */
1584         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1585             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1586             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1587                 ret = hdev->set_diag(hdev, true);
1588
1589         msft_do_open(hdev);
1590         aosp_do_open(hdev);
1591
1592         clear_bit(HCI_INIT, &hdev->flags);
1593
1594         if (!ret) {
1595                 hci_dev_hold(hdev);
1596                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1597                 hci_adv_instances_set_rpa_expired(hdev, true);
1598                 set_bit(HCI_UP, &hdev->flags);
1599                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1600                 hci_leds_update_powered(hdev, true);
1601                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1602                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1603                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1604                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1605                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1606                     hdev->dev_type == HCI_PRIMARY) {
1607                         ret = __hci_req_hci_power_on(hdev);
1608                         mgmt_power_on(hdev, ret);
1609                 }
1610         } else {
1611                 /* Init failed, cleanup */
1612                 flush_work(&hdev->tx_work);
1613                 flush_work(&hdev->cmd_work);
1614                 flush_work(&hdev->rx_work);
1615
1616                 skb_queue_purge(&hdev->cmd_q);
1617                 skb_queue_purge(&hdev->rx_q);
1618
1619                 if (hdev->flush)
1620                         hdev->flush(hdev);
1621
1622                 if (hdev->sent_cmd) {
1623                         kfree_skb(hdev->sent_cmd);
1624                         hdev->sent_cmd = NULL;
1625                 }
1626
1627                 clear_bit(HCI_RUNNING, &hdev->flags);
1628                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1629
1630                 hdev->close(hdev);
1631                 hdev->flags &= BIT(HCI_RAW);
1632         }
1633
1634 done:
1635         hci_req_sync_unlock(hdev);
1636         return ret;
1637 }
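
     /* A transport driver hooks into the setup stage above roughly like
      * this (a sketch with hypothetical names; the quirk handling matches
      * the logic in hci_dev_do_open()):
      *
      *        static int foo_setup(struct hci_dev *hdev)
      *        {
      *                if (foo_load_firmware(hdev))
      *                        return -EIO;
      *
      *                if (!foo_has_persistent_bdaddr(hdev))
      *                        set_bit(HCI_QUIRK_INVALID_BDADDR,
      *                                &hdev->quirks);
      *
      *                return 0;
      *        }
      */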
1638
1639 /* ---- HCI ioctl helpers ---- */
1640
1641 int hci_dev_open(__u16 dev)
1642 {
1643         struct hci_dev *hdev;
1644         int err;
1645
1646         hdev = hci_dev_get(dev);
1647         if (!hdev)
1648                 return -ENODEV;
1649
1650         /* Devices that are marked as unconfigured can only be powered
1651          * up as user channel. Trying to bring them up as normal devices
1652          * will result in a failure. Only user channel operation is
1653          * possible.
1654          *
1655          * When this function is called for a user channel, the flag
1656          * HCI_USER_CHANNEL will be set first before attempting to
1657          * open the device.
1658          */
1659         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1660             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1661                 err = -EOPNOTSUPP;
1662                 goto done;
1663         }
1664
1665         /* We need to ensure that no other power on/off work is pending
1666          * before proceeding to call hci_dev_do_open. This is
1667          * particularly important if the setup procedure has not yet
1668          * completed.
1669          */
1670         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1671                 cancel_delayed_work(&hdev->power_off);
1672
1673         /* After this call it is guaranteed that the setup procedure
1674          * has finished. This means that error conditions like RFKILL
1675          * or no valid public or static random address apply.
1676          */
1677         flush_workqueue(hdev->req_workqueue);
1678
1679         /* For controllers not using the management interface and that
1680          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1681          * so that pairing works for them. Once the management interface
1682          * is in use this bit will be cleared again and userspace has
1683          * to explicitly enable it.
1684          */
1685         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1686             !hci_dev_test_flag(hdev, HCI_MGMT))
1687                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1688
1689         err = hci_dev_do_open(hdev);
1690
1691 done:
1692         hci_dev_put(hdev);
1693         return err;
1694 }
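
     /* The legacy userspace path into hci_dev_open() is the HCIDEVUP ioctl
      * on a raw HCI socket, roughly (a minimal sketch assuming the BlueZ
      * definitions from <bluetooth/hci.h>):
      *
      *        int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
      *
      *        if (ioctl(dd, HCIDEVUP, 0) < 0 && errno != EALREADY)
      *                perror("hci0 up");
      */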
1695
1696 /* This function requires the caller holds hdev->lock */
1697 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1698 {
1699         struct hci_conn_params *p;
1700
1701         list_for_each_entry(p, &hdev->le_conn_params, list) {
1702                 if (p->conn) {
1703                         hci_conn_drop(p->conn);
1704                         hci_conn_put(p->conn);
1705                         p->conn = NULL;
1706                 }
1707                 list_del_init(&p->action);
1708         }
1709
1710         BT_DBG("All LE pending actions cleared");
1711 }
1712
1713 int hci_dev_do_close(struct hci_dev *hdev)
1714 {
1715         bool auto_off;
1716
1717         BT_DBG("%s %p", hdev->name, hdev);
1718
1719         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1720             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1721             test_bit(HCI_UP, &hdev->flags)) {
1722                 /* Execute vendor specific shutdown routine */
1723                 if (hdev->shutdown)
1724                         hdev->shutdown(hdev);
1725         }
1726
1727         cancel_delayed_work(&hdev->power_off);
1728
1729         hci_request_cancel_all(hdev);
1730         hci_req_sync_lock(hdev);
1731
1732         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1733                 cancel_delayed_work_sync(&hdev->cmd_timer);
1734                 hci_req_sync_unlock(hdev);
1735                 return 0;
1736         }
1737
1738         hci_leds_update_powered(hdev, false);
1739
1740         /* Flush RX and TX work items */
1741         flush_work(&hdev->tx_work);
1742         flush_work(&hdev->rx_work);
1743
1744         if (hdev->discov_timeout > 0) {
1745                 hdev->discov_timeout = 0;
1746                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1747                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1748         }
1749
1750         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1751                 cancel_delayed_work(&hdev->service_cache);
1752
1753         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1754                 struct adv_info *adv_instance;
1755
1756                 cancel_delayed_work_sync(&hdev->rpa_expired);
1757
1758                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1759                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1760         }
1761
1762         /* Avoid potential lockdep warnings from the *_flush() calls by
1763          * ensuring the workqueue is empty up front.
1764          */
1765         drain_workqueue(hdev->workqueue);
1766
1767         hci_dev_lock(hdev);
1768
1769         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1770
1771         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1772
1773         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1774             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1775             hci_dev_test_flag(hdev, HCI_MGMT))
1776                 __mgmt_power_off(hdev);
1777
1778         hci_inquiry_cache_flush(hdev);
1779         hci_pend_le_actions_clear(hdev);
1780         hci_conn_hash_flush(hdev);
1781         hci_dev_unlock(hdev);
1782
1783         smp_unregister(hdev);
1784
1785         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1786
1787         aosp_do_close(hdev);
1788         msft_do_close(hdev);
1789
1790         if (hdev->flush)
1791                 hdev->flush(hdev);
1792
1793         /* Reset device */
1794         skb_queue_purge(&hdev->cmd_q);
1795         atomic_set(&hdev->cmd_cnt, 1);
1796         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1797             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1798                 set_bit(HCI_INIT, &hdev->flags);
1799                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1800                 clear_bit(HCI_INIT, &hdev->flags);
1801         }
1802
1803         /* Flush cmd work */
1804         flush_work(&hdev->cmd_work);
1805
1806         /* Drop queues */
1807         skb_queue_purge(&hdev->rx_q);
1808         skb_queue_purge(&hdev->cmd_q);
1809         skb_queue_purge(&hdev->raw_q);
1810
1811         /* Drop last sent command */
1812         if (hdev->sent_cmd) {
1813                 cancel_delayed_work_sync(&hdev->cmd_timer);
1814                 kfree_skb(hdev->sent_cmd);
1815                 hdev->sent_cmd = NULL;
1816         }
1817
1818         clear_bit(HCI_RUNNING, &hdev->flags);
1819         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1820
1821         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1822                 wake_up(&hdev->suspend_wait_q);
1823
1824         /* After this point our queues are empty
1825          * and no tasks are scheduled. */
1826         hdev->close(hdev);
1827
1828         /* Clear flags */
1829         hdev->flags &= BIT(HCI_RAW);
1830         hci_dev_clear_volatile_flags(hdev);
1831
1832         /* Controller radio is available but is currently powered down */
1833         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1834
1835         memset(hdev->eir, 0, sizeof(hdev->eir));
1836         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1837         bacpy(&hdev->random_addr, BDADDR_ANY);
1838
1839         hci_req_sync_unlock(hdev);
1840
1841         hci_dev_put(hdev);
1842         return 0;
1843 }
1844
1845 int hci_dev_close(__u16 dev)
1846 {
1847         struct hci_dev *hdev;
1848         int err;
1849
1850         hdev = hci_dev_get(dev);
1851         if (!hdev)
1852                 return -ENODEV;
1853
1854         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1855                 err = -EBUSY;
1856                 goto done;
1857         }
1858
1859         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1860                 cancel_delayed_work(&hdev->power_off);
1861
1862         err = hci_dev_do_close(hdev);
1863
1864 done:
1865         hci_dev_put(hdev);
1866         return err;
1867 }
1868
1869 static int hci_dev_do_reset(struct hci_dev *hdev)
1870 {
1871         int ret;
1872
1873         BT_DBG("%s %p", hdev->name, hdev);
1874
1875         hci_req_sync_lock(hdev);
1876
1877         /* Drop queues */
1878         skb_queue_purge(&hdev->rx_q);
1879         skb_queue_purge(&hdev->cmd_q);
1880
1881         /* Avoid potential lockdep warnings from the *_flush() calls by
1882          * ensuring the workqueue is empty up front.
1883          */
1884         drain_workqueue(hdev->workqueue);
1885
1886         hci_dev_lock(hdev);
1887         hci_inquiry_cache_flush(hdev);
1888         hci_conn_hash_flush(hdev);
1889         hci_dev_unlock(hdev);
1890
1891         if (hdev->flush)
1892                 hdev->flush(hdev);
1893
1894         atomic_set(&hdev->cmd_cnt, 1);
1895         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1896
1897         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1898
1899         hci_req_sync_unlock(hdev);
1900         return ret;
1901 }
1902
1903 int hci_dev_reset(__u16 dev)
1904 {
1905         struct hci_dev *hdev;
1906         int err;
1907
1908         hdev = hci_dev_get(dev);
1909         if (!hdev)
1910                 return -ENODEV;
1911
1912         if (!test_bit(HCI_UP, &hdev->flags)) {
1913                 err = -ENETDOWN;
1914                 goto done;
1915         }
1916
1917         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1918                 err = -EBUSY;
1919                 goto done;
1920         }
1921
1922         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1923                 err = -EOPNOTSUPP;
1924                 goto done;
1925         }
1926
1927         err = hci_dev_do_reset(hdev);
1928
1929 done:
1930         hci_dev_put(hdev);
1931         return err;
1932 }
1933
1934 int hci_dev_reset_stat(__u16 dev)
1935 {
1936         struct hci_dev *hdev;
1937         int ret = 0;
1938
1939         hdev = hci_dev_get(dev);
1940         if (!hdev)
1941                 return -ENODEV;
1942
1943         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1944                 ret = -EBUSY;
1945                 goto done;
1946         }
1947
1948         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1949                 ret = -EOPNOTSUPP;
1950                 goto done;
1951         }
1952
1953         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1954
1955 done:
1956         hci_dev_put(hdev);
1957         return ret;
1958 }
1959
1960 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1961 {
1962         bool conn_changed, discov_changed;
1963
1964         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1965
1966         if ((scan & SCAN_PAGE))
1967                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1968                                                           HCI_CONNECTABLE);
1969         else
1970                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1971                                                            HCI_CONNECTABLE);
1972
1973         if ((scan & SCAN_INQUIRY)) {
1974                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1975                                                             HCI_DISCOVERABLE);
1976         } else {
1977                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1978                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1979                                                              HCI_DISCOVERABLE);
1980         }
1981
1982         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1983                 return;
1984
1985         if (conn_changed || discov_changed) {
1986                 /* In case this was disabled through mgmt */
1987                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1988
1989                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1990                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1991
1992                 mgmt_new_settings(hdev);
1993         }
1994 }
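
     /* For reference, the dev_opt scan values map to flags as follows in
      * the function above:
      *
      *        SCAN_DISABLED            -> connectable and discoverable cleared
      *        SCAN_PAGE                -> HCI_CONNECTABLE set
      *        SCAN_INQUIRY             -> HCI_DISCOVERABLE set
      *        SCAN_PAGE | SCAN_INQUIRY -> both set
      */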
1995
1996 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1997 {
1998         struct hci_dev *hdev;
1999         struct hci_dev_req dr;
2000         int err = 0;
2001
2002         if (copy_from_user(&dr, arg, sizeof(dr)))
2003                 return -EFAULT;
2004
2005         hdev = hci_dev_get(dr.dev_id);
2006         if (!hdev)
2007                 return -ENODEV;
2008
2009         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2010                 err = -EBUSY;
2011                 goto done;
2012         }
2013
2014         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2015                 err = -EOPNOTSUPP;
2016                 goto done;
2017         }
2018
2019         if (hdev->dev_type != HCI_PRIMARY) {
2020                 err = -EOPNOTSUPP;
2021                 goto done;
2022         }
2023
2024         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2025                 err = -EOPNOTSUPP;
2026                 goto done;
2027         }
2028
2029         switch (cmd) {
2030         case HCISETAUTH:
2031                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2032                                    HCI_INIT_TIMEOUT, NULL);
2033                 break;
2034
2035         case HCISETENCRYPT:
2036                 if (!lmp_encrypt_capable(hdev)) {
2037                         err = -EOPNOTSUPP;
2038                         break;
2039                 }
2040
2041                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2042                         /* Auth must be enabled first */
2043                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2044                                            HCI_INIT_TIMEOUT, NULL);
2045                         if (err)
2046                                 break;
2047                 }
2048
2049                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2050                                    HCI_INIT_TIMEOUT, NULL);
2051                 break;
2052
2053         case HCISETSCAN:
2054                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2055                                    HCI_INIT_TIMEOUT, NULL);
2056
2057                 /* Ensure that the connectable and discoverable states
2058                  * get correctly modified as this was a non-mgmt change.
2059                  */
2060                 if (!err)
2061                         hci_update_scan_state(hdev, dr.dev_opt);
2062                 break;
2063
2064         case HCISETLINKPOL:
2065                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2066                                    HCI_INIT_TIMEOUT, NULL);
2067                 break;
2068
2069         case HCISETLINKMODE:
2070                 hdev->link_mode = ((__u16) dr.dev_opt) &
2071                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2072                 break;
2073
2074         case HCISETPTYPE:
2075                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2076                         break;
2077
2078                 hdev->pkt_type = (__u16) dr.dev_opt;
2079                 mgmt_phy_configuration_changed(hdev, NULL);
2080                 break;
2081
2082         case HCISETACLMTU:
2083                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2084                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2085                 break;
2086
2087         case HCISETSCOMTU:
2088                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2089                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2090                 break;
2091
2092         default:
2093                 err = -EINVAL;
2094                 break;
2095         }
2096
2097 done:
2098         hci_dev_put(hdev);
2099         return err;
2100 }
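
     /* As an example, the HCISETSCAN case above is what hciconfig-style
      * tools use to enable page and inquiry scan (a userspace sketch,
      * assuming a raw HCI socket dd as for HCIDEVUP):
      *
      *        struct hci_dev_req dr = {
      *                .dev_id  = 0,
      *                .dev_opt = SCAN_PAGE | SCAN_INQUIRY,
      *        };
      *
      *        if (ioctl(dd, HCISETSCAN, (unsigned long)&dr) < 0)
      *                perror("scan enable");
      */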
2101
2102 int hci_get_dev_list(void __user *arg)
2103 {
2104         struct hci_dev *hdev;
2105         struct hci_dev_list_req *dl;
2106         struct hci_dev_req *dr;
2107         int n = 0, size, err;
2108         __u16 dev_num;
2109
2110         if (get_user(dev_num, (__u16 __user *) arg))
2111                 return -EFAULT;
2112
2113         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2114                 return -EINVAL;
2115
2116         size = sizeof(*dl) + dev_num * sizeof(*dr);
2117
2118         dl = kzalloc(size, GFP_KERNEL);
2119         if (!dl)
2120                 return -ENOMEM;
2121
2122         dr = dl->dev_req;
2123
2124         read_lock(&hci_dev_list_lock);
2125         list_for_each_entry(hdev, &hci_dev_list, list) {
2126                 unsigned long flags = hdev->flags;
2127
2128                 /* When auto-off is configured the transport is actually
2129                  * running, but in that case still report the device
2130                  * as down.
2131                  */
2132                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2133                         flags &= ~BIT(HCI_UP);
2134
2135                 (dr + n)->dev_id  = hdev->id;
2136                 (dr + n)->dev_opt = flags;
2137
2138                 if (++n >= dev_num)
2139                         break;
2140         }
2141         read_unlock(&hci_dev_list_lock);
2142
2143         dl->dev_num = n;
2144         size = sizeof(*dl) + n * sizeof(*dr);
2145
2146         err = copy_to_user(arg, dl, size);
2147         kfree(dl);
2148
2149         return err ? -EFAULT : 0;
2150 }
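
     /* Callers pre-set dev_num to the capacity of the trailing array; a
      * userspace sketch (HCI_MAX_DEV is the BlueZ library constant, an
      * assumption here):
      *
      *        struct hci_dev_list_req *dl;
      *
      *        dl = malloc(sizeof(*dl) +
      *                    HCI_MAX_DEV * sizeof(struct hci_dev_req));
      *        if (dl) {
      *                dl->dev_num = HCI_MAX_DEV;
      *                if (ioctl(dd, HCIGETDEVLIST, (void *)dl) == 0)
      *                        printf("%u devices\n", dl->dev_num);
      *        }
      */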
2151
2152 int hci_get_dev_info(void __user *arg)
2153 {
2154         struct hci_dev *hdev;
2155         struct hci_dev_info di;
2156         unsigned long flags;
2157         int err = 0;
2158
2159         if (copy_from_user(&di, arg, sizeof(di)))
2160                 return -EFAULT;
2161
2162         hdev = hci_dev_get(di.dev_id);
2163         if (!hdev)
2164                 return -ENODEV;
2165
2166         /* When auto-off is configured the transport is actually
2167          * running, but in that case still report the device
2168          * as down.
2169          */
2170         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2171                 flags = hdev->flags & ~BIT(HCI_UP);
2172         else
2173                 flags = hdev->flags;
2174
2175         strcpy(di.name, hdev->name);
2176         di.bdaddr   = hdev->bdaddr;
2177         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2178         di.flags    = flags;
2179         di.pkt_type = hdev->pkt_type;
2180         if (lmp_bredr_capable(hdev)) {
2181                 di.acl_mtu  = hdev->acl_mtu;
2182                 di.acl_pkts = hdev->acl_pkts;
2183                 di.sco_mtu  = hdev->sco_mtu;
2184                 di.sco_pkts = hdev->sco_pkts;
2185         } else {
2186                 di.acl_mtu  = hdev->le_mtu;
2187                 di.acl_pkts = hdev->le_pkts;
2188                 di.sco_mtu  = 0;
2189                 di.sco_pkts = 0;
2190         }
2191         di.link_policy = hdev->link_policy;
2192         di.link_mode   = hdev->link_mode;
2193
2194         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2195         memcpy(&di.features, &hdev->features, sizeof(di.features));
2196
2197         if (copy_to_user(arg, &di, sizeof(di)))
2198                 err = -EFAULT;
2199
2200         hci_dev_put(hdev);
2201
2202         return err;
2203 }
2204
2205 /* ---- Interface to HCI drivers ---- */
2206
2207 static int hci_rfkill_set_block(void *data, bool blocked)
2208 {
2209         struct hci_dev *hdev = data;
2210
2211         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2212
2213         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2214                 return -EBUSY;
2215
2216         if (blocked) {
2217                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2218                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2219                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2220                         hci_dev_do_close(hdev);
2221         } else {
2222                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2223         }
2224
2225         return 0;
2226 }
2227
2228 static const struct rfkill_ops hci_rfkill_ops = {
2229         .set_block = hci_rfkill_set_block,
2230 };
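
     /* These ops are registered from hci_register_dev() via
      * rfkill_alloc(hdev->name, &hdev->dev, RFKILL_TYPE_BLUETOOTH,
      * &hci_rfkill_ops, hdev), so block/unblock requests from the rfkill
      * core land in hci_rfkill_set_block() above.
      */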
2231
2232 static void hci_power_on(struct work_struct *work)
2233 {
2234         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2235         int err;
2236
2237         BT_DBG("%s", hdev->name);
2238
2239         if (test_bit(HCI_UP, &hdev->flags) &&
2240             hci_dev_test_flag(hdev, HCI_MGMT) &&
2241             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2242                 cancel_delayed_work(&hdev->power_off);
2243                 hci_req_sync_lock(hdev);
2244                 err = __hci_req_hci_power_on(hdev);
2245                 hci_req_sync_unlock(hdev);
2246                 mgmt_power_on(hdev, err);
2247                 return;
2248         }
2249
2250         err = hci_dev_do_open(hdev);
2251         if (err < 0) {
2252                 hci_dev_lock(hdev);
2253                 mgmt_set_powered_failed(hdev, err);
2254                 hci_dev_unlock(hdev);
2255                 return;
2256         }
2257
2258         /* During the HCI setup phase, a few error conditions are
2259          * ignored and they need to be checked now. If they are still
2260          * valid, it is important to turn the device back off.
2261          */
2262         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2263             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2264             (hdev->dev_type == HCI_PRIMARY &&
2265              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2266              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2267                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2268                 hci_dev_do_close(hdev);
2269         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2270                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2271                                    HCI_AUTO_OFF_TIMEOUT);
2272         }
2273
2274         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2275                 /* For unconfigured devices, set the HCI_RAW flag
2276                  * so that userspace can easily identify them.
2277                  */
2278                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2279                         set_bit(HCI_RAW, &hdev->flags);
2280
2281                 /* For fully configured devices, this will send
2282                  * the Index Added event. For unconfigured devices,
2283                  * it will send the Unconfigured Index Added event.
2284                  *
2285                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2286                  * and no event will be sent.
2287                  */
2288                 mgmt_index_added(hdev);
2289         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2290                 /* Once the controller is configured, it
2291                  * is important to clear the HCI_RAW flag.
2292                  */
2293                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2294                         clear_bit(HCI_RAW, &hdev->flags);
2295
2296                 /* Powering on the controller with HCI_CONFIG set only
2297                  * happens with the transition from unconfigured to
2298                  * configured. This will send the Index Added event.
2299                  */
2300                 mgmt_index_added(hdev);
2301         }
2302 }
2303
2304 static void hci_power_off(struct work_struct *work)
2305 {
2306         struct hci_dev *hdev = container_of(work, struct hci_dev,
2307                                             power_off.work);
2308
2309         BT_DBG("%s", hdev->name);
2310
2311         hci_dev_do_close(hdev);
2312 }
2313
2314 static void hci_error_reset(struct work_struct *work)
2315 {
2316         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2317
2318         BT_DBG("%s", hdev->name);
2319
2320         if (hdev->hw_error)
2321                 hdev->hw_error(hdev, hdev->hw_error_code);
2322         else
2323                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2324
2325         if (hci_dev_do_close(hdev))
2326                 return;
2327
2328         hci_dev_do_open(hdev);
2329 }
2330
2331 void hci_uuids_clear(struct hci_dev *hdev)
2332 {
2333         struct bt_uuid *uuid, *tmp;
2334
2335         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2336                 list_del(&uuid->list);
2337                 kfree(uuid);
2338         }
2339 }
2340
2341 void hci_link_keys_clear(struct hci_dev *hdev)
2342 {
2343         struct link_key *key;
2344
2345         list_for_each_entry(key, &hdev->link_keys, list) {
2346                 list_del_rcu(&key->list);
2347                 kfree_rcu(key, rcu);
2348         }
2349 }
2350
2351 void hci_smp_ltks_clear(struct hci_dev *hdev)
2352 {
2353         struct smp_ltk *k;
2354
2355         list_for_each_entry(k, &hdev->long_term_keys, list) {
2356                 list_del_rcu(&k->list);
2357                 kfree_rcu(k, rcu);
2358         }
2359 }
2360
2361 void hci_smp_irks_clear(struct hci_dev *hdev)
2362 {
2363         struct smp_irk *k;
2364
2365         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2366                 list_del_rcu(&k->list);
2367                 kfree_rcu(k, rcu);
2368         }
2369 }
2370
2371 void hci_blocked_keys_clear(struct hci_dev *hdev)
2372 {
2373         struct blocked_key *b;
2374
2375         list_for_each_entry(b, &hdev->blocked_keys, list) {
2376                 list_del_rcu(&b->list);
2377                 kfree_rcu(b, rcu);
2378         }
2379 }
2380
2381 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2382 {
2383         bool blocked = false;
2384         struct blocked_key *b;
2385
2386         rcu_read_lock();
2387         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2388                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2389                         blocked = true;
2390                         break;
2391                 }
2392         }
2393
2394         rcu_read_unlock();
2395         return blocked;
2396 }
2397
2398 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2399 {
2400         struct link_key *k;
2401
2402         rcu_read_lock();
2403         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2404                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2405                         rcu_read_unlock();
2406
2407                         if (hci_is_blocked_key(hdev,
2408                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2409                                                k->val)) {
2410                                 bt_dev_warn_ratelimited(hdev,
2411                                                         "Link key blocked for %pMR",
2412                                                         &k->bdaddr);
2413                                 return NULL;
2414                         }
2415
2416                         return k;
2417                 }
2418         }
2419         rcu_read_unlock();
2420
2421         return NULL;
2422 }
2423
2424 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2425                                u8 key_type, u8 old_key_type)
2426 {
2427         /* Legacy key */
2428         if (key_type < 0x03)
2429                 return true;
2430
2431         /* Debug keys are insecure so don't store them persistently */
2432         if (key_type == HCI_LK_DEBUG_COMBINATION)
2433                 return false;
2434
2435         /* Changed combination key and there's no previous one */
2436         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2437                 return false;
2438
2439         /* Security mode 3 case */
2440         if (!conn)
2441                 return true;
2442
2443         /* BR/EDR key derived using SC from an LE link */
2444         if (conn->type == LE_LINK)
2445                 return true;
2446
2447         /* Neither the local nor the remote side requested no-bonding */
2448         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2449                 return true;
2450
2451         /* Local side had dedicated bonding as requirement */
2452         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2453                 return true;
2454
2455         /* Remote side had dedicated bonding as requirement */
2456         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2457                 return true;
2458
2459         /* If none of the above criteria match, then don't store the key
2460          * persistently */
2461         return false;
2462 }
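
     /* A few worked cases for the rules above: an SSP unauthenticated
      * combination key where both sides asked for no-bonding (auth
      * requirements 0x00/0x01) is not stored; the same key with dedicated
      * bonding (0x02/0x03) requested on either side is stored; a debug
      * combination key is never stored.
      */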
2463
2464 static u8 ltk_role(u8 type)
2465 {
2466         if (type == SMP_LTK)
2467                 return HCI_ROLE_MASTER;
2468
2469         return HCI_ROLE_SLAVE;
2470 }
2471
2472 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2473                              u8 addr_type, u8 role)
2474 {
2475         struct smp_ltk *k;
2476
2477         rcu_read_lock();
2478         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2479                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2480                         continue;
2481
2482                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2483                         rcu_read_unlock();
2484
2485                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2486                                                k->val)) {
2487                                 bt_dev_warn_ratelimited(hdev,
2488                                                         "LTK blocked for %pMR",
2489                                                         &k->bdaddr);
2490                                 return NULL;
2491                         }
2492
2493                         return k;
2494                 }
2495         }
2496         rcu_read_unlock();
2497
2498         return NULL;
2499 }
2500
2501 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2502 {
2503         struct smp_irk *irk_to_return = NULL;
2504         struct smp_irk *irk;
2505
2506         rcu_read_lock();
2507         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2508                 if (!bacmp(&irk->rpa, rpa)) {
2509                         irk_to_return = irk;
2510                         goto done;
2511                 }
2512         }
2513
2514         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2515                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2516                         bacpy(&irk->rpa, rpa);
2517                         irk_to_return = irk;
2518                         goto done;
2519                 }
2520         }
2521
2522 done:
2523         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2524                                                 irk_to_return->val)) {
2525                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2526                                         &irk_to_return->bdaddr);
2527                 irk_to_return = NULL;
2528         }
2529
2530         rcu_read_unlock();
2531
2532         return irk_to_return;
2533 }
2534
2535 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2536                                      u8 addr_type)
2537 {
2538         struct smp_irk *irk_to_return = NULL;
2539         struct smp_irk *irk;
2540
2541         /* Identity Address must be public or static random */
2542         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2543                 return NULL;
2544
2545         rcu_read_lock();
2546         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2547                 if (addr_type == irk->addr_type &&
2548                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2549                         irk_to_return = irk;
2550                         goto done;
2551                 }
2552         }
2553
2554 done:
2555
2556         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2557                                                 irk_to_return->val)) {
2558                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2559                                         &irk_to_return->bdaddr);
2560                 irk_to_return = NULL;
2561         }
2562
2563         rcu_read_unlock();
2564
2565         return irk_to_return;
2566 }
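
     /* The static-random test above follows the Core spec address rules:
      * the two most significant bits of a static random address are 1, and
      * since bdaddr_t is stored little-endian, that MSB lives in b[5],
      * hence the (b[5] & 0xc0) == 0xc0 check.
      */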
2567
2568 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2569                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2570                                   u8 pin_len, bool *persistent)
2571 {
2572         struct link_key *key, *old_key;
2573         u8 old_key_type;
2574
2575         old_key = hci_find_link_key(hdev, bdaddr);
2576         if (old_key) {
2577                 old_key_type = old_key->type;
2578                 key = old_key;
2579         } else {
2580                 old_key_type = conn ? conn->key_type : 0xff;
2581                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2582                 if (!key)
2583                         return NULL;
2584                 list_add_rcu(&key->list, &hdev->link_keys);
2585         }
2586
2587         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2588
2589         /* Some buggy controller combinations generate a changed
2590          * combination key for legacy pairing even when there's no
2591          * previous key */
2592         if (type == HCI_LK_CHANGED_COMBINATION &&
2593             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2594                 type = HCI_LK_COMBINATION;
2595                 if (conn)
2596                         conn->key_type = type;
2597         }
2598
2599         bacpy(&key->bdaddr, bdaddr);
2600         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2601         key->pin_len = pin_len;
2602
2603         if (type == HCI_LK_CHANGED_COMBINATION)
2604                 key->type = old_key_type;
2605         else
2606                 key->type = type;
2607
2608         if (persistent)
2609                 *persistent = hci_persistent_key(hdev, conn, type,
2610                                                  old_key_type);
2611
2612         return key;
2613 }
2614
2615 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2616                             u8 addr_type, u8 type, u8 authenticated,
2617                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2618 {
2619         struct smp_ltk *key, *old_key;
2620         u8 role = ltk_role(type);
2621
2622         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2623         if (old_key)
2624                 key = old_key;
2625         else {
2626                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2627                 if (!key)
2628                         return NULL;
2629                 list_add_rcu(&key->list, &hdev->long_term_keys);
2630         }
2631
2632         bacpy(&key->bdaddr, bdaddr);
2633         key->bdaddr_type = addr_type;
2634         memcpy(key->val, tk, sizeof(key->val));
2635         key->authenticated = authenticated;
2636         key->ediv = ediv;
2637         key->rand = rand;
2638         key->enc_size = enc_size;
2639         key->type = type;
2640
2641         return key;
2642 }
2643
2644 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2645                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2646 {
2647         struct smp_irk *irk;
2648
2649         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2650         if (!irk) {
2651                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2652                 if (!irk)
2653                         return NULL;
2654
2655                 bacpy(&irk->bdaddr, bdaddr);
2656                 irk->addr_type = addr_type;
2657
2658                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2659         }
2660
2661         memcpy(irk->val, val, 16);
2662         bacpy(&irk->rpa, rpa);
2663
2664         return irk;
2665 }
2666
2667 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2668 {
2669         struct link_key *key;
2670
2671         key = hci_find_link_key(hdev, bdaddr);
2672         if (!key)
2673                 return -ENOENT;
2674
2675         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2676
2677         list_del_rcu(&key->list);
2678         kfree_rcu(key, rcu);
2679
2680         return 0;
2681 }
2682
2683 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2684 {
2685         struct smp_ltk *k;
2686         int removed = 0;
2687
2688         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2689                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2690                         continue;
2691
2692                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2693
2694                 list_del_rcu(&k->list);
2695                 kfree_rcu(k, rcu);
2696                 removed++;
2697         }
2698
2699         return removed ? 0 : -ENOENT;
2700 }
2701
2702 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2703 {
2704         struct smp_irk *k;
2705
2706         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2707                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2708                         continue;
2709
2710                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2711
2712                 list_del_rcu(&k->list);
2713                 kfree_rcu(k, rcu);
2714         }
2715 }
2716
2717 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2718 {
2719         struct smp_ltk *k;
2720         struct smp_irk *irk;
2721         u8 addr_type;
2722
2723         if (type == BDADDR_BREDR) {
2724                 if (hci_find_link_key(hdev, bdaddr))
2725                         return true;
2726                 return false;
2727         }
2728
2729         /* Convert to HCI addr type which struct smp_ltk uses */
2730         if (type == BDADDR_LE_PUBLIC)
2731                 addr_type = ADDR_LE_DEV_PUBLIC;
2732         else
2733                 addr_type = ADDR_LE_DEV_RANDOM;
2734
2735         irk = hci_get_irk(hdev, bdaddr, addr_type);
2736         if (irk) {
2737                 bdaddr = &irk->bdaddr;
2738                 addr_type = irk->addr_type;
2739         }
2740
2741         rcu_read_lock();
2742         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2743                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2744                         rcu_read_unlock();
2745                         return true;
2746                 }
2747         }
2748         rcu_read_unlock();
2749
2750         return false;
2751 }
2752
2753 /* HCI command timer function */
2754 static void hci_cmd_timeout(struct work_struct *work)
2755 {
2756         struct hci_dev *hdev = container_of(work, struct hci_dev,
2757                                             cmd_timer.work);
2758
2759         if (hdev->sent_cmd) {
2760                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2761                 u16 opcode = __le16_to_cpu(sent->opcode);
2762
2763                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2764         } else {
2765                 bt_dev_err(hdev, "command tx timeout");
2766         }
2767
2768         if (hdev->cmd_timeout)
2769                 hdev->cmd_timeout(hdev);
2770
2771         atomic_set(&hdev->cmd_cnt, 1);
2772         queue_work(hdev->workqueue, &hdev->cmd_work);
2773 }
2774
2775 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2776                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2777 {
2778         struct oob_data *data;
2779
2780         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2781                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2782                         continue;
2783                 if (data->bdaddr_type != bdaddr_type)
2784                         continue;
2785                 return data;
2786         }
2787
2788         return NULL;
2789 }
2790
2791 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2792                                u8 bdaddr_type)
2793 {
2794         struct oob_data *data;
2795
2796         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2797         if (!data)
2798                 return -ENOENT;
2799
2800         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2801
2802         list_del(&data->list);
2803         kfree(data);
2804
2805         return 0;
2806 }
2807
2808 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2809 {
2810         struct oob_data *data, *n;
2811
2812         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2813                 list_del(&data->list);
2814                 kfree(data);
2815         }
2816 }
2817
2818 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2819                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2820                             u8 *hash256, u8 *rand256)
2821 {
2822         struct oob_data *data;
2823
2824         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2825         if (!data) {
2826                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2827                 if (!data)
2828                         return -ENOMEM;
2829
2830                 bacpy(&data->bdaddr, bdaddr);
2831                 data->bdaddr_type = bdaddr_type;
2832                 list_add(&data->list, &hdev->remote_oob_data);
2833         }
2834
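             /* The present field encodes which OOB blocks are valid:
              * 0x00 none, 0x01 P-192 only, 0x02 P-256 only, 0x03 both.
              */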
2835         if (hash192 && rand192) {
2836                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2837                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2838                 if (hash256 && rand256)
2839                         data->present = 0x03;
2840         } else {
2841                 memset(data->hash192, 0, sizeof(data->hash192));
2842                 memset(data->rand192, 0, sizeof(data->rand192));
2843                 if (hash256 && rand256)
2844                         data->present = 0x02;
2845                 else
2846                         data->present = 0x00;
2847         }
2848
2849         if (hash256 && rand256) {
2850                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2851                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2852         } else {
2853                 memset(data->hash256, 0, sizeof(data->hash256));
2854                 memset(data->rand256, 0, sizeof(data->rand256));
2855                 if (hash192 && rand192)
2856                         data->present = 0x01;
2857         }
2858
2859         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2860
2861         return 0;
2862 }
2863
2864 /* This function requires the caller holds hdev->lock */
2865 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2866 {
2867         struct adv_info *adv_instance;
2868
2869         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2870                 if (adv_instance->instance == instance)
2871                         return adv_instance;
2872         }
2873
2874         return NULL;
2875 }
2876
2877 /* This function requires the caller holds hdev->lock */
2878 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2879 {
2880         struct adv_info *cur_instance;
2881
2882         cur_instance = hci_find_adv_instance(hdev, instance);
2883         if (!cur_instance)
2884                 return NULL;
2885
2886         if (cur_instance == list_last_entry(&hdev->adv_instances,
2887                                             struct adv_info, list))
2888                 return list_first_entry(&hdev->adv_instances,
2889                                                  struct adv_info, list);
2890         else
2891                 return list_next_entry(cur_instance, list);
2892 }
2893
2894 /* This function requires the caller holds hdev->lock */
2895 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2896 {
2897         struct adv_info *adv_instance;
2898
2899         adv_instance = hci_find_adv_instance(hdev, instance);
2900         if (!adv_instance)
2901                 return -ENOENT;
2902
2903         BT_DBG("%s removing instance %d", hdev->name, instance);
2904
2905         if (hdev->cur_adv_instance == instance) {
2906                 if (hdev->adv_instance_timeout) {
2907                         cancel_delayed_work(&hdev->adv_instance_expire);
2908                         hdev->adv_instance_timeout = 0;
2909                 }
2910                 hdev->cur_adv_instance = 0x00;
2911         }
2912
2913         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2914
2915         list_del(&adv_instance->list);
2916         kfree(adv_instance);
2917
2918         hdev->adv_instance_cnt--;
2919
2920         return 0;
2921 }
2922
2923 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2924 {
2925         struct adv_info *adv_instance, *n;
2926
2927         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2928                 adv_instance->rpa_expired = rpa_expired;
2929 }
2930
2931 /* This function requires the caller holds hdev->lock */
2932 void hci_adv_instances_clear(struct hci_dev *hdev)
2933 {
2934         struct adv_info *adv_instance, *n;
2935
2936         if (hdev->adv_instance_timeout) {
2937                 cancel_delayed_work(&hdev->adv_instance_expire);
2938                 hdev->adv_instance_timeout = 0;
2939         }
2940
2941         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2942                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2943                 list_del(&adv_instance->list);
2944                 kfree(adv_instance);
2945         }
2946
2947         hdev->adv_instance_cnt = 0;
2948         hdev->cur_adv_instance = 0x00;
2949 }
2950
2951 static void adv_instance_rpa_expired(struct work_struct *work)
2952 {
2953         struct adv_info *adv_instance = container_of(work, struct adv_info,
2954                                                      rpa_expired_cb.work);
2955
2956         BT_DBG("");
2957
2958         adv_instance->rpa_expired = true;
2959 }
2960
2961 /* This function requires the caller holds hdev->lock */
2962 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2963                          u16 adv_data_len, u8 *adv_data,
2964                          u16 scan_rsp_len, u8 *scan_rsp_data,
2965                          u16 timeout, u16 duration, s8 tx_power,
2966                          u32 min_interval, u32 max_interval)
2967 {
2968         struct adv_info *adv_instance;
2969
2970         adv_instance = hci_find_adv_instance(hdev, instance);
2971         if (adv_instance) {
2972                 memset(adv_instance->adv_data, 0,
2973                        sizeof(adv_instance->adv_data));
2974                 memset(adv_instance->scan_rsp_data, 0,
2975                        sizeof(adv_instance->scan_rsp_data));
2976         } else {
2977                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2978                     instance < 1 || instance > hdev->le_num_of_adv_sets)
2979                         return -EOVERFLOW;
2980
2981                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2982                 if (!adv_instance)
2983                         return -ENOMEM;
2984
2985                 adv_instance->pending = true;
2986                 adv_instance->instance = instance;
2987                 list_add(&adv_instance->list, &hdev->adv_instances);
2988                 hdev->adv_instance_cnt++;
2989         }
2990
2991         adv_instance->flags = flags;
2992         adv_instance->adv_data_len = adv_data_len;
2993         adv_instance->scan_rsp_len = scan_rsp_len;
2994         adv_instance->min_interval = min_interval;
2995         adv_instance->max_interval = max_interval;
2996         adv_instance->tx_power = tx_power;
2997
2998         if (adv_data_len)
2999                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3000
3001         if (scan_rsp_len)
3002                 memcpy(adv_instance->scan_rsp_data,
3003                        scan_rsp_data, scan_rsp_len);
3004
3005         adv_instance->timeout = timeout;
3006         adv_instance->remaining_time = timeout;
3007
3008         if (duration == 0)
3009                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3010         else
3011                 adv_instance->duration = duration;
3012
3013         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3014                           adv_instance_rpa_expired);
3015
3016         BT_DBG("%s for instance %d", hdev->name, instance);
3017
3018         return 0;
3019 }
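
     /* Usage sketch (caller holds hdev->lock; the payload is illustrative):
      * register instance 1 with a bare Flags AD element, default rotation
      * duration and no timeout:
      *
      *        static const u8 ad[] = { 0x02, 0x01, 0x06 };
      *
      *        err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad),
      *                                   (u8 *)ad, 0, NULL, 0, 0,
      *                                   HCI_ADV_TX_POWER_NO_PREFERENCE,
      *                                   hdev->le_adv_min_interval,
      *                                   hdev->le_adv_max_interval);
      */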
3020
3021 /* This function requires the caller holds hdev->lock */
3022 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3023                               u16 adv_data_len, u8 *adv_data,
3024                               u16 scan_rsp_len, u8 *scan_rsp_data)
3025 {
3026         struct adv_info *adv_instance;
3027
3028         adv_instance = hci_find_adv_instance(hdev, instance);
3029
3030         /* If the advertisement doesn't exist, we can't modify its data */
3031         if (!adv_instance)
3032                 return -ENOENT;
3033
3034         if (adv_data_len) {
3035                 memset(adv_instance->adv_data, 0,
3036                        sizeof(adv_instance->adv_data));
3037                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3038                 adv_instance->adv_data_len = adv_data_len;
3039         }
3040
3041         if (scan_rsp_len) {
3042                 memset(adv_instance->scan_rsp_data, 0,
3043                        sizeof(adv_instance->scan_rsp_data));
3044                 memcpy(adv_instance->scan_rsp_data,
3045                        scan_rsp_data, scan_rsp_len);
3046                 adv_instance->scan_rsp_len = scan_rsp_len;
3047         }
3048
3049         return 0;
3050 }
3051
3052 /* This function requires the caller holds hdev->lock */
3053 void hci_adv_monitors_clear(struct hci_dev *hdev)
3054 {
3055         struct adv_monitor *monitor;
3056         int handle;
3057
3058         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3059                 hci_free_adv_monitor(hdev, monitor);
3060
3061         idr_destroy(&hdev->adv_monitors_idr);
3062 }
3063
3064 /* Frees the monitor structure and does some bookkeeping.
3065  * This function requires the caller holds hdev->lock.
3066  */
3067 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3068 {
3069         struct adv_pattern *pattern;
3070         struct adv_pattern *tmp;
3071
3072         if (!monitor)
3073                 return;
3074
3075         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3076                 list_del(&pattern->list);
3077                 kfree(pattern);
3078         }
3079
3080         if (monitor->handle)
3081                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3082
3083         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3084                 hdev->adv_monitors_cnt--;
3085                 mgmt_adv_monitor_removed(hdev, monitor->handle);
3086         }
3087
3088         kfree(monitor);
3089 }
3090
3091 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3092 {
3093         return mgmt_add_adv_patterns_monitor_complete(hdev, status);
3094 }
3095
3096 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3097 {
3098         return mgmt_remove_adv_monitor_complete(hdev, status);
3099 }
3100
3101 /* Assigns a handle to the monitor, and if offloading is supported and power is on,
3102  * also attempts to forward the request to the controller.
3103  * Returns true if request is forwarded (result is pending), false otherwise.
3104  * This function requires the caller holds hdev->lock.
3105  */
3106 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3107                          int *err)
3108 {
3109         int min, max, handle;
3110
3111         *err = 0;
3112
3113         if (!monitor) {
3114                 *err = -EINVAL;
3115                 return false;
3116         }
3117
3118         min = HCI_MIN_ADV_MONITOR_HANDLE;
3119         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3120         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3121                            GFP_KERNEL);
3122         if (handle < 0) {
3123                 *err = handle;
3124                 return false;
3125         }
3126
3127         monitor->handle = handle;
3128
3129         if (!hdev_is_powered(hdev))
3130                 return false;
3131
3132         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3133         case HCI_ADV_MONITOR_EXT_NONE:
3134                 hci_update_background_scan(hdev);
3135                 bt_dev_dbg(hdev, "add monitor status %d", *err);
3136                 /* Message was not forwarded to controller - not an error */
3137                 return false;
3138         case HCI_ADV_MONITOR_EXT_MSFT:
3139                 *err = msft_add_monitor_pattern(hdev, monitor);
3140                 bt_dev_dbg(hdev, "add monitor msft status %d",
3141                            *err);
3142                 break;
3143         }
3144
3145         return (*err == 0);
3146 }
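
/* Usage sketch (illustrative only): the result of the add path is reported
 * through both *err and the boolean return, so callers typically handle
 * three outcomes. "monitor" is assumed to be allocated and populated by
 * the caller:
 *
 *	bool pending;
 *	int err;
 *
 *	hci_dev_lock(hdev);
 *	pending = hci_add_adv_monitor(hdev, monitor, &err);
 *	hci_dev_unlock(hdev);
 *
 *	if (err)
 *		... handle the failure ...
 *	else if (pending)
 *		... wait for hci_add_adv_patterns_monitor_complete() ...
 *	else
 *		... monitoring is active without controller offloading ...
 */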
3147
3148 /* Attempts to tell the controller to remove the monitor, then frees it.
3149  * If the controller has no matching handle, the monitor is freed anyway.
3150  * Returns true if request is forwarded (result is pending), false otherwise.
3151  * This function requires the caller holds hdev->lock.
3152  */
3153 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3154                                    struct adv_monitor *monitor,
3155                                    u16 handle, int *err)
3156 {
3157         *err = 0;
3158
3159         switch (hci_get_adv_monitor_offload_ext(hdev)) {
3160         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3161                 goto free_monitor;
3162         case HCI_ADV_MONITOR_EXT_MSFT:
3163                 *err = msft_remove_monitor(hdev, monitor, handle);
3164                 break;
3165         }
3166
3167         /* If no matching handle is registered, just free the monitor */
3168         if (*err == -ENOENT)
3169                 goto free_monitor;
3170
3171         return (*err == 0);
3172
3173 free_monitor:
3174         if (*err == -ENOENT)
3175                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3176                             monitor->handle);
3177         hci_free_adv_monitor(hdev, monitor);
3178
3179         *err = 0;
3180         return false;
3181 }
3182
3183 /* Returns true if request is forwarded (result is pending), false otherwise.
3184  * This function requires the caller holds hdev->lock.
3185  */
3186 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3187 {
3188         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3189         bool pending;
3190
3191         if (!monitor) {
3192                 *err = -EINVAL;
3193                 return false;
3194         }
3195
3196         pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3197         if (!*err && !pending)
3198                 hci_update_background_scan(hdev);
3199
3200         bt_dev_dbg(hdev, "remove monitor handle %d, status %d, %spending",
3201                    handle, *err, pending ? "" : "not ");
3202
3203         return pending;
3204 }
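
/* Removal follows the same err/pending convention. A minimal sketch,
 * assuming "handle" came from an earlier add:
 *
 *	hci_dev_lock(hdev);
 *	pending = hci_remove_single_adv_monitor(hdev, handle, &err);
 *	hci_dev_unlock(hdev);
 *
 *	if (pending)
 *		... wait for hci_remove_adv_monitor_complete() ...
 */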
3205
3206 /* Returns true if request is forwarded (result is pending), false otherwise.
3207  * This function requires the caller holds hdev->lock.
3208  */
3209 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3210 {
3211         struct adv_monitor *monitor;
3212         int idr_next_id = 0;
3213         bool pending = false;
3214         bool update = false;
3215
3216         *err = 0;
3217
3218         while (!*err && !pending) {
3219                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3220                 if (!monitor)
3221                         break;
3222
3223                 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3224
3225                 if (!*err && !pending)
3226                         update = true;
3227         }
3228
3229         if (update)
3230                 hci_update_background_scan(hdev);
3231
3232         bt_dev_dbg(hdev, "remove all monitors status %d, %spending",
3233                    *err, pending ? "" : "not ");
3234
3235         return pending;
3236 }
3237
3238 /* This function requires the caller holds hdev->lock */
3239 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3240 {
3241         return !idr_is_empty(&hdev->adv_monitors_idr);
3242 }
3243
3244 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3245 {
3246         if (msft_monitor_supported(hdev))
3247                 return HCI_ADV_MONITOR_EXT_MSFT;
3248
3249         return HCI_ADV_MONITOR_EXT_NONE;
3250 }
3251
3252 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3253                                          bdaddr_t *bdaddr, u8 type)
3254 {
3255         struct bdaddr_list *b;
3256
3257         list_for_each_entry(b, bdaddr_list, list) {
3258                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3259                         return b;
3260         }
3261
3262         return NULL;
3263 }
3264
3265 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3266                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3267                                 u8 type)
3268 {
3269         struct bdaddr_list_with_irk *b;
3270
3271         list_for_each_entry(b, bdaddr_list, list) {
3272                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3273                         return b;
3274         }
3275
3276         return NULL;
3277 }
3278
3279 struct bdaddr_list_with_flags *
3280 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3281                                   bdaddr_t *bdaddr, u8 type)
3282 {
3283         struct bdaddr_list_with_flags *b;
3284
3285         list_for_each_entry(b, bdaddr_list, list) {
3286                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3287                         return b;
3288         }
3289
3290         return NULL;
3291 }
3292
3293 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3294 {
3295         struct bdaddr_list *b, *n;
3296
3297         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3298                 list_del(&b->list);
3299                 kfree(b);
3300         }
3301 }
3302
3303 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3304 {
3305         struct bdaddr_list *entry;
3306
3307         if (!bacmp(bdaddr, BDADDR_ANY))
3308                 return -EBADF;
3309
3310         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3311                 return -EEXIST;
3312
3313         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3314         if (!entry)
3315                 return -ENOMEM;
3316
3317         bacpy(&entry->bdaddr, bdaddr);
3318         entry->bdaddr_type = type;
3319
3320         list_add(&entry->list, list);
3321
3322         return 0;
3323 }
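
/* The bdaddr list helpers form a small add/lookup/del API. An illustrative
 * sequence (sketch only; the address value is made up) against
 * hdev->whitelist:
 *
 *	bdaddr_t addr = { .b = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 } };
 *	int err;
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &addr, BDADDR_LE_PUBLIC);
 *	... err is -EBADF for BDADDR_ANY and -EEXIST for duplicates ...
 *
 *	if (hci_bdaddr_list_lookup(&hdev->whitelist, &addr, BDADDR_LE_PUBLIC))
 *		hci_bdaddr_list_del(&hdev->whitelist, &addr, BDADDR_LE_PUBLIC);
 */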
3324
3325 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3326                                         u8 type, u8 *peer_irk, u8 *local_irk)
3327 {
3328         struct bdaddr_list_with_irk *entry;
3329
3330         if (!bacmp(bdaddr, BDADDR_ANY))
3331                 return -EBADF;
3332
3333         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3334                 return -EEXIST;
3335
3336         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3337         if (!entry)
3338                 return -ENOMEM;
3339
3340         bacpy(&entry->bdaddr, bdaddr);
3341         entry->bdaddr_type = type;
3342
3343         if (peer_irk)
3344                 memcpy(entry->peer_irk, peer_irk, 16);
3345
3346         if (local_irk)
3347                 memcpy(entry->local_irk, local_irk, 16);
3348
3349         list_add(&entry->list, list);
3350
3351         return 0;
3352 }
3353
3354 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3355                                    u8 type, u32 flags)
3356 {
3357         struct bdaddr_list_with_flags *entry;
3358
3359         if (!bacmp(bdaddr, BDADDR_ANY))
3360                 return -EBADF;
3361
3362         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3363                 return -EEXIST;
3364
3365         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3366         if (!entry)
3367                 return -ENOMEM;
3368
3369         bacpy(&entry->bdaddr, bdaddr);
3370         entry->bdaddr_type = type;
3371         entry->current_flags = flags;
3372
3373         list_add(&entry->list, list);
3374
3375         return 0;
3376 }
3377
3378 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3379 {
3380         struct bdaddr_list *entry;
3381
3382         if (!bacmp(bdaddr, BDADDR_ANY)) {
3383                 hci_bdaddr_list_clear(list);
3384                 return 0;
3385         }
3386
3387         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3388         if (!entry)
3389                 return -ENOENT;
3390
3391         list_del(&entry->list);
3392         kfree(entry);
3393
3394         return 0;
3395 }
3396
3397 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3398                                                         u8 type)
3399 {
3400         struct bdaddr_list_with_irk *entry;
3401
3402         if (!bacmp(bdaddr, BDADDR_ANY)) {
3403                 hci_bdaddr_list_clear(list);
3404                 return 0;
3405         }
3406
3407         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3408         if (!entry)
3409                 return -ENOENT;
3410
3411         list_del(&entry->list);
3412         kfree(entry);
3413
3414         return 0;
3415 }
3416
3417 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3418                                    u8 type)
3419 {
3420         struct bdaddr_list_with_flags *entry;
3421
3422         if (!bacmp(bdaddr, BDADDR_ANY)) {
3423                 hci_bdaddr_list_clear(list);
3424                 return 0;
3425         }
3426
3427         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3428         if (!entry)
3429                 return -ENOENT;
3430
3431         list_del(&entry->list);
3432         kfree(entry);
3433
3434         return 0;
3435 }
3436
3437 /* This function requires the caller holds hdev->lock */
3438 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3439                                                bdaddr_t *addr, u8 addr_type)
3440 {
3441         struct hci_conn_params *params;
3442
3443         list_for_each_entry(params, &hdev->le_conn_params, list) {
3444                 if (bacmp(&params->addr, addr) == 0 &&
3445                     params->addr_type == addr_type) {
3446                         return params;
3447                 }
3448         }
3449
3450         return NULL;
3451 }
3452
3453 /* This function requires the caller holds hdev->lock */
3454 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3455                                                   bdaddr_t *addr, u8 addr_type)
3456 {
3457         struct hci_conn_params *param;
3458
3459         switch (addr_type) {
3460         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3461                 addr_type = ADDR_LE_DEV_PUBLIC;
3462                 break;
3463         case ADDR_LE_DEV_RANDOM_RESOLVED:
3464                 addr_type = ADDR_LE_DEV_RANDOM;
3465                 break;
3466         }
3467
3468         list_for_each_entry(param, list, action) {
3469                 if (bacmp(&param->addr, addr) == 0 &&
3470                     param->addr_type == addr_type)
3471                         return param;
3472         }
3473
3474         return NULL;
3475 }
3476
3477 /* This function requires the caller holds hdev->lock */
3478 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3479                                             bdaddr_t *addr, u8 addr_type)
3480 {
3481         struct hci_conn_params *params;
3482
3483         params = hci_conn_params_lookup(hdev, addr, addr_type);
3484         if (params)
3485                 return params;
3486
3487         params = kzalloc(sizeof(*params), GFP_KERNEL);
3488         if (!params) {
3489                 bt_dev_err(hdev, "out of memory");
3490                 return NULL;
3491         }
3492
3493         bacpy(&params->addr, addr);
3494         params->addr_type = addr_type;
3495
3496         list_add(&params->list, &hdev->le_conn_params);
3497         INIT_LIST_HEAD(&params->action);
3498
3499         params->conn_min_interval = hdev->le_conn_min_interval;
3500         params->conn_max_interval = hdev->le_conn_max_interval;
3501         params->conn_latency = hdev->le_conn_latency;
3502         params->supervision_timeout = hdev->le_supv_timeout;
3503         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3504
3505         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3506
3507         return params;
3508 }
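
/* A minimal sketch (illustrative only) of pinning connection parameters for
 * a peer; hci_conn_params_add() returns any existing entry instead of
 * creating a duplicate. Real callers do additional bookkeeping (e.g.
 * updating the pending-connection lists) before changing auto_connect:
 *
 *	hci_dev_lock(hdev);
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (params)
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *	hci_dev_unlock(hdev);
 */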
3509
3510 static void hci_conn_params_free(struct hci_conn_params *params)
3511 {
3512         if (params->conn) {
3513                 hci_conn_drop(params->conn);
3514                 hci_conn_put(params->conn);
3515         }
3516
3517         list_del(&params->action);
3518         list_del(&params->list);
3519         kfree(params);
3520 }
3521
3522 /* This function requires the caller holds hdev->lock */
3523 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3524 {
3525         struct hci_conn_params *params;
3526
3527         params = hci_conn_params_lookup(hdev, addr, addr_type);
3528         if (!params)
3529                 return;
3530
3531         hci_conn_params_free(params);
3532
3533         hci_update_background_scan(hdev);
3534
3535         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3536 }
3537
3538 /* This function requires the caller holds hdev->lock */
3539 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3540 {
3541         struct hci_conn_params *params, *tmp;
3542
3543         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3544                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3545                         continue;
3546
3547                 /* If trying to establish a one-time connection to a disabled
3548                  * device, leave the params, but mark them as just once.
3549                  */
3550                 if (params->explicit_connect) {
3551                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3552                         continue;
3553                 }
3554
3555                 list_del(&params->list);
3556                 kfree(params);
3557         }
3558
3559         BT_DBG("All LE disabled connection parameters were removed");
3560 }
3561
3562 /* This function requires the caller holds hdev->lock */
3563 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3564 {
3565         struct hci_conn_params *params, *tmp;
3566
3567         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3568                 hci_conn_params_free(params);
3569
3570         BT_DBG("All LE connection parameters were removed");
3571 }
3572
3573 /* Copy the Identity Address of the controller.
3574  *
3575  * If the controller has a public BD_ADDR, then by default use that one.
3576  * If this is an LE-only controller without a public address, default to
3577  * the static random address.
3578  *
3579  * For debugging purposes it is possible to force controllers with a
3580  * public address to use the static random address instead.
3581  *
3582  * In case BR/EDR has been disabled on a dual-mode controller and
3583  * userspace has configured a static address, then that address
3584  * becomes the identity address instead of the public BR/EDR address.
3585  */
3586 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3587                                u8 *bdaddr_type)
3588 {
3589         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3590             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3591             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3592              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3593                 bacpy(bdaddr, &hdev->static_addr);
3594                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3595         } else {
3596                 bacpy(bdaddr, &hdev->bdaddr);
3597                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3598         }
3599 }
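
/* Example (sketch): callers needing the identity address for LE address
 * resolution fetch both pieces in one call:
 *
 *	bdaddr_t id_addr;
 *	u8 id_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_type);
 *	... id_type is ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM ...
 */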
3600
3601 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3602 {
3603         int i;
3604
3605         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3606                 clear_bit(i, hdev->suspend_tasks);
3607
3608         wake_up(&hdev->suspend_wait_q);
3609 }
3610
3611 static int hci_suspend_wait_event(struct hci_dev *hdev)
3612 {
3613 #define WAKE_COND                                                              \
3614         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3615          __SUSPEND_NUM_TASKS)
3616
3617         int i;
3618         int ret = wait_event_timeout(hdev->suspend_wait_q,
3619                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3620
3621         if (ret == 0) {
3622                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3623                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3624                         if (test_bit(i, hdev->suspend_tasks))
3625                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3626                         clear_bit(i, hdev->suspend_tasks);
3627                 }
3628
3629                 ret = -ETIMEDOUT;
3630         } else {
3631                 ret = 0;
3632         }
3633
3634         return ret;
3635 }
3636
3637 static void hci_prepare_suspend(struct work_struct *work)
3638 {
3639         struct hci_dev *hdev =
3640                 container_of(work, struct hci_dev, suspend_prepare);
3641
3642         hci_dev_lock(hdev);
3643         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3644         hci_dev_unlock(hdev);
3645 }
3646
3647 static int hci_change_suspend_state(struct hci_dev *hdev,
3648                                     enum suspended_state next)
3649 {
3650         hdev->suspend_state_next = next;
3651         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3652         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3653         return hci_suspend_wait_event(hdev);
3654 }
3655
3656 static void hci_clear_wake_reason(struct hci_dev *hdev)
3657 {
3658         hci_dev_lock(hdev);
3659
3660         hdev->wake_reason = 0;
3661         bacpy(&hdev->wake_addr, BDADDR_ANY);
3662         hdev->wake_addr_type = 0;
3663
3664         hci_dev_unlock(hdev);
3665 }
3666
3667 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3668                                 void *data)
3669 {
3670         struct hci_dev *hdev =
3671                 container_of(nb, struct hci_dev, suspend_notifier);
3672         int ret = 0;
3673         u8 state = BT_RUNNING;
3674
3675         /* If powering down, wait for completion. */
3676         if (mgmt_powering_down(hdev)) {
3677                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3678                 ret = hci_suspend_wait_event(hdev);
3679                 if (ret)
3680                         goto done;
3681         }
3682
3683         /* Suspend notifier should only act on events when powered. */
3684         if (!hdev_is_powered(hdev) ||
3685             hci_dev_test_flag(hdev, HCI_UNREGISTER))
3686                 goto done;
3687
3688         if (action == PM_SUSPEND_PREPARE) {
3689                 /* Suspend consists of two actions:
3690                  *  - First, disconnect everything and make the controller not
3691                  *    connectable (disabling scanning)
3692                  *  - Second, program event filter/whitelist and enable scan
3693                  */
3694                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3695                 if (!ret)
3696                         state = BT_SUSPEND_DISCONNECT;
3697
3698                 /* Only configure whitelist if disconnect succeeded and wake
3699                  * isn't being prevented.
3700                  */
3701                 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3702                         ret = hci_change_suspend_state(hdev,
3703                                                 BT_SUSPEND_CONFIGURE_WAKE);
3704                         if (!ret)
3705                                 state = BT_SUSPEND_CONFIGURE_WAKE;
3706                 }
3707
3708                 hci_clear_wake_reason(hdev);
3709                 mgmt_suspending(hdev, state);
3710
3711         } else if (action == PM_POST_SUSPEND) {
3712                 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3713
3714                 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3715                               hdev->wake_addr_type);
3716         }
3717
3718 done:
3719         /* We always allow suspend even if suspend preparation failed,
3720          * and attempt to recover on resume.
3721          */
3722         if (ret)
3723                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3724                            action, ret);
3725
3726         return NOTIFY_DONE;
3727 }
3728
3729 /* Alloc HCI device */
3730 struct hci_dev *hci_alloc_dev(void)
3731 {
3732         struct hci_dev *hdev;
3733
3734         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3735         if (!hdev)
3736                 return NULL;
3737
3738         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3739         hdev->esco_type = (ESCO_HV1);
3740         hdev->link_mode = (HCI_LM_ACCEPT);
3741         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3742         hdev->io_capability = 0x03;     /* No Input No Output */
3743         hdev->manufacturer = 0xffff;    /* Default to internal use */
3744         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3745         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3746         hdev->adv_instance_cnt = 0;
3747         hdev->cur_adv_instance = 0x00;
3748         hdev->adv_instance_timeout = 0;
3749
3750         hdev->advmon_allowlist_duration = 300;
3751         hdev->advmon_no_filter_duration = 500;
3752         hdev->enable_advmon_interleave_scan = 0x00;     /* Disabled by default */
3753
3754         hdev->sniff_max_interval = 800;
3755         hdev->sniff_min_interval = 80;
3756
3757         hdev->le_adv_channel_map = 0x07;
3758         hdev->le_adv_min_interval = 0x0800;
3759         hdev->le_adv_max_interval = 0x0800;
3760         hdev->le_scan_interval = 0x0060;
3761         hdev->le_scan_window = 0x0030;
3762         hdev->le_scan_int_suspend = 0x0400;
3763         hdev->le_scan_window_suspend = 0x0012;
3764         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3765         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3766         hdev->le_scan_int_adv_monitor = 0x0060;
3767         hdev->le_scan_window_adv_monitor = 0x0030;
3768         hdev->le_scan_int_connect = 0x0060;
3769         hdev->le_scan_window_connect = 0x0060;
3770         hdev->le_conn_min_interval = 0x0018;
3771         hdev->le_conn_max_interval = 0x0028;
3772         hdev->le_conn_latency = 0x0000;
3773         hdev->le_supv_timeout = 0x002a;
3774         hdev->le_def_tx_len = 0x001b;
3775         hdev->le_def_tx_time = 0x0148;
3776         hdev->le_max_tx_len = 0x001b;
3777         hdev->le_max_tx_time = 0x0148;
3778         hdev->le_max_rx_len = 0x001b;
3779         hdev->le_max_rx_time = 0x0148;
3780         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3781         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3782         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3783         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3784         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3785         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3786         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3787         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3788         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3789
3790         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3791         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3792         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3793         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3794         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3795         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3796
3797         /* default 1.28 sec page scan */
3798         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3799         hdev->def_page_scan_int = 0x0800;
3800         hdev->def_page_scan_window = 0x0012;
3801
3802         mutex_init(&hdev->lock);
3803         mutex_init(&hdev->req_lock);
3804
3805         INIT_LIST_HEAD(&hdev->mgmt_pending);
3806         INIT_LIST_HEAD(&hdev->blacklist);
3807         INIT_LIST_HEAD(&hdev->whitelist);
3808         INIT_LIST_HEAD(&hdev->uuids);
3809         INIT_LIST_HEAD(&hdev->link_keys);
3810         INIT_LIST_HEAD(&hdev->long_term_keys);
3811         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3812         INIT_LIST_HEAD(&hdev->remote_oob_data);
3813         INIT_LIST_HEAD(&hdev->le_white_list);
3814         INIT_LIST_HEAD(&hdev->le_resolv_list);
3815         INIT_LIST_HEAD(&hdev->le_conn_params);
3816         INIT_LIST_HEAD(&hdev->pend_le_conns);
3817         INIT_LIST_HEAD(&hdev->pend_le_reports);
3818         INIT_LIST_HEAD(&hdev->conn_hash.list);
3819         INIT_LIST_HEAD(&hdev->adv_instances);
3820         INIT_LIST_HEAD(&hdev->blocked_keys);
3821
3822         INIT_WORK(&hdev->rx_work, hci_rx_work);
3823         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3824         INIT_WORK(&hdev->tx_work, hci_tx_work);
3825         INIT_WORK(&hdev->power_on, hci_power_on);
3826         INIT_WORK(&hdev->error_reset, hci_error_reset);
3827         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3828
3829         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3830
3831         skb_queue_head_init(&hdev->rx_q);
3832         skb_queue_head_init(&hdev->cmd_q);
3833         skb_queue_head_init(&hdev->raw_q);
3834
3835         init_waitqueue_head(&hdev->req_wait_q);
3836         init_waitqueue_head(&hdev->suspend_wait_q);
3837
3838         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3839
3840         hci_request_setup(hdev);
3841
3842         hci_init_sysfs(hdev);
3843         discovery_init(hdev);
3844
3845         return hdev;
3846 }
3847 EXPORT_SYMBOL(hci_alloc_dev);
3848
3849 /* Free HCI device */
3850 void hci_free_dev(struct hci_dev *hdev)
3851 {
3852         /* will free via device release */
3853         put_device(&hdev->dev);
3854 }
3855 EXPORT_SYMBOL(hci_free_dev);
3856
3857 /* Register HCI device */
3858 int hci_register_dev(struct hci_dev *hdev)
3859 {
3860         int id, error;
3861
3862         if (!hdev->open || !hdev->close || !hdev->send)
3863                 return -EINVAL;
3864
3865         /* Do not allow HCI_AMP devices to register at index 0,
3866          * so the index can be used as the AMP controller ID.
3867          */
3868         switch (hdev->dev_type) {
3869         case HCI_PRIMARY:
3870                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3871                 break;
3872         case HCI_AMP:
3873                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3874                 break;
3875         default:
3876                 return -EINVAL;
3877         }
3878
3879         if (id < 0)
3880                 return id;
3881
3882         sprintf(hdev->name, "hci%d", id);
3883         hdev->id = id;
3884
3885         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3886
3887         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3888         if (!hdev->workqueue) {
3889                 error = -ENOMEM;
3890                 goto err;
3891         }
3892
3893         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3894                                                       hdev->name);
3895         if (!hdev->req_workqueue) {
3896                 destroy_workqueue(hdev->workqueue);
3897                 error = -ENOMEM;
3898                 goto err;
3899         }
3900
3901         if (!IS_ERR_OR_NULL(bt_debugfs))
3902                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3903
3904         dev_set_name(&hdev->dev, "%s", hdev->name);
3905
3906         error = device_add(&hdev->dev);
3907         if (error < 0)
3908                 goto err_wqueue;
3909
3910         hci_leds_init(hdev);
3911
3912         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3913                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3914                                     hdev);
3915         if (hdev->rfkill) {
3916                 if (rfkill_register(hdev->rfkill) < 0) {
3917                         rfkill_destroy(hdev->rfkill);
3918                         hdev->rfkill = NULL;
3919                 }
3920         }
3921
3922         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3923                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3924
3925         hci_dev_set_flag(hdev, HCI_SETUP);
3926         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3927
3928         if (hdev->dev_type == HCI_PRIMARY) {
3929                 /* Assume BR/EDR support until proven otherwise (such as
3930                  * through reading supported features during init).
3931                  */
3932                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3933         }
3934
3935         write_lock(&hci_dev_list_lock);
3936         list_add(&hdev->list, &hci_dev_list);
3937         write_unlock(&hci_dev_list_lock);
3938
3939         /* Devices that are marked for raw-only usage are unconfigured
3940          * and should not be included in normal operation.
3941          */
3942         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3943                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3944
3945         hci_sock_dev_event(hdev, HCI_DEV_REG);
3946         hci_dev_hold(hdev);
3947
3948         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3949                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3950                 error = register_pm_notifier(&hdev->suspend_notifier);
3951                 if (error)
3952                         goto err_wqueue;
3953         }
3954
3955         queue_work(hdev->req_workqueue, &hdev->power_on);
3956
3957         idr_init(&hdev->adv_monitors_idr);
3958
3959         return id;
3960
3961 err_wqueue:
3962         destroy_workqueue(hdev->workqueue);
3963         destroy_workqueue(hdev->req_workqueue);
3964 err:
3965         ida_simple_remove(&hci_index_ida, hdev->id);
3966
3967         return error;
3968 }
3969 EXPORT_SYMBOL(hci_register_dev);
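
/* Driver-side sketch (illustrative only; my_open/my_close/my_send are
 * hypothetical transport callbacks, and real drivers add more setup):
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->dev_type = HCI_PRIMARY;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */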
3970
3971 /* Unregister HCI device */
3972 void hci_unregister_dev(struct hci_dev *hdev)
3973 {
3974         int id;
3975
3976         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3977
3978         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3979
3980         id = hdev->id;
3981
3982         write_lock(&hci_dev_list_lock);
3983         list_del(&hdev->list);
3984         write_unlock(&hci_dev_list_lock);
3985
3986         cancel_work_sync(&hdev->power_on);
3987
3988         if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3989                 hci_suspend_clear_tasks(hdev);
3990                 unregister_pm_notifier(&hdev->suspend_notifier);
3991                 cancel_work_sync(&hdev->suspend_prepare);
3992         }
3993
3994         hci_dev_do_close(hdev);
3995
3996         if (!test_bit(HCI_INIT, &hdev->flags) &&
3997             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3998             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3999                 hci_dev_lock(hdev);
4000                 mgmt_index_removed(hdev);
4001                 hci_dev_unlock(hdev);
4002         }
4003
4004         /* mgmt_index_removed should take care of emptying the
4005          * pending list */
4006         BUG_ON(!list_empty(&hdev->mgmt_pending));
4007
4008         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4009
4010         if (hdev->rfkill) {
4011                 rfkill_unregister(hdev->rfkill);
4012                 rfkill_destroy(hdev->rfkill);
4013         }
4014
4015         device_del(&hdev->dev);
4016
4017         debugfs_remove_recursive(hdev->debugfs);
4018         kfree_const(hdev->hw_info);
4019         kfree_const(hdev->fw_info);
4020
4021         destroy_workqueue(hdev->workqueue);
4022         destroy_workqueue(hdev->req_workqueue);
4023
4024         hci_dev_lock(hdev);
4025         hci_bdaddr_list_clear(&hdev->blacklist);
4026         hci_bdaddr_list_clear(&hdev->whitelist);
4027         hci_uuids_clear(hdev);
4028         hci_link_keys_clear(hdev);
4029         hci_smp_ltks_clear(hdev);
4030         hci_smp_irks_clear(hdev);
4031         hci_remote_oob_data_clear(hdev);
4032         hci_adv_instances_clear(hdev);
4033         hci_adv_monitors_clear(hdev);
4034         hci_bdaddr_list_clear(&hdev->le_white_list);
4035         hci_bdaddr_list_clear(&hdev->le_resolv_list);
4036         hci_conn_params_clear_all(hdev);
4037         hci_discovery_filter_clear(hdev);
4038         hci_blocked_keys_clear(hdev);
4039         hci_dev_unlock(hdev);
4040
4041         hci_dev_put(hdev);
4042
4043         ida_simple_remove(&hci_index_ida, id);
4044 }
4045 EXPORT_SYMBOL(hci_unregister_dev);
4046
4047 /* Suspend HCI device */
4048 int hci_suspend_dev(struct hci_dev *hdev)
4049 {
4050         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4051         return 0;
4052 }
4053 EXPORT_SYMBOL(hci_suspend_dev);
4054
4055 /* Resume HCI device */
4056 int hci_resume_dev(struct hci_dev *hdev)
4057 {
4058         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4059         return 0;
4060 }
4061 EXPORT_SYMBOL(hci_resume_dev);
4062
4063 /* Reset HCI device */
4064 int hci_reset_dev(struct hci_dev *hdev)
4065 {
4066         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4067         struct sk_buff *skb;
4068
4069         skb = bt_skb_alloc(3, GFP_ATOMIC);
4070         if (!skb)
4071                 return -ENOMEM;
4072
4073         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4074         skb_put_data(skb, hw_err, 3);
4075
4076         /* Send Hardware Error to upper stack */
4077         return hci_recv_frame(hdev, skb);
4078 }
4079 EXPORT_SYMBOL(hci_reset_dev);
4080
4081 /* Receive frame from HCI drivers */
4082 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4083 {
4084         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4085                       && !test_bit(HCI_INIT, &hdev->flags))) {
4086                 kfree_skb(skb);
4087                 return -ENXIO;
4088         }
4089
4090         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4091             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4092             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4093             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4094                 kfree_skb(skb);
4095                 return -EINVAL;
4096         }
4097
4098         /* Incoming skb */
4099         bt_cb(skb)->incoming = 1;
4100
4101         /* Time stamp */
4102         __net_timestamp(skb);
4103
4104         skb_queue_tail(&hdev->rx_q, skb);
4105         queue_work(hdev->workqueue, &hdev->rx_work);
4106
4107         return 0;
4108 }
4109 EXPORT_SYMBOL(hci_recv_frame);
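
/* Driver RX sketch (illustrative only): a transport driver hands a received
 * event packet to the core like this; note that on error the skb has
 * already been consumed, mirroring hci_reset_dev() above:
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, data, len);
 *
 *	if (hci_recv_frame(hdev, skb) < 0)
 *		... skb already freed, only account the error ...
 */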
4110
4111 /* Receive diagnostic message from HCI drivers */
4112 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4113 {
4114         /* Mark as diagnostic packet */
4115         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4116
4117         /* Time stamp */
4118         __net_timestamp(skb);
4119
4120         skb_queue_tail(&hdev->rx_q, skb);
4121         queue_work(hdev->workqueue, &hdev->rx_work);
4122
4123         return 0;
4124 }
4125 EXPORT_SYMBOL(hci_recv_diag);
4126
4127 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4128 {
4129         va_list vargs;
4130
4131         va_start(vargs, fmt);
4132         kfree_const(hdev->hw_info);
4133         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4134         va_end(vargs);
4135 }
4136 EXPORT_SYMBOL(hci_set_hw_info);
4137
4138 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4139 {
4140         va_list vargs;
4141
4142         va_start(vargs, fmt);
4143         kfree_const(hdev->fw_info);
4144         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4145         va_end(vargs);
4146 }
4147 EXPORT_SYMBOL(hci_set_fw_info);
4148
4149 /* ---- Interface to upper protocols ---- */
4150
4151 int hci_register_cb(struct hci_cb *cb)
4152 {
4153         BT_DBG("%p name %s", cb, cb->name);
4154
4155         mutex_lock(&hci_cb_list_lock);
4156         list_add_tail(&cb->list, &hci_cb_list);
4157         mutex_unlock(&hci_cb_list_lock);
4158
4159         return 0;
4160 }
4161 EXPORT_SYMBOL(hci_register_cb);
4162
4163 int hci_unregister_cb(struct hci_cb *cb)
4164 {
4165         BT_DBG("%p name %s", cb, cb->name);
4166
4167         mutex_lock(&hci_cb_list_lock);
4168         list_del(&cb->list);
4169         mutex_unlock(&hci_cb_list_lock);
4170
4171         return 0;
4172 }
4173 EXPORT_SYMBOL(hci_unregister_cb);
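
/* Upper-protocol sketch (illustrative only; "my_proto" is a hypothetical
 * protocol): L2CAP, for example, registers its callback set this way at
 * module init and removes it on exit:
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name = "my_proto",
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */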
4174
4175 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4176 {
4177         int err;
4178
4179         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4180                skb->len);
4181
4182         /* Time stamp */
4183         __net_timestamp(skb);
4184
4185         /* Send copy to monitor */
4186         hci_send_to_monitor(hdev, skb);
4187
4188         if (atomic_read(&hdev->promisc)) {
4189                 /* Send copy to the sockets */
4190                 hci_send_to_sock(hdev, skb);
4191         }
4192
4193         /* Get rid of the skb owner prior to sending it to the driver. */
4194         skb_orphan(skb);
4195
4196         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4197                 kfree_skb(skb);
4198                 return;
4199         }
4200
4201         err = hdev->send(hdev, skb);
4202         if (err < 0) {
4203                 bt_dev_err(hdev, "sending frame failed (%d)", err);
4204                 kfree_skb(skb);
4205         }
4206 }
4207
4208 /* Send HCI command */
4209 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4210                  const void *param)
4211 {
4212         struct sk_buff *skb;
4213
4214         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4215
4216         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4217         if (!skb) {
4218                 bt_dev_err(hdev, "no memory for command");
4219                 return -ENOMEM;
4220         }
4221
4222         /* Stand-alone HCI commands must be flagged as
4223          * single-command requests.
4224          */
4225         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4226
4227         skb_queue_tail(&hdev->cmd_q, skb);
4228         queue_work(hdev->workqueue, &hdev->cmd_work);
4229
4230         return 0;
4231 }
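
/* Caller sketch (illustrative only): the command is merely queued here and
 * completion arrives asynchronously as an HCI event; hci_sent_cmd_data()
 * below can later recover the parameters that were sent:
 *
 *	struct hci_cp_remote_name_req cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	bacpy(&cp.bdaddr, &conn->dst);
 *	err = hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
 */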
4232
4233 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4234                    const void *param)
4235 {
4236         struct sk_buff *skb;
4237
4238         if (hci_opcode_ogf(opcode) != 0x3f) {
4239                 /* A controller receiving a command shall respond with either
4240                  * a Command Status Event or a Command Complete Event.
4241                  * Therefore, all standard HCI commands must be sent via the
4242                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4243                  * Some vendors do not comply with this rule for vendor-specific
4244                  * commands and do not return any event. We want to support
4245                  * unresponded commands for such cases only.
4246                  */
4247                 bt_dev_err(hdev, "unresponded command not supported");
4248                 return -EINVAL;
4249         }
4250
4251         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4252         if (!skb) {
4253                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4254                            opcode);
4255                 return -ENOMEM;
4256         }
4257
4258         hci_send_frame(hdev, skb);
4259
4260         return 0;
4261 }
4262 EXPORT_SYMBOL(__hci_cmd_send);
4263
4264 /* Get data from the previously sent command */
4265 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4266 {
4267         struct hci_command_hdr *hdr;
4268
4269         if (!hdev->sent_cmd)
4270                 return NULL;
4271
4272         hdr = (void *) hdev->sent_cmd->data;
4273
4274         if (hdr->opcode != cpu_to_le16(opcode))
4275                 return NULL;
4276
4277         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4278
4279         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4280 }
4281
4282 /* Send HCI command and wait for Command Complete event */
4283 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4284                              const void *param, u32 timeout)
4285 {
4286         struct sk_buff *skb;
4287
4288         if (!test_bit(HCI_UP, &hdev->flags))
4289                 return ERR_PTR(-ENETDOWN);
4290
4291         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4292
4293         hci_req_sync_lock(hdev);
4294         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4295         hci_req_sync_unlock(hdev);
4296
4297         return skb;
4298 }
4299 EXPORT_SYMBOL(hci_cmd_sync);
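
/* Caller sketch (illustrative; HCI_OP_READ_BD_ADDR is just one example of a
 * standard opcode): the returned skb holds the Command Complete parameters
 * and must be freed by the caller:
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */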
4300
4301 /* Send ACL data */
4302 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4303 {
4304         struct hci_acl_hdr *hdr;
4305         int len = skb->len;
4306
4307         skb_push(skb, HCI_ACL_HDR_SIZE);
4308         skb_reset_transport_header(skb);
4309         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4310         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4311         hdr->dlen   = cpu_to_le16(len);
4312 }
4313
4314 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4315                           struct sk_buff *skb, __u16 flags)
4316 {
4317         struct hci_conn *conn = chan->conn;
4318         struct hci_dev *hdev = conn->hdev;
4319         struct sk_buff *list;
4320
4321         skb->len = skb_headlen(skb);
4322         skb->data_len = 0;
4323
4324         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4325
4326         switch (hdev->dev_type) {
4327         case HCI_PRIMARY:
4328                 hci_add_acl_hdr(skb, conn->handle, flags);
4329                 break;
4330         case HCI_AMP:
4331                 hci_add_acl_hdr(skb, chan->handle, flags);
4332                 break;
4333         default:
4334                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4335                 return;
4336         }
4337
4338         list = skb_shinfo(skb)->frag_list;
4339         if (!list) {
4340                 /* Non-fragmented */
4341                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4342
4343                 skb_queue_tail(queue, skb);
4344         } else {
4345                 /* Fragmented */
4346                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4347
4348                 skb_shinfo(skb)->frag_list = NULL;
4349
4350                 /* Queue all fragments atomically. We need to use spin_lock_bh
4351                  * here because of 6LoWPAN links, where this function is
4352                  * called from softirq context and using a normal spin lock
4353                  * could cause deadlocks.
4354                  */
4355                 spin_lock_bh(&queue->lock);
4356
4357                 __skb_queue_tail(queue, skb);
4358
4359                 flags &= ~ACL_START;
4360                 flags |= ACL_CONT;
4361                 do {
4362                         skb = list; list = list->next;
4363
4364                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4365                         hci_add_acl_hdr(skb, conn->handle, flags);
4366
4367                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4368
4369                         __skb_queue_tail(queue, skb);
4370                 } while (list);
4371
4372                 spin_unlock_bh(&queue->lock);
4373         }
4374 }
4375
4376 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4377 {
4378         struct hci_dev *hdev = chan->conn->hdev;
4379
4380         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4381
4382         hci_queue_acl(chan, &chan->data_q, skb, flags);
4383
4384         queue_work(hdev->workqueue, &hdev->tx_work);
4385 }
4386
4387 /* Send SCO data */
4388 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4389 {
4390         struct hci_dev *hdev = conn->hdev;
4391         struct hci_sco_hdr hdr;
4392
4393         BT_DBG("%s len %d", hdev->name, skb->len);
4394
4395         hdr.handle = cpu_to_le16(conn->handle);
4396         hdr.dlen   = skb->len;
4397
4398         skb_push(skb, HCI_SCO_HDR_SIZE);
4399         skb_reset_transport_header(skb);
4400         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4401
4402         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4403
4404         skb_queue_tail(&conn->data_q, skb);
4405         queue_work(hdev->workqueue, &hdev->tx_work);
4406 }
4407
4408 /* ---- HCI TX task (outgoing data) ---- */
4409
4410 /* HCI Connection scheduler */
4411 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4412                                      int *quote)
4413 {
4414         struct hci_conn_hash *h = &hdev->conn_hash;
4415         struct hci_conn *conn = NULL, *c;
4416         unsigned int num = 0, min = ~0;
4417
4418         /* We don't have to lock the device here. Connections are always
4419          * added and removed with the TX task disabled. */
4420
4421         rcu_read_lock();
4422
4423         list_for_each_entry_rcu(c, &h->list, list) {
4424                 if (c->type != type || skb_queue_empty(&c->data_q))
4425                         continue;
4426
4427                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4428                         continue;
4429
4430                 num++;
4431
4432                 if (c->sent < min) {
4433                         min  = c->sent;
4434                         conn = c;
4435                 }
4436
4437                 if (hci_conn_num(hdev, type) == num)
4438                         break;
4439         }
4440
4441         rcu_read_unlock();
4442
4443         if (conn) {
4444                 int cnt, q;
4445
4446                 switch (conn->type) {
4447                 case ACL_LINK:
4448                         cnt = hdev->acl_cnt;
4449                         break;
4450                 case SCO_LINK:
4451                 case ESCO_LINK:
4452                         cnt = hdev->sco_cnt;
4453                         break;
4454                 case LE_LINK:
4455                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4456                         break;
4457                 default:
4458                         cnt = 0;
4459                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4460                 }
4461
4462                 q = cnt / num;
4463                 *quote = q ? q : 1;
4464         } else
4465                 *quote = 0;
4466
4467         BT_DBG("conn %p quote %d", conn, *quote);
4468         return conn;
4469 }
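
/* Worked example of the quota math above (illustrative): with cnt = 6 free
 * ACL buffers shared by num = 3 connections that have queued data, each
 * round grants q = 6 / 3 = 2 packets to the least-busy connection; when
 * cnt / num rounds down to zero, the quote is clamped to 1 so the chosen
 * connection can still make progress.
 */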
4470
4471 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4472 {
4473         struct hci_conn_hash *h = &hdev->conn_hash;
4474         struct hci_conn *c;
4475
4476         bt_dev_err(hdev, "link tx timeout");
4477
4478         rcu_read_lock();
4479
4480         /* Kill stalled connections */
4481         list_for_each_entry_rcu(c, &h->list, list) {
4482                 if (c->type == type && c->sent) {
4483                         bt_dev_err(hdev, "killing stalled connection %pMR",
4484                                    &c->dst);
4485                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4486                 }
4487         }
4488
4489         rcu_read_unlock();
4490 }
4491
4492 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4493                                       int *quote)
4494 {
4495         struct hci_conn_hash *h = &hdev->conn_hash;
4496         struct hci_chan *chan = NULL;
4497         unsigned int num = 0, min = ~0, cur_prio = 0;
4498         struct hci_conn *conn;
4499         int cnt, q, conn_num = 0;
4500
4501         BT_DBG("%s", hdev->name);
4502
4503         rcu_read_lock();
4504
4505         list_for_each_entry_rcu(conn, &h->list, list) {
4506                 struct hci_chan *tmp;
4507
4508                 if (conn->type != type)
4509                         continue;
4510
4511                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4512                         continue;
4513
4514                 conn_num++;
4515
4516                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4517                         struct sk_buff *skb;
4518
4519                         if (skb_queue_empty(&tmp->data_q))
4520                                 continue;
4521
4522                         skb = skb_peek(&tmp->data_q);
4523                         if (skb->priority < cur_prio)
4524                                 continue;
4525
4526                         if (skb->priority > cur_prio) {
4527                                 num = 0;
4528                                 min = ~0;
4529                                 cur_prio = skb->priority;
4530                         }
4531
4532                         num++;
4533
4534                         if (conn->sent < min) {
4535                                 min  = conn->sent;
4536                                 chan = tmp;
4537                         }
4538                 }
4539
4540                 if (hci_conn_num(hdev, type) == conn_num)
4541                         break;
4542         }
4543
4544         rcu_read_unlock();
4545
4546         if (!chan)
4547                 return NULL;
4548
4549         switch (chan->conn->type) {
4550         case ACL_LINK:
4551                 cnt = hdev->acl_cnt;
4552                 break;
4553         case AMP_LINK:
4554                 cnt = hdev->block_cnt;
4555                 break;
4556         case SCO_LINK:
4557         case ESCO_LINK:
4558                 cnt = hdev->sco_cnt;
4559                 break;
4560         case LE_LINK:
4561                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4562                 break;
4563         default:
4564                 cnt = 0;
4565                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4566         }
4567
4568         q = cnt / num;
4569         *quote = q ? q : 1;
4570         BT_DBG("chan %p quote %d", chan, *quote);
4571         return chan;
4572 }
4573
4574 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4575 {
4576         struct hci_conn_hash *h = &hdev->conn_hash;
4577         struct hci_conn *conn;
4578         int num = 0;
4579
4580         BT_DBG("%s", hdev->name);
4581
4582         rcu_read_lock();
4583
4584         list_for_each_entry_rcu(conn, &h->list, list) {
4585                 struct hci_chan *chan;
4586
4587                 if (conn->type != type)
4588                         continue;
4589
4590                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4591                         continue;
4592
4593                 num++;
4594
4595                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4596                         struct sk_buff *skb;
4597
4598                         if (chan->sent) {
4599                                 chan->sent = 0;
4600                                 continue;
4601                         }
4602
4603                         if (skb_queue_empty(&chan->data_q))
4604                                 continue;
4605
4606                         skb = skb_peek(&chan->data_q);
4607                         if (skb->priority >= HCI_PRIO_MAX - 1)
4608                                 continue;
4609
4610                         skb->priority = HCI_PRIO_MAX - 1;
4611
4612                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4613                                skb->priority);
4614                 }
4615
4616                 if (hci_conn_num(hdev, type) == num)
4617                         break;
4618         }
4619
4620         rcu_read_unlock();
4621
4622 }
4623
4624 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4625 {
4626         /* Calculate count of blocks used by this packet */
4627         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4628 }
4629
4630 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4631 {
4632         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4633                 /* ACL tx timeout must be longer than the maximum
4634                  * link supervision timeout (40.9 seconds) */
4635                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4636                                        HCI_ACL_TX_TIMEOUT))
4637                         hci_link_tx_to(hdev, ACL_LINK);
4638         }
4639 }
4640
4641 /* Schedule SCO */
4642 static void hci_sched_sco(struct hci_dev *hdev)
4643 {
4644         struct hci_conn *conn;
4645         struct sk_buff *skb;
4646         int quote;
4647
4648         BT_DBG("%s", hdev->name);
4649
4650         if (!hci_conn_num(hdev, SCO_LINK))
4651                 return;
4652
4653         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4654                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4655                         BT_DBG("skb %p len %d", skb, skb->len);
4656                         hci_send_frame(hdev, skb);
4657
4658                         conn->sent++;
4659                         if (conn->sent == ~0)
4660                                 conn->sent = 0;
4661                 }
4662         }
4663 }
4664
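     /* Schedule eSCO */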
4665 static void hci_sched_esco(struct hci_dev *hdev)
4666 {
4667         struct hci_conn *conn;
4668         struct sk_buff *skb;
4669         int quote;
4670
4671         BT_DBG("%s", hdev->name);
4672
4673         if (!hci_conn_num(hdev, ESCO_LINK))
4674                 return;
4675
4676         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4677                                                      &quote))) {
4678                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4679                         BT_DBG("skb %p len %d", skb, skb->len);
4680                         hci_send_frame(hdev, skb);
4681
4682                         conn->sent++;
4683                         if (conn->sent == ~0)
4684                                 conn->sent = 0;
4685                 }
4686         }
4687 }
4688
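     /* Schedule ACL traffic under packet-based flow control: dequeue up
      * to 'quote' packets from the selected channel, stopping early if
      * the head-of-queue priority drops, and interleave pending SCO/eSCO
      * frames after every ACL frame to keep isochronous traffic on time.
      */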
4689 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4690 {
4691         unsigned int cnt = hdev->acl_cnt;
4692         struct hci_chan *chan;
4693         struct sk_buff *skb;
4694         int quote;
4695
4696         __check_timeout(hdev, cnt);
4697
4698         while (hdev->acl_cnt &&
4699                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4700                 u32 priority = (skb_peek(&chan->data_q))->priority;
4701                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4702                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4703                                skb->len, skb->priority);
4704
4705                         /* Stop if priority has changed */
4706                         if (skb->priority < priority)
4707                                 break;
4708
4709                         skb = skb_dequeue(&chan->data_q);
4710
4711                         hci_conn_enter_active_mode(chan->conn,
4712                                                    bt_cb(skb)->force_active);
4713
4714                         hci_send_frame(hdev, skb);
4715                         hdev->acl_last_tx = jiffies;
4716
4717                         hdev->acl_cnt--;
4718                         chan->sent++;
4719                         chan->conn->sent++;
4720
4721                         /* Send pending SCO packets right away */
4722                         hci_sched_sco(hdev);
4723                         hci_sched_esco(hdev);
4724                 }
4725         }
4726
4727         if (cnt != hdev->acl_cnt)
4728                 hci_prio_recalculate(hdev, ACL_LINK);
4729 }
4730
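     /* Schedule ACL traffic under block-based flow control: each packet
      * consumes __get_blocks() buffer blocks, so the channel quote and
      * hdev->block_cnt are charged in blocks rather than packets.
      */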
4731 static void hci_sched_acl_blk(struct hci_dev *hdev)
4732 {
4733         unsigned int cnt = hdev->block_cnt;
4734         struct hci_chan *chan;
4735         struct sk_buff *skb;
4736         int quote;
4737         u8 type;
4738
4739         __check_timeout(hdev, cnt);
4740
4741         BT_DBG("%s", hdev->name);
4742
4743         if (hdev->dev_type == HCI_AMP)
4744                 type = AMP_LINK;
4745         else
4746                 type = ACL_LINK;
4747
4748         while (hdev->block_cnt > 0 &&
4749                (chan = hci_chan_sent(hdev, type, &quote))) {
4750                 u32 priority = (skb_peek(&chan->data_q))->priority;
4751                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4752                         int blocks;
4753
4754                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4755                                skb->len, skb->priority);
4756
4757                         /* Stop if priority has changed */
4758                         if (skb->priority < priority)
4759                                 break;
4760
4761                         skb = skb_dequeue(&chan->data_q);
4762
4763                         blocks = __get_blocks(hdev, skb);
4764                         if (blocks > hdev->block_cnt)
4765                                 return;
4766
4767                         hci_conn_enter_active_mode(chan->conn,
4768                                                    bt_cb(skb)->force_active);
4769
4770                         hci_send_frame(hdev, skb);
4771                         hdev->acl_last_tx = jiffies;
4772
4773                         hdev->block_cnt -= blocks;
4774                         quote -= blocks;
4775
4776                         chan->sent += blocks;
4777                         chan->conn->sent += blocks;
4778                 }
4779         }
4780
4781         if (cnt != hdev->block_cnt)
4782                 hci_prio_recalculate(hdev, type);
4783 }
4784
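     /* Dispatch ACL scheduling according to the controller's flow
      * control mode (packet-based or block-based).
      */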
4785 static void hci_sched_acl(struct hci_dev *hdev)
4786 {
4787         BT_DBG("%s", hdev->name);
4788
4789         /* No ACL links to schedule on a BR/EDR controller */
4790         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4791                 return;
4792
4793         /* No AMP links to schedule on an AMP controller */
4794         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4795                 return;
4796
4797         switch (hdev->flow_ctl_mode) {
4798         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4799                 hci_sched_acl_pkt(hdev);
4800                 break;
4801
4802         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4803                 hci_sched_acl_blk(hdev);
4804                 break;
4805         }
4806 }
4807
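     /* Schedule LE traffic. Controllers without a dedicated LE buffer
      * pool (le_pkts == 0) share the ACL buffers, so the consumed count
      * is written back to le_cnt or acl_cnt accordingly.
      */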
4808 static void hci_sched_le(struct hci_dev *hdev)
4809 {
4810         struct hci_chan *chan;
4811         struct sk_buff *skb;
4812         int quote, cnt, tmp;
4813
4814         BT_DBG("%s", hdev->name);
4815
4816         if (!hci_conn_num(hdev, LE_LINK))
4817                 return;
4818
4819         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4820
4821         __check_timeout(hdev, cnt);
4822
4823         tmp = cnt;
4824         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4825                 u32 priority = (skb_peek(&chan->data_q))->priority;
4826                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4827                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4828                                skb->len, skb->priority);
4829
4830                         /* Stop if priority has changed */
4831                         if (skb->priority < priority)
4832                                 break;
4833
4834                         skb = skb_dequeue(&chan->data_q);
4835
4836                         hci_send_frame(hdev, skb);
4837                         hdev->le_last_tx = jiffies;
4838
4839                         cnt--;
4840                         chan->sent++;
4841                         chan->conn->sent++;
4842
4843                         /* Send pending SCO packets right away */
4844                         hci_sched_sco(hdev);
4845                         hci_sched_esco(hdev);
4846                 }
4847         }
4848
4849         if (hdev->le_pkts)
4850                 hdev->le_cnt = cnt;
4851         else
4852                 hdev->acl_cnt = cnt;
4853
4854         if (cnt != tmp)
4855                 hci_prio_recalculate(hdev, LE_LINK);
4856 }
4857
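     /* TX work: run all schedulers unless userspace has exclusive access
      * (HCI_USER_CHANNEL), then flush any raw packets queued on raw_q.
      */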
4858 static void hci_tx_work(struct work_struct *work)
4859 {
4860         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4861         struct sk_buff *skb;
4862
4863         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4864                hdev->sco_cnt, hdev->le_cnt);
4865
4866         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4867                 /* Schedule queues and send frames to the HCI driver */
4868                 hci_sched_sco(hdev);
4869                 hci_sched_esco(hdev);
4870                 hci_sched_acl(hdev);
4871                 hci_sched_le(hdev);
4872         }
4873
4874         /* Send any queued raw (unknown type) packets */
4875         while ((skb = skb_dequeue(&hdev->raw_q)))
4876                 hci_send_frame(hdev, skb);
4877 }
4878
4879 /* ----- HCI RX task (incoming data processing) ----- */
4880
4881 /* ACL data packet */
4882 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4883 {
4884         struct hci_acl_hdr *hdr = (void *) skb->data;
4885         struct hci_conn *conn;
4886         __u16 handle, flags;
4887
4888         skb_pull(skb, HCI_ACL_HDR_SIZE);
4889
4890         handle = __le16_to_cpu(hdr->handle);
4891         flags  = hci_flags(handle);
4892         handle = hci_handle(handle);
4893
4894         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4895                handle, flags);
4896
4897         hdev->stat.acl_rx++;
4898
4899         hci_dev_lock(hdev);
4900         conn = hci_conn_hash_lookup_handle(hdev, handle);
4901         hci_dev_unlock(hdev);
4902
4903         if (conn) {
4904                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4905
4906                 /* Send to upper protocol */
4907                 l2cap_recv_acldata(conn, skb, flags);
4908                 return;
4909         } else {
4910                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4911                            handle);
4912         }
4913
4914         kfree_skb(skb);
4915 }
4916
4917 /* SCO data packet */
4918 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4919 {
4920         struct hci_sco_hdr *hdr = (void *) skb->data;
4921         struct hci_conn *conn;
4922         __u16 handle, flags;
4923
4924         skb_pull(skb, HCI_SCO_HDR_SIZE);
4925
4926         handle = __le16_to_cpu(hdr->handle);
4927         flags  = hci_flags(handle);
4928         handle = hci_handle(handle);
4929
4930         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4931                handle, flags);
4932
4933         hdev->stat.sco_rx++;
4934
4935         hci_dev_lock(hdev);
4936         conn = hci_conn_hash_lookup_handle(hdev, handle);
4937         hci_dev_unlock(hdev);
4938
4939         if (conn) {
4940                 /* Send to upper protocol */
4941                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
4942                 sco_recv_scodata(conn, skb);
4943                 return;
4944         } else {
4945                 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4946                            handle);
4947         }
4948
4949         kfree_skb(skb);
4950 }
4951
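     /* A request is complete when the command queue is empty or when the
      * next queued command starts a new request (HCI_REQ_START).
      */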
4952 static bool hci_req_is_complete(struct hci_dev *hdev)
4953 {
4954         struct sk_buff *skb;
4955
4956         skb = skb_peek(&hdev->cmd_q);
4957         if (!skb)
4958                 return true;
4959
4960         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4961 }
4962
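     /* Re-queue a clone of the last sent command at the head of cmd_q
      * and kick the command worker. HCI_OP_RESET is deliberately never
      * resent.
      */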
4963 static void hci_resend_last(struct hci_dev *hdev)
4964 {
4965         struct hci_command_hdr *sent;
4966         struct sk_buff *skb;
4967         u16 opcode;
4968
4969         if (!hdev->sent_cmd)
4970                 return;
4971
4972         sent = (void *) hdev->sent_cmd->data;
4973         opcode = __le16_to_cpu(sent->opcode);
4974         if (opcode == HCI_OP_RESET)
4975                 return;
4976
4977         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4978         if (!skb)
4979                 return;
4980
4981         skb_queue_head(&hdev->cmd_q, skb);
4982         queue_work(hdev->workqueue, &hdev->cmd_work);
4983 }
4984
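     /* Match a command complete/status event against the last sent
      * command and, once the request has finished or failed, hand back
      * its completion callback through req_complete/req_complete_skb.
      * On failure the remaining queued commands belonging to the same
      * request are flushed from cmd_q.
      */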
4985 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4986                           hci_req_complete_t *req_complete,
4987                           hci_req_complete_skb_t *req_complete_skb)
4988 {
4989         struct sk_buff *skb;
4990         unsigned long flags;
4991
4992         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4993
4994         /* If the completed command doesn't match the last one that was
4995          * sent, we need to handle it specially.
4996          */
4997         if (!hci_sent_cmd_data(hdev, opcode)) {
4998                 /* Some CSR-based controllers generate a spontaneous
4999                  * reset complete event during init, and any pending
5000                  * command will then never complete. In such a case
5001                  * we need to resend whatever the last sent command
5002                  * was.
5003                  */
5004                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5005                         hci_resend_last(hdev);
5006
5007                 return;
5008         }
5009
5010         /* If we reach this point this event matches the last command sent */
5011         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5012
5013         /* If the command succeeded and there are still more commands in
5014          * this request, the request is not yet complete.
5015          */
5016         if (!status && !hci_req_is_complete(hdev))
5017                 return;
5018
5019         /* If this was the last command in a request, the complete
5020          * callback is found in hdev->sent_cmd instead of the
5021          * command queue (hdev->cmd_q).
5022          */
5023         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5024                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5025                 return;
5026         }
5027
5028         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5029                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5030                 return;
5031         }
5032
5033         /* Remove all pending commands belonging to this request */
5034         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5035         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5036                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
5037                         __skb_queue_head(&hdev->cmd_q, skb);
5038                         break;
5039                 }
5040
5041                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5042                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5043                 else
5044                         *req_complete = bt_cb(skb)->hci.req_complete;
5045                 kfree_skb(skb);
5046         }
5047         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5048 }
5049
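     /* RX work: drain rx_q, mirroring every packet to the monitor (and
      * to raw sockets in promiscuous mode) before dispatching events to
      * hci_event_packet() and data packets to the ACL/SCO handlers.
      */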
5050 static void hci_rx_work(struct work_struct *work)
5051 {
5052         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5053         struct sk_buff *skb;
5054
5055         BT_DBG("%s", hdev->name);
5056
5057         while ((skb = skb_dequeue(&hdev->rx_q))) {
5058                 /* Send copy to monitor */
5059                 hci_send_to_monitor(hdev, skb);
5060
5061                 if (atomic_read(&hdev->promisc)) {
5062                         /* Send copy to the sockets */
5063                         hci_send_to_sock(hdev, skb);
5064                 }
5065
5066                 /* If the device has been opened in HCI_USER_CHANNEL,
5067                  * userspace has exclusive access to the device.
5068                  * While the device is in HCI_INIT, however, we still
5069                  * need to pass packets to the driver so that it can
5070                  * complete its setup().
5071                  */
5072                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5073                     !test_bit(HCI_INIT, &hdev->flags)) {
5074                         kfree_skb(skb);
5075                         continue;
5076                 }
5077
5078                 if (test_bit(HCI_INIT, &hdev->flags)) {
5079                         /* Don't process data packets in this state. */
5080                         switch (hci_skb_pkt_type(skb)) {
5081                         case HCI_ACLDATA_PKT:
5082                         case HCI_SCODATA_PKT:
5083                         case HCI_ISODATA_PKT:
5084                                 kfree_skb(skb);
5085                                 continue;
5086                         }
5087                 }
5088
5089                 /* Process frame */
5090                 switch (hci_skb_pkt_type(skb)) {
5091                 case HCI_EVENT_PKT:
5092                         BT_DBG("%s Event packet", hdev->name);
5093                         hci_event_packet(hdev, skb);
5094                         break;
5095
5096                 case HCI_ACLDATA_PKT:
5097                         BT_DBG("%s ACL data packet", hdev->name);
5098                         hci_acldata_packet(hdev, skb);
5099                         break;
5100
5101                 case HCI_SCODATA_PKT:
5102                         BT_DBG("%s SCO data packet", hdev->name);
5103                         hci_scodata_packet(hdev, skb);
5104                         break;
5105
5106                 default:
5107                         kfree_skb(skb);
5108                         break;
5109                 }
5110         }
5111 }
5112
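     /* Command work: if the controller advertises a free command slot
      * (cmd_cnt), send the next queued command, keep a clone in sent_cmd
      * for request matching, and arm cmd_timer to catch a controller
      * that never responds.
      */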
5113 static void hci_cmd_work(struct work_struct *work)
5114 {
5115         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5116         struct sk_buff *skb;
5117
5118         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5119                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5120
5121         /* Send queued commands */
5122         if (atomic_read(&hdev->cmd_cnt)) {
5123                 skb = skb_dequeue(&hdev->cmd_q);
5124                 if (!skb)
5125                         return;
5126
5127                 kfree_skb(hdev->sent_cmd);
5128
5129                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5130                 if (hdev->sent_cmd) {
5131                         if (hci_req_status_pend(hdev))
5132                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5133                         atomic_dec(&hdev->cmd_cnt);
5134                         hci_send_frame(hdev, skb);
5135                         if (test_bit(HCI_RESET, &hdev->flags))
5136                                 cancel_delayed_work(&hdev->cmd_timer);
5137                         else
5138                                 schedule_delayed_work(&hdev->cmd_timer,
5139                                                       HCI_CMD_TIMEOUT);
5140                 } else {
5141                         skb_queue_head(&hdev->cmd_q, skb);
5142                         queue_work(hdev->workqueue, &hdev->cmd_work);
5143                 }
5144         }
5145 }