net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, there is no need
         * for the vendor callback. Instead, just store the desired value,
         * and the setting will be programmed when the controller gets
         * powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}
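
/* With debugfs mounted at /sys/kernel/debug, the two attributes created
 * above typically appear as (paths illustrative, the index depends on the
 * controller):
 *
 *      /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      /sys/kernel/debug/bluetooth/hci0/vendor_diag
 *
 * Reading yields 'Y' or 'N'; writing any value that kstrtobool() accepts
 * ("y", "n", "1", "0", ...) toggles the mode.
 */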

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this, so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}
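
/* Throughout this file, hdev->commands[] holds the Supported Commands
 * bitmask returned by Read Local Supported Commands: a test such as
 * "commands[14] & 0x20" above checks octet 14, bit 5 of that mask, which
 * the Core specification assigns to Read Local Supported Features.
 */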

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout: 0x7d00 = 32000 baseband slots; at
         * 0.625 ms per slot that is 32000 * 0.625 ms = 20 seconds.
         */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
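
/* The Set Event Mask parameter built above is a 64-bit little-endian
 * bitmask: events[n] |= BIT(m) enables mask bit 8 * n + m, and mask bit k
 * enables the HCI event with code k + 1. For example, Disconnection
 * Complete (event code 0x05) is mask bit 4, hence events[0] |= 0x10.
 */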

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * max_page to at least 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
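
/* The resulting policy word is the OR of the HCI_LP_* bits defined in
 * hci.h (0x0001 role switch, 0x0002 hold, 0x0004 sniff, 0x0008 park), so
 * a controller supporting role switch and sniff, for example, ends up
 * with cp.policy == cpu_to_le16(0x0005).
 */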

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for the Set
         * Event Mask Page 2 command, but then actually do not support it.
         * Since the default value is all bits set to zero, the command is
         * only required if the event mask has to be changed. In case no
         * change to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        if (hdev->commands[18] & 0x04 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports LL Privacy feature, enable
                 * the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;        /* LE PHY Update Complete */

                /* If the controller supports LE Set Extended Scan Parameters
                 * and LE Set Extended Scan Enable commands, enable the
                 * corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * command, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);
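
                /* The LE event mask uses the same little-endian layout as
                 * the classic event mask above: mask bit k enables the LE
                 * Meta subevent with code k + 1, e.g. LE Long Term Key
                 * Request (subevent 0x05) is mask bit 4, hence the
                 * events[0] |= 0x10 above.
                 */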

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* HCI TS spec forbids mixing of legacy and extended
                         * advertising commands wherein READ_ADV_TX_POWER is
                         * also included. So do not call it if extended adv
                         * is supported otherwise controller will return
                         * COMMAND_DISALLOWED for extended commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->commands[35] & 0x40) {
                        __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);

                        /* Set RPA timeout */
                        hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
                                    &rpa_timeout);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support deleting stored
         * link keys, but they don't. The quirk lets a driver just
         * disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Read local pairing options if the HCI command is supported */
        if (hdev->commands[41] & 0x08)
                hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set erroneous data reporting if supported to the wideband speech
         * setting value
         */
        if (hdev->commands[18] & 0x08 &&
            !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
                bool enabled = hci_dev_test_flag(hdev,
                                                 HCI_WIDEBAND_SPEECH_ENABLED);

                if (enabled !=
                    (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
                        struct hci_cp_write_def_err_data_reporting cp;

                        cp.err_data_reporting = enabled ?
                                                ERR_DATA_REPORTING_ENABLED :
                                                ERR_DATA_REPORTING_DISABLED;

                        hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
                                    sizeof(cp), &cp);
                }
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
                cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
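
/* Typical caller pattern (illustrative): the reference taken via
 * hci_dev_hold() above must be dropped with hci_dev_put() when the caller
 * is done with the device:
 *
 *      struct hci_dev *hdev = hci_dev_get(0);
 *
 *      if (hdev) {
 *              ...
 *              hci_dev_put(hdev);
 *      }
 */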

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

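/* Re-insert @ie into the resolve list, keeping the list ordered by
 * ascending |RSSI| so that entries with the strongest signal get their
 * names resolved first; entries whose name request is already pending
 * are left where they are.
 */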
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR
                 * (dropping the device reference taken above).
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
         * first and then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
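
/* Illustrative userspace counterpart: this function backs the HCIINQUIRY
 * ioctl on a raw HCI socket, roughly (error handling omitted):
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[255];
 *      } buf = { 0 };
 *      int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *      buf.ir.dev_id  = 0;                    // hci0
 *      buf.ir.length  = 8;                    // inquiry length, 1.28 s units
 *      buf.ir.num_rsp = 255;
 *      memcpy(buf.ir.lap, (__u8 []){ 0x33, 0x8b, 0x9e }, 3);  // GIAC
 *      ioctl(sk, HCIINQUIRY, &buf);
 */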

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *                                     (BD_ADDR) for a HCI device from
 *                                     a firmware node property.
 * @hdev:       The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware.
 * For example, the DTS could define 'local-bd-address' with an all-zero
 * BD address.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
        struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
        bdaddr_t ba;
        int ret;

        ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
                                            (u8 *)&ba, sizeof(ba));
        if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
                return;

        bacpy(&hdev->public_addr, &ba);
}
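
/* Device tree sketch (illustrative; the compatible string depends on the
 * controller, and the bytes are least significant byte first per the
 * binding):
 *
 *      bluetooth {
 *              compatible = "...";
 *              local-bd-address = [ 55 44 33 22 11 00 ];
 *      };
 *
 * This corresponds to the public address 00:11:22:33:44:55.
 */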

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP) ||
            test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
                bool invalid_bdaddr;

                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set the quirk to mark the
                 * BD_ADDR invalid before creating the HCI device or in
                 * its setup callback.
                 */
                invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
                                          &hdev->quirks);

                if (ret)
                        goto setup_failed;

                if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
                        if (!bacmp(&hdev->public_addr, BDADDR_ANY))
                                hci_dev_get_bd_addr_from_property(hdev);

                        if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                            hdev->set_bdaddr) {
                                ret = hdev->set_bdaddr(hdev,
                                                       &hdev->public_addr);

                                /* If setting of the BD_ADDR from the device
                                 * property succeeds, then treat the address
                                 * as valid even if the invalid BD_ADDR
                                 * quirk indicates otherwise.
                                 */
                                if (!ret)
                                        invalid_bdaddr = false;
                        }
                }

setup_failed:
                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * For the invalid BD_ADDR quirk it is possible that
                 * it becomes a valid address if the bootloader does
                 * provide it (see above).
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    invalid_bdaddr)
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1550                 /* If public address change is configured, ensure that
1551                  * the address gets programmed. If the driver does not
1552                  * support changing the public address, fail the power
1553                  * on procedure.
1554                  */
1555                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1556                     hdev->set_bdaddr)
1557                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1558                 else
1559                         ret = -EADDRNOTAVAIL;
1560         }
1561
1562         if (!ret) {
1563                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1564                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1565                         ret = __hci_init(hdev);
1566                         if (!ret && hdev->post_init)
1567                                 ret = hdev->post_init(hdev);
1568                 }
1569         }
1570
1571         /* If the HCI Reset command is clearing all diagnostic settings,
1572          * then they need to be reprogrammed after the init procedure
1573          * has completed.
1574          */
1575         if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1576             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1577             hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1578                 ret = hdev->set_diag(hdev, true);
1579
1580         msft_do_open(hdev);
1581
1582         clear_bit(HCI_INIT, &hdev->flags);
1583
1584         if (!ret) {
1585                 hci_dev_hold(hdev);
1586                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1587                 hci_adv_instances_set_rpa_expired(hdev, true);
1588                 set_bit(HCI_UP, &hdev->flags);
1589                 hci_sock_dev_event(hdev, HCI_DEV_UP);
1590                 hci_leds_update_powered(hdev, true);
1591                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1592                     !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1593                     !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1594                     !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1595                     hci_dev_test_flag(hdev, HCI_MGMT) &&
1596                     hdev->dev_type == HCI_PRIMARY) {
1597                         ret = __hci_req_hci_power_on(hdev);
1598                         mgmt_power_on(hdev, ret);
1599                 }
1600         } else {
1601                 /* Init failed, cleanup */
1602                 flush_work(&hdev->tx_work);
1603                 flush_work(&hdev->cmd_work);
1604                 flush_work(&hdev->rx_work);
1605
1606                 skb_queue_purge(&hdev->cmd_q);
1607                 skb_queue_purge(&hdev->rx_q);
1608
1609                 if (hdev->flush)
1610                         hdev->flush(hdev);
1611
1612                 if (hdev->sent_cmd) {
1613                         kfree_skb(hdev->sent_cmd);
1614                         hdev->sent_cmd = NULL;
1615                 }
1616
1617                 clear_bit(HCI_RUNNING, &hdev->flags);
1618                 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1619
1620                 hdev->close(hdev);
1621                 hdev->flags &= BIT(HCI_RAW);
1622         }
1623
1624 done:
1625         hci_req_sync_unlock(hdev);
1626         return ret;
1627 }
1628
1629 /* ---- HCI ioctl helpers ---- */
1630
1631 int hci_dev_open(__u16 dev)
1632 {
1633         struct hci_dev *hdev;
1634         int err;
1635
1636         hdev = hci_dev_get(dev);
1637         if (!hdev)
1638                 return -ENODEV;
1639
1640         /* Devices that are marked as unconfigured can only be powered
1641          * up as user channel. Trying to bring them up as normal devices
1642          * will result in a failure. Only user channel operation is
1643          * possible.
1644          *
1645          * When this function is called for a user channel, the flag
1646          * HCI_USER_CHANNEL will be set first before attempting to
1647          * open the device.
1648          */
1649         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1650             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1651                 err = -EOPNOTSUPP;
1652                 goto done;
1653         }
1654
1655         /* We need to ensure that no other power on/off work is pending
1656          * before proceeding to call hci_dev_do_open. This is
1657          * particularly important if the setup procedure has not yet
1658          * completed.
1659          */
1660         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1661                 cancel_delayed_work(&hdev->power_off);
1662
1663         /* After this call it is guaranteed that the setup procedure
1664          * has finished. This means that error conditions like RFKILL
1665          * or no valid public or static random address apply.
1666          */
1667         flush_workqueue(hdev->req_workqueue);
1668
1669         /* For controllers not using the management interface and that
1670          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1671          * so that pairing works for them. Once the management interface
1672          * is in use this bit will be cleared again and userspace has
1673          * to explicitly enable it.
1674          */
1675         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1676             !hci_dev_test_flag(hdev, HCI_MGMT))
1677                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1678
1679         err = hci_dev_do_open(hdev);
1680
1681 done:
1682         hci_dev_put(hdev);
1683         return err;
1684 }
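/* Illustrative userspace counterpart (hypothetical snippet, not part of
 * this file): hci_dev_open() backs the HCIDEVUP ioctl issued on a raw
 * HCI control socket, with hci_dev_close() below handling the matching
 * HCIDEVDOWN:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ctl >= 0 && ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP hci0");
 */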
1685
1686 /* This function requires the caller holds hdev->lock */
1687 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1688 {
1689         struct hci_conn_params *p;
1690
1691         list_for_each_entry(p, &hdev->le_conn_params, list) {
1692                 if (p->conn) {
1693                         hci_conn_drop(p->conn);
1694                         hci_conn_put(p->conn);
1695                         p->conn = NULL;
1696                 }
1697                 list_del_init(&p->action);
1698         }
1699
1700         BT_DBG("All LE pending actions cleared");
1701 }
1702
1703 int hci_dev_do_close(struct hci_dev *hdev)
1704 {
1705         bool auto_off;
1706
1707         BT_DBG("%s %p", hdev->name, hdev);
1708
1709         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1710             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1711             test_bit(HCI_UP, &hdev->flags)) {
1712                 /* Execute vendor specific shutdown routine */
1713                 if (hdev->shutdown)
1714                         hdev->shutdown(hdev);
1715         }
1716
1717         cancel_delayed_work(&hdev->power_off);
1718
1719         hci_request_cancel_all(hdev);
1720         hci_req_sync_lock(hdev);
1721
1722         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1723                 cancel_delayed_work_sync(&hdev->cmd_timer);
1724                 hci_req_sync_unlock(hdev);
1725                 return 0;
1726         }
1727
1728         hci_leds_update_powered(hdev, false);
1729
1730         /* Flush RX and TX works */
1731         flush_work(&hdev->tx_work);
1732         flush_work(&hdev->rx_work);
1733
1734         if (hdev->discov_timeout > 0) {
1735                 hdev->discov_timeout = 0;
1736                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1737                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1738         }
1739
1740         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1741                 cancel_delayed_work(&hdev->service_cache);
1742
1743         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1744                 struct adv_info *adv_instance;
1745
1746                 cancel_delayed_work_sync(&hdev->rpa_expired);
1747
1748                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1749                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1750         }
1751
1752         /* Avoid potential lockdep warnings from the *_flush() calls by
1753          * ensuring the workqueue is empty up front.
1754          */
1755         drain_workqueue(hdev->workqueue);
1756
1757         hci_dev_lock(hdev);
1758
1759         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1760
1761         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1762
1763         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1764             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1765             hci_dev_test_flag(hdev, HCI_MGMT))
1766                 __mgmt_power_off(hdev);
1767
1768         hci_inquiry_cache_flush(hdev);
1769         hci_pend_le_actions_clear(hdev);
1770         hci_conn_hash_flush(hdev);
1771         hci_dev_unlock(hdev);
1772
1773         smp_unregister(hdev);
1774
1775         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1776
1777         msft_do_close(hdev);
1778
1779         if (hdev->flush)
1780                 hdev->flush(hdev);
1781
1782         /* Reset device */
1783         skb_queue_purge(&hdev->cmd_q);
1784         atomic_set(&hdev->cmd_cnt, 1);
1785         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1786             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1787                 set_bit(HCI_INIT, &hdev->flags);
1788                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1789                 clear_bit(HCI_INIT, &hdev->flags);
1790         }
1791
1792         /* Flush cmd work */
1793         flush_work(&hdev->cmd_work);
1794
1795         /* Drop queues */
1796         skb_queue_purge(&hdev->rx_q);
1797         skb_queue_purge(&hdev->cmd_q);
1798         skb_queue_purge(&hdev->raw_q);
1799
1800         /* Drop last sent command */
1801         if (hdev->sent_cmd) {
1802                 cancel_delayed_work_sync(&hdev->cmd_timer);
1803                 kfree_skb(hdev->sent_cmd);
1804                 hdev->sent_cmd = NULL;
1805         }
1806
1807         clear_bit(HCI_RUNNING, &hdev->flags);
1808         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1809
1810         if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1811                 wake_up(&hdev->suspend_wait_q);
1812
1813         /* After this point our queues are empty
1814          * and no tasks are scheduled. */
1815         hdev->close(hdev);
1816
1817         /* Clear flags */
1818         hdev->flags &= BIT(HCI_RAW);
1819         hci_dev_clear_volatile_flags(hdev);
1820
1821         /* Controller radio is available but is currently powered down */
1822         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1823
1824         memset(hdev->eir, 0, sizeof(hdev->eir));
1825         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1826         bacpy(&hdev->random_addr, BDADDR_ANY);
1827
1828         hci_req_sync_unlock(hdev);
1829
1830         hci_dev_put(hdev);
1831         return 0;
1832 }
1833
1834 int hci_dev_close(__u16 dev)
1835 {
1836         struct hci_dev *hdev;
1837         int err;
1838
1839         hdev = hci_dev_get(dev);
1840         if (!hdev)
1841                 return -ENODEV;
1842
1843         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1844                 err = -EBUSY;
1845                 goto done;
1846         }
1847
1848         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1849                 cancel_delayed_work(&hdev->power_off);
1850
1851         err = hci_dev_do_close(hdev);
1852
1853 done:
1854         hci_dev_put(hdev);
1855         return err;
1856 }
1857
1858 static int hci_dev_do_reset(struct hci_dev *hdev)
1859 {
1860         int ret;
1861
1862         BT_DBG("%s %p", hdev->name, hdev);
1863
1864         hci_req_sync_lock(hdev);
1865
1866         /* Drop queues */
1867         skb_queue_purge(&hdev->rx_q);
1868         skb_queue_purge(&hdev->cmd_q);
1869
1870         /* Avoid potential lockdep warnings from the *_flush() calls by
1871          * ensuring the workqueue is empty up front.
1872          */
1873         drain_workqueue(hdev->workqueue);
1874
1875         hci_dev_lock(hdev);
1876         hci_inquiry_cache_flush(hdev);
1877         hci_conn_hash_flush(hdev);
1878         hci_dev_unlock(hdev);
1879
1880         if (hdev->flush)
1881                 hdev->flush(hdev);
1882
1883         atomic_set(&hdev->cmd_cnt, 1);
1884         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1885
1886         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1887
1888         hci_req_sync_unlock(hdev);
1889         return ret;
1890 }
1891
1892 int hci_dev_reset(__u16 dev)
1893 {
1894         struct hci_dev *hdev;
1895         int err;
1896
1897         hdev = hci_dev_get(dev);
1898         if (!hdev)
1899                 return -ENODEV;
1900
1901         if (!test_bit(HCI_UP, &hdev->flags)) {
1902                 err = -ENETDOWN;
1903                 goto done;
1904         }
1905
1906         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1907                 err = -EBUSY;
1908                 goto done;
1909         }
1910
1911         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1912                 err = -EOPNOTSUPP;
1913                 goto done;
1914         }
1915
1916         err = hci_dev_do_reset(hdev);
1917
1918 done:
1919         hci_dev_put(hdev);
1920         return err;
1921 }
1922
1923 int hci_dev_reset_stat(__u16 dev)
1924 {
1925         struct hci_dev *hdev;
1926         int ret = 0;
1927
1928         hdev = hci_dev_get(dev);
1929         if (!hdev)
1930                 return -ENODEV;
1931
1932         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1933                 ret = -EBUSY;
1934                 goto done;
1935         }
1936
1937         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1938                 ret = -EOPNOTSUPP;
1939                 goto done;
1940         }
1941
1942         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1943
1944 done:
1945         hci_dev_put(hdev);
1946         return ret;
1947 }
1948
1949 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1950 {
1951         bool conn_changed, discov_changed;
1952
1953         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1954
1955         if ((scan & SCAN_PAGE))
1956                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1957                                                           HCI_CONNECTABLE);
1958         else
1959                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1960                                                            HCI_CONNECTABLE);
1961
1962         if ((scan & SCAN_INQUIRY)) {
1963                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1964                                                             HCI_DISCOVERABLE);
1965         } else {
1966                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1967                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1968                                                              HCI_DISCOVERABLE);
1969         }
1970
1971         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1972                 return;
1973
1974         if (conn_changed || discov_changed) {
1975                 /* In case this was disabled through mgmt */
1976                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1977
1978                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1979                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1980
1981                 mgmt_new_settings(hdev);
1982         }
1983 }
1984
1985 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1986 {
1987         struct hci_dev *hdev;
1988         struct hci_dev_req dr;
1989         int err = 0;
1990
1991         if (copy_from_user(&dr, arg, sizeof(dr)))
1992                 return -EFAULT;
1993
1994         hdev = hci_dev_get(dr.dev_id);
1995         if (!hdev)
1996                 return -ENODEV;
1997
1998         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1999                 err = -EBUSY;
2000                 goto done;
2001         }
2002
2003         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2004                 err = -EOPNOTSUPP;
2005                 goto done;
2006         }
2007
2008         if (hdev->dev_type != HCI_PRIMARY) {
2009                 err = -EOPNOTSUPP;
2010                 goto done;
2011         }
2012
2013         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2014                 err = -EOPNOTSUPP;
2015                 goto done;
2016         }
2017
2018         switch (cmd) {
2019         case HCISETAUTH:
2020                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2021                                    HCI_INIT_TIMEOUT, NULL);
2022                 break;
2023
2024         case HCISETENCRYPT:
2025                 if (!lmp_encrypt_capable(hdev)) {
2026                         err = -EOPNOTSUPP;
2027                         break;
2028                 }
2029
2030                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2031                         /* Auth must be enabled first */
2032                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2033                                            HCI_INIT_TIMEOUT, NULL);
2034                         if (err)
2035                                 break;
2036                 }
2037
2038                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2039                                    HCI_INIT_TIMEOUT, NULL);
2040                 break;
2041
2042         case HCISETSCAN:
2043                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2044                                    HCI_INIT_TIMEOUT, NULL);
2045
2046                 /* Ensure that the connectable and discoverable states
2047                  * get correctly modified as this was a non-mgmt change.
2048                  */
2049                 if (!err)
2050                         hci_update_scan_state(hdev, dr.dev_opt);
2051                 break;
2052
2053         case HCISETLINKPOL:
2054                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2055                                    HCI_INIT_TIMEOUT, NULL);
2056                 break;
2057
2058         case HCISETLINKMODE:
2059                 hdev->link_mode = ((__u16) dr.dev_opt) &
2060                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2061                 break;
2062
2063         case HCISETPTYPE:
2064                 if (hdev->pkt_type == (__u16) dr.dev_opt)
2065                         break;
2066
2067                 hdev->pkt_type = (__u16) dr.dev_opt;
2068                 mgmt_phy_configuration_changed(hdev, NULL);
2069                 break;
2070
2071         case HCISETACLMTU:
2072                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2073                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2074                 break;
2075
2076         case HCISETSCOMTU:
2077                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2078                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2079                 break;
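                /* Note: for HCISETACLMTU and HCISETSCOMTU the 32-bit
                 * dev_opt carries two packed __u16 values; as written,
                 * on a little-endian CPU the upper halfword is the MTU
                 * and the lower halfword the packet count, i.e.
                 * dev_opt = (mtu << 16) | pkts.
                 */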
2080
2081         default:
2082                 err = -EINVAL;
2083                 break;
2084         }
2085
2086 done:
2087         hci_dev_put(hdev);
2088         return err;
2089 }
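/* Illustrative userspace usage (hypothetical snippet, not part of this
 * file): each of the commands above takes a struct hci_dev_req, so
 * enabling page and inquiry scan on hci0 might look like:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 *
 * On success, hci_update_scan_state() above keeps the HCI_CONNECTABLE
 * and HCI_DISCOVERABLE flags in sync with the new scan mode.
 */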
2090
2091 int hci_get_dev_list(void __user *arg)
2092 {
2093         struct hci_dev *hdev;
2094         struct hci_dev_list_req *dl;
2095         struct hci_dev_req *dr;
2096         int n = 0, size, err;
2097         __u16 dev_num;
2098
2099         if (get_user(dev_num, (__u16 __user *) arg))
2100                 return -EFAULT;
2101
2102         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2103                 return -EINVAL;
2104
2105         size = sizeof(*dl) + dev_num * sizeof(*dr);
2106
2107         dl = kzalloc(size, GFP_KERNEL);
2108         if (!dl)
2109                 return -ENOMEM;
2110
2111         dr = dl->dev_req;
2112
2113         read_lock(&hci_dev_list_lock);
2114         list_for_each_entry(hdev, &hci_dev_list, list) {
2115                 unsigned long flags = hdev->flags;
2116
2117                 /* When auto-off is configured, the transport is still
2118                  * running; in that case, report the device as down
2119                  * anyway.
2120                  */
2121                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2122                         flags &= ~BIT(HCI_UP);
2123
2124                 (dr + n)->dev_id  = hdev->id;
2125                 (dr + n)->dev_opt = flags;
2126
2127                 if (++n >= dev_num)
2128                         break;
2129         }
2130         read_unlock(&hci_dev_list_lock);
2131
2132         dl->dev_num = n;
2133         size = sizeof(*dl) + n * sizeof(*dr);
2134
2135         err = copy_to_user(arg, dl, size);
2136         kfree(dl);
2137
2138         return err ? -EFAULT : 0;
2139 }
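/* Illustrative userspace usage (hypothetical snippet): the caller
 * allocates struct hci_dev_list_req with room for dev_num entries and
 * sets dev_num up front; on return, dev_num holds the number of entries
 * actually filled in:
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = calloc(1, sizeof(*dl) + 4 * sizeof(struct hci_dev_req));
 *	if (dl) {
 *		dl->dev_num = 4;
 *		if (ioctl(ctl, HCIGETDEVLIST, (unsigned long) dl) == 0)
 *			printf("%u device(s)\n", dl->dev_num);
 *	}
 */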
2140
2141 int hci_get_dev_info(void __user *arg)
2142 {
2143         struct hci_dev *hdev;
2144         struct hci_dev_info di;
2145         unsigned long flags;
2146         int err = 0;
2147
2148         if (copy_from_user(&di, arg, sizeof(di)))
2149                 return -EFAULT;
2150
2151         hdev = hci_dev_get(di.dev_id);
2152         if (!hdev)
2153                 return -ENODEV;
2154
2155         /* When auto-off is configured, the transport is still
2156          * running; in that case, report the device as down
2157          * anyway.
2158          */
2159         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2160                 flags = hdev->flags & ~BIT(HCI_UP);
2161         else
2162                 flags = hdev->flags;
2163
2164         strcpy(di.name, hdev->name);
2165         di.bdaddr   = hdev->bdaddr;
2166         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2167         di.flags    = flags;
2168         di.pkt_type = hdev->pkt_type;
2169         if (lmp_bredr_capable(hdev)) {
2170                 di.acl_mtu  = hdev->acl_mtu;
2171                 di.acl_pkts = hdev->acl_pkts;
2172                 di.sco_mtu  = hdev->sco_mtu;
2173                 di.sco_pkts = hdev->sco_pkts;
2174         } else {
2175                 di.acl_mtu  = hdev->le_mtu;
2176                 di.acl_pkts = hdev->le_pkts;
2177                 di.sco_mtu  = 0;
2178                 di.sco_pkts = 0;
2179         }
2180         di.link_policy = hdev->link_policy;
2181         di.link_mode   = hdev->link_mode;
2182
2183         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2184         memcpy(&di.features, &hdev->features, sizeof(di.features));
2185
2186         if (copy_to_user(arg, &di, sizeof(di)))
2187                 err = -EFAULT;
2188
2189         hci_dev_put(hdev);
2190
2191         return err;
2192 }
2193
2194 /* ---- Interface to HCI drivers ---- */
2195
2196 static int hci_rfkill_set_block(void *data, bool blocked)
2197 {
2198         struct hci_dev *hdev = data;
2199
2200         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2201
2202         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2203                 return -EBUSY;
2204
2205         if (blocked) {
2206                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2207                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2208                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2209                         hci_dev_do_close(hdev);
2210         } else {
2211                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2212         }
2213
2214         return 0;
2215 }
2216
2217 static const struct rfkill_ops hci_rfkill_ops = {
2218         .set_block = hci_rfkill_set_block,
2219 };
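/* Sketch of how these ops get wired up (simplified; the registration
 * itself happens in hci_register_dev(), outside this excerpt):
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 *	if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
 *		rfkill_destroy(hdev->rfkill);
 *		hdev->rfkill = NULL;
 *	}
 *
 * The rfkill core then calls hci_rfkill_set_block() whenever the
 * radio's soft-block state changes.
 */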
2220
2221 static void hci_power_on(struct work_struct *work)
2222 {
2223         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2224         int err;
2225
2226         BT_DBG("%s", hdev->name);
2227
2228         if (test_bit(HCI_UP, &hdev->flags) &&
2229             hci_dev_test_flag(hdev, HCI_MGMT) &&
2230             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2231                 cancel_delayed_work(&hdev->power_off);
2232                 hci_req_sync_lock(hdev);
2233                 err = __hci_req_hci_power_on(hdev);
2234                 hci_req_sync_unlock(hdev);
2235                 mgmt_power_on(hdev, err);
2236                 return;
2237         }
2238
2239         err = hci_dev_do_open(hdev);
2240         if (err < 0) {
2241                 hci_dev_lock(hdev);
2242                 mgmt_set_powered_failed(hdev, err);
2243                 hci_dev_unlock(hdev);
2244                 return;
2245         }
2246
2247         /* During the HCI setup phase, a few error conditions are
2248          * ignored and they need to be checked now. If they are still
2249          * valid, it is important to turn the device back off.
2250          */
2251         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2252             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2253             (hdev->dev_type == HCI_PRIMARY &&
2254              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2255              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2256                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2257                 hci_dev_do_close(hdev);
2258         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2259                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2260                                    HCI_AUTO_OFF_TIMEOUT);
2261         }
2262
2263         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2264                 /* For unconfigured devices, set the HCI_RAW flag
2265                  * so that userspace can easily identify them.
2266                  */
2267                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2268                         set_bit(HCI_RAW, &hdev->flags);
2269
2270                 /* For fully configured devices, this will send
2271                  * the Index Added event. For unconfigured devices,
2272                  * it will send the Unconfigured Index Added event.
2273                  *
2274                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2275                  * and no event will be sent.
2276                  */
2277                 mgmt_index_added(hdev);
2278         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2279                 /* When the controller is now configured, then it
2280                  * is important to clear the HCI_RAW flag.
2281                  */
2282                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2283                         clear_bit(HCI_RAW, &hdev->flags);
2284
2285                 /* Powering on the controller with HCI_CONFIG set only
2286                  * happens with the transition from unconfigured to
2287                  * configured. This will send the Index Added event.
2288                  */
2289                 mgmt_index_added(hdev);
2290         }
2291 }
2292
2293 static void hci_power_off(struct work_struct *work)
2294 {
2295         struct hci_dev *hdev = container_of(work, struct hci_dev,
2296                                             power_off.work);
2297
2298         BT_DBG("%s", hdev->name);
2299
2300         hci_dev_do_close(hdev);
2301 }
2302
2303 static void hci_error_reset(struct work_struct *work)
2304 {
2305         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2306
2307         BT_DBG("%s", hdev->name);
2308
2309         if (hdev->hw_error)
2310                 hdev->hw_error(hdev, hdev->hw_error_code);
2311         else
2312                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2313
2314         if (hci_dev_do_close(hdev))
2315                 return;
2316
2317         hci_dev_do_open(hdev);
2318 }
2319
2320 void hci_uuids_clear(struct hci_dev *hdev)
2321 {
2322         struct bt_uuid *uuid, *tmp;
2323
2324         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2325                 list_del(&uuid->list);
2326                 kfree(uuid);
2327         }
2328 }
2329
2330 void hci_link_keys_clear(struct hci_dev *hdev)
2331 {
2332         struct link_key *key;
2333
2334         list_for_each_entry(key, &hdev->link_keys, list) {
2335                 list_del_rcu(&key->list);
2336                 kfree_rcu(key, rcu);
2337         }
2338 }
2339
2340 void hci_smp_ltks_clear(struct hci_dev *hdev)
2341 {
2342         struct smp_ltk *k;
2343
2344         list_for_each_entry(k, &hdev->long_term_keys, list) {
2345                 list_del_rcu(&k->list);
2346                 kfree_rcu(k, rcu);
2347         }
2348 }
2349
2350 void hci_smp_irks_clear(struct hci_dev *hdev)
2351 {
2352         struct smp_irk *k;
2353
2354         list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2355                 list_del_rcu(&k->list);
2356                 kfree_rcu(k, rcu);
2357         }
2358 }
2359
2360 void hci_blocked_keys_clear(struct hci_dev *hdev)
2361 {
2362         struct blocked_key *b;
2363
2364         list_for_each_entry(b, &hdev->blocked_keys, list) {
2365                 list_del_rcu(&b->list);
2366                 kfree_rcu(b, rcu);
2367         }
2368 }
2369
2370 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2371 {
2372         bool blocked = false;
2373         struct blocked_key *b;
2374
2375         rcu_read_lock();
2376         list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2377                 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
2378                         blocked = true;
2379                         break;
2380                 }
2381         }
2382
2383         rcu_read_unlock();
2384         return blocked;
2385 }
2386
2387 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2388 {
2389         struct link_key *k;
2390
2391         rcu_read_lock();
2392         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2393                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2394                         rcu_read_unlock();
2395
2396                         if (hci_is_blocked_key(hdev,
2397                                                HCI_BLOCKED_KEY_TYPE_LINKKEY,
2398                                                k->val)) {
2399                                 bt_dev_warn_ratelimited(hdev,
2400                                                         "Link key blocked for %pMR",
2401                                                         &k->bdaddr);
2402                                 return NULL;
2403                         }
2404
2405                         return k;
2406                 }
2407         }
2408         rcu_read_unlock();
2409
2410         return NULL;
2411 }
2412
2413 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2414                                u8 key_type, u8 old_key_type)
2415 {
2416         /* Legacy key */
2417         if (key_type < 0x03)
2418                 return true;
2419
2420         /* Debug keys are insecure so don't store them persistently */
2421         if (key_type == HCI_LK_DEBUG_COMBINATION)
2422                 return false;
2423
2424         /* Changed combination key and there's no previous one */
2425         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2426                 return false;
2427
2428         /* Security mode 3 case */
2429         if (!conn)
2430                 return true;
2431
2432         /* BR/EDR key derived using SC from an LE link */
2433         if (conn->type == LE_LINK)
2434                 return true;
2435
2436         /* Neither local nor remote side had no-bonding as a requirement */
2437         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2438                 return true;
2439
2440         /* Local side had dedicated bonding as requirement */
2441         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2442                 return true;
2443
2444         /* Remote side had dedicated bonding as requirement */
2445         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2446                 return true;
2447
2448         /* If none of the above criteria match, then don't store the key
2449          * persistently */
2450         return false;
2451 }
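/* For reference, the authentication requirements values tested above
 * (from the HCI IO Capability Request/Reply, per the Bluetooth Core
 * specification):
 *
 *	0x00	MITM not required, No Bonding
 *	0x01	MITM required, No Bonding
 *	0x02	MITM not required, Dedicated Bonding
 *	0x03	MITM required, Dedicated Bonding
 *	0x04	MITM not required, General Bonding
 *	0x05	MITM required, General Bonding
 */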
2452
2453 static u8 ltk_role(u8 type)
2454 {
2455         if (type == SMP_LTK)
2456                 return HCI_ROLE_MASTER;
2457
2458         return HCI_ROLE_SLAVE;
2459 }
2460
2461 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2462                              u8 addr_type, u8 role)
2463 {
2464         struct smp_ltk *k;
2465
2466         rcu_read_lock();
2467         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2468                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2469                         continue;
2470
2471                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2472                         rcu_read_unlock();
2473
2474                         if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2475                                                k->val)) {
2476                                 bt_dev_warn_ratelimited(hdev,
2477                                                         "LTK blocked for %pMR",
2478                                                         &k->bdaddr);
2479                                 return NULL;
2480                         }
2481
2482                         return k;
2483                 }
2484         }
2485         rcu_read_unlock();
2486
2487         return NULL;
2488 }
2489
2490 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2491 {
2492         struct smp_irk *irk_to_return = NULL;
2493         struct smp_irk *irk;
2494
2495         rcu_read_lock();
2496         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2497                 if (!bacmp(&irk->rpa, rpa)) {
2498                         irk_to_return = irk;
2499                         goto done;
2500                 }
2501         }
2502
2503         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2504                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2505                         bacpy(&irk->rpa, rpa);
2506                         irk_to_return = irk;
2507                         goto done;
2508                 }
2509         }
2510
2511 done:
2512         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2513                                                 irk_to_return->val)) {
2514                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2515                                         &irk_to_return->bdaddr);
2516                 irk_to_return = NULL;
2517         }
2518
2519         rcu_read_unlock();
2520
2521         return irk_to_return;
2522 }
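/* Background for the lookup above: a Resolvable Private Address is
 * built as prand (upper 24 bits, top two bits 0b01) followed by hash
 * (lower 24 bits), where hash = ah(IRK, prand) per the Bluetooth Core
 * specification. smp_irk_matches() recomputes ah() with each stored IRK
 * and compares the result against the hash portion of the given RPA; on
 * a match the RPA is cached in irk->rpa so the first, cheaper list walk
 * can resolve it directly next time.
 */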
2523
2524 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2525                                      u8 addr_type)
2526 {
2527         struct smp_irk *irk_to_return = NULL;
2528         struct smp_irk *irk;
2529
2530         /* Identity Address must be public or static random */
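        /* (Static random addresses have the two most significant bits
         * of the most significant byte set to 0b11, hence the 0xc0 mask
         * on b[5]; bdaddr_t stores the address least significant byte
         * first.)
         */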
2531         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2532                 return NULL;
2533
2534         rcu_read_lock();
2535         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2536                 if (addr_type == irk->addr_type &&
2537                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2538                         irk_to_return = irk;
2539                         goto done;
2540                 }
2541         }
2542
2543 done:
2545         if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2546                                                 irk_to_return->val)) {
2547                 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2548                                         &irk_to_return->bdaddr);
2549                 irk_to_return = NULL;
2550         }
2551
2552         rcu_read_unlock();
2553
2554         return irk_to_return;
2555 }
2556
2557 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2558                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2559                                   u8 pin_len, bool *persistent)
2560 {
2561         struct link_key *key, *old_key;
2562         u8 old_key_type;
2563
2564         old_key = hci_find_link_key(hdev, bdaddr);
2565         if (old_key) {
2566                 old_key_type = old_key->type;
2567                 key = old_key;
2568         } else {
2569                 old_key_type = conn ? conn->key_type : 0xff;
2570                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2571                 if (!key)
2572                         return NULL;
2573                 list_add_rcu(&key->list, &hdev->link_keys);
2574         }
2575
2576         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2577
2578         /* Some buggy controller combinations generate a changed
2579          * combination key for legacy pairing even when there's no
2580          * previous key */
2581         if (type == HCI_LK_CHANGED_COMBINATION &&
2582             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2583                 type = HCI_LK_COMBINATION;
2584                 if (conn)
2585                         conn->key_type = type;
2586         }
2587
2588         bacpy(&key->bdaddr, bdaddr);
2589         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2590         key->pin_len = pin_len;
2591
2592         if (type == HCI_LK_CHANGED_COMBINATION)
2593                 key->type = old_key_type;
2594         else
2595                 key->type = type;
2596
2597         if (persistent)
2598                 *persistent = hci_persistent_key(hdev, conn, type,
2599                                                  old_key_type);
2600
2601         return key;
2602 }
2603
2604 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2605                             u8 addr_type, u8 type, u8 authenticated,
2606                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2607 {
2608         struct smp_ltk *key, *old_key;
2609         u8 role = ltk_role(type);
2610
2611         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2612         if (old_key)
2613                 key = old_key;
2614         else {
2615                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2616                 if (!key)
2617                         return NULL;
2618                 list_add_rcu(&key->list, &hdev->long_term_keys);
2619         }
2620
2621         bacpy(&key->bdaddr, bdaddr);
2622         key->bdaddr_type = addr_type;
2623         memcpy(key->val, tk, sizeof(key->val));
2624         key->authenticated = authenticated;
2625         key->ediv = ediv;
2626         key->rand = rand;
2627         key->enc_size = enc_size;
2628         key->type = type;
2629
2630         return key;
2631 }
2632
2633 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2634                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2635 {
2636         struct smp_irk *irk;
2637
2638         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2639         if (!irk) {
2640                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2641                 if (!irk)
2642                         return NULL;
2643
2644                 bacpy(&irk->bdaddr, bdaddr);
2645                 irk->addr_type = addr_type;
2646
2647                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2648         }
2649
2650         memcpy(irk->val, val, 16);
2651         bacpy(&irk->rpa, rpa);
2652
2653         return irk;
2654 }
2655
2656 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2657 {
2658         struct link_key *key;
2659
2660         key = hci_find_link_key(hdev, bdaddr);
2661         if (!key)
2662                 return -ENOENT;
2663
2664         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2665
2666         list_del_rcu(&key->list);
2667         kfree_rcu(key, rcu);
2668
2669         return 0;
2670 }
2671
2672 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2673 {
2674         struct smp_ltk *k;
2675         int removed = 0;
2676
2677         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2678                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2679                         continue;
2680
2681                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2682
2683                 list_del_rcu(&k->list);
2684                 kfree_rcu(k, rcu);
2685                 removed++;
2686         }
2687
2688         return removed ? 0 : -ENOENT;
2689 }
2690
2691 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2692 {
2693         struct smp_irk *k;
2694
2695         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2696                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2697                         continue;
2698
2699                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2700
2701                 list_del_rcu(&k->list);
2702                 kfree_rcu(k, rcu);
2703         }
2704 }
2705
2706 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2707 {
2708         struct smp_ltk *k;
2709         struct smp_irk *irk;
2710         u8 addr_type;
2711
2712         if (type == BDADDR_BREDR) {
2713                 if (hci_find_link_key(hdev, bdaddr))
2714                         return true;
2715                 return false;
2716         }
2717
2718         /* Convert to HCI addr type which struct smp_ltk uses */
2719         if (type == BDADDR_LE_PUBLIC)
2720                 addr_type = ADDR_LE_DEV_PUBLIC;
2721         else
2722                 addr_type = ADDR_LE_DEV_RANDOM;
2723
2724         irk = hci_get_irk(hdev, bdaddr, addr_type);
2725         if (irk) {
2726                 bdaddr = &irk->bdaddr;
2727                 addr_type = irk->addr_type;
2728         }
2729
2730         rcu_read_lock();
2731         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2732                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2733                         rcu_read_unlock();
2734                         return true;
2735                 }
2736         }
2737         rcu_read_unlock();
2738
2739         return false;
2740 }
2741
2742 /* HCI command timer function */
2743 static void hci_cmd_timeout(struct work_struct *work)
2744 {
2745         struct hci_dev *hdev = container_of(work, struct hci_dev,
2746                                             cmd_timer.work);
2747
2748         if (hdev->sent_cmd) {
2749                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2750                 u16 opcode = __le16_to_cpu(sent->opcode);
2751
2752                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2753         } else {
2754                 bt_dev_err(hdev, "command tx timeout");
2755         }
2756
2757         if (hdev->cmd_timeout)
2758                 hdev->cmd_timeout(hdev);
2759
2760         atomic_set(&hdev->cmd_cnt, 1);
2761         queue_work(hdev->workqueue, &hdev->cmd_work);
2762 }
2763
2764 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2765                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2766 {
2767         struct oob_data *data;
2768
2769         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2770                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2771                         continue;
2772                 if (data->bdaddr_type != bdaddr_type)
2773                         continue;
2774                 return data;
2775         }
2776
2777         return NULL;
2778 }
2779
2780 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2781                                u8 bdaddr_type)
2782 {
2783         struct oob_data *data;
2784
2785         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2786         if (!data)
2787                 return -ENOENT;
2788
2789         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2790
2791         list_del(&data->list);
2792         kfree(data);
2793
2794         return 0;
2795 }
2796
2797 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2798 {
2799         struct oob_data *data, *n;
2800
2801         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2802                 list_del(&data->list);
2803                 kfree(data);
2804         }
2805 }
2806
2807 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2808                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2809                             u8 *hash256, u8 *rand256)
2810 {
2811         struct oob_data *data;
2812
2813         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2814         if (!data) {
2815                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2816                 if (!data)
2817                         return -ENOMEM;
2818
2819                 bacpy(&data->bdaddr, bdaddr);
2820                 data->bdaddr_type = bdaddr_type;
2821                 list_add(&data->list, &hdev->remote_oob_data);
2822         }
2823
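        /* The present field encodes which OOB values are usable:
         * 0x00 = none, 0x01 = P-192 only, 0x02 = P-256 only,
         * 0x03 = both. The branches below derive it from which
         * hash/rand pairs the caller supplied.
         */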
2824         if (hash192 && rand192) {
2825                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2826                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2827                 if (hash256 && rand256)
2828                         data->present = 0x03;
2829         } else {
2830                 memset(data->hash192, 0, sizeof(data->hash192));
2831                 memset(data->rand192, 0, sizeof(data->rand192));
2832                 if (hash256 && rand256)
2833                         data->present = 0x02;
2834                 else
2835                         data->present = 0x00;
2836         }
2837
2838         if (hash256 && rand256) {
2839                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2840                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2841         } else {
2842                 memset(data->hash256, 0, sizeof(data->hash256));
2843                 memset(data->rand256, 0, sizeof(data->rand256));
2844                 if (hash192 && rand192)
2845                         data->present = 0x01;
2846         }
2847
2848         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2849
2850         return 0;
2851 }
2852
2853 /* This function requires the caller holds hdev->lock */
2854 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2855 {
2856         struct adv_info *adv_instance;
2857
2858         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2859                 if (adv_instance->instance == instance)
2860                         return adv_instance;
2861         }
2862
2863         return NULL;
2864 }
2865
2866 /* This function requires the caller holds hdev->lock */
2867 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2868 {
2869         struct adv_info *cur_instance;
2870
2871         cur_instance = hci_find_adv_instance(hdev, instance);
2872         if (!cur_instance)
2873                 return NULL;
2874
2875         if (cur_instance == list_last_entry(&hdev->adv_instances,
2876                                             struct adv_info, list))
2877                 return list_first_entry(&hdev->adv_instances,
2878                                                  struct adv_info, list);
2879         else
2880                 return list_next_entry(cur_instance, list);
2881 }
2882
2883 /* This function requires the caller holds hdev->lock */
2884 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2885 {
2886         struct adv_info *adv_instance;
2887
2888         adv_instance = hci_find_adv_instance(hdev, instance);
2889         if (!adv_instance)
2890                 return -ENOENT;
2891
2892         BT_DBG("%s removing %d", hdev->name, instance);
2893
2894         if (hdev->cur_adv_instance == instance) {
2895                 if (hdev->adv_instance_timeout) {
2896                         cancel_delayed_work(&hdev->adv_instance_expire);
2897                         hdev->adv_instance_timeout = 0;
2898                 }
2899                 hdev->cur_adv_instance = 0x00;
2900         }
2901
2902         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2903
2904         list_del(&adv_instance->list);
2905         kfree(adv_instance);
2906
2907         hdev->adv_instance_cnt--;
2908
2909         return 0;
2910 }
2911
2912 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2913 {
2914         struct adv_info *adv_instance, *n;
2915
2916         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2917                 adv_instance->rpa_expired = rpa_expired;
2918 }
2919
2920 /* This function requires the caller holds hdev->lock */
2921 void hci_adv_instances_clear(struct hci_dev *hdev)
2922 {
2923         struct adv_info *adv_instance, *n;
2924
2925         if (hdev->adv_instance_timeout) {
2926                 cancel_delayed_work(&hdev->adv_instance_expire);
2927                 hdev->adv_instance_timeout = 0;
2928         }
2929
2930         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2931                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2932                 list_del(&adv_instance->list);
2933                 kfree(adv_instance);
2934         }
2935
2936         hdev->adv_instance_cnt = 0;
2937         hdev->cur_adv_instance = 0x00;
2938 }
2939
2940 static void adv_instance_rpa_expired(struct work_struct *work)
2941 {
2942         struct adv_info *adv_instance = container_of(work, struct adv_info,
2943                                                      rpa_expired_cb.work);
2944
2945         BT_DBG("");
2946
2947         adv_instance->rpa_expired = true;
2948 }
2949
2950 /* This function requires the caller holds hdev->lock */
2951 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2952                          u16 adv_data_len, u8 *adv_data,
2953                          u16 scan_rsp_len, u8 *scan_rsp_data,
2954                          u16 timeout, u16 duration)
2955 {
2956         struct adv_info *adv_instance;
2957
2958         adv_instance = hci_find_adv_instance(hdev, instance);
2959         if (adv_instance) {
2960                 memset(adv_instance->adv_data, 0,
2961                        sizeof(adv_instance->adv_data));
2962                 memset(adv_instance->scan_rsp_data, 0,
2963                        sizeof(adv_instance->scan_rsp_data));
2964         } else {
2965                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
2966                     instance < 1 || instance > hdev->le_num_of_adv_sets)
2967                         return -EOVERFLOW;
2968
2969                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2970                 if (!adv_instance)
2971                         return -ENOMEM;
2972
2973                 adv_instance->pending = true;
2974                 adv_instance->instance = instance;
2975                 list_add(&adv_instance->list, &hdev->adv_instances);
2976                 hdev->adv_instance_cnt++;
2977         }
2978
2979         adv_instance->flags = flags;
2980         adv_instance->adv_data_len = adv_data_len;
2981         adv_instance->scan_rsp_len = scan_rsp_len;
2982
2983         if (adv_data_len)
2984                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2985
2986         if (scan_rsp_len)
2987                 memcpy(adv_instance->scan_rsp_data,
2988                        scan_rsp_data, scan_rsp_len);
2989
2990         adv_instance->timeout = timeout;
2991         adv_instance->remaining_time = timeout;
2992
2993         if (duration == 0)
2994                 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
2995         else
2996                 adv_instance->duration = duration;
2997
2998         adv_instance->tx_power = HCI_TX_POWER_INVALID;
2999
3000         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3001                           adv_instance_rpa_expired);
3002
3003         BT_DBG("%s for %d", hdev->name, instance);
3004
3005         return 0;
3006 }
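/* Illustrative call (hypothetical values; the caller must hold
 * hdev->lock): registering instance 1 with a minimal advertising
 * payload. adv_data follows the standard AD structure layout of
 * { length, type, data... }:
 *
 *	static const u8 ad[] = {
 *		0x02, 0x01, 0x06,	// Flags: LE General Disc., no BR/EDR
 *		0x03, 0x03, 0x0d, 0x18,	// 16-bit UUID 0x180d (Heart Rate)
 *	};
 *
 *	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), (u8 *)ad,
 *				   0, NULL, 0, 0);
 *
 * With timeout and duration both 0, the instance never expires and
 * rotates using hdev->def_multi_adv_rotation_duration.
 */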
3007
3008 /* This function requires the caller holds hdev->lock */
3009 void hci_adv_monitors_clear(struct hci_dev *hdev)
3010 {
3011         struct adv_monitor *monitor;
3012         int handle;
3013
3014         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3015                 hci_free_adv_monitor(monitor);
3016
3017         idr_destroy(&hdev->adv_monitors_idr);
3018 }
3019
3020 void hci_free_adv_monitor(struct adv_monitor *monitor)
3021 {
3022         struct adv_pattern *pattern;
3023         struct adv_pattern *tmp;
3024
3025         if (!monitor)
3026                 return;
3027
3028         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list)
3029                 kfree(pattern);
3030
3031         kfree(monitor);
3032 }
3033
3034 /* This function requires the caller holds hdev->lock */
3035 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3036 {
3037         int min, max, handle;
3038
3039         if (!monitor)
3040                 return -EINVAL;
3041
3042         min = HCI_MIN_ADV_MONITOR_HANDLE;
3043         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3044         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3045                            GFP_KERNEL);
3046         if (handle < 0)
3047                 return handle;
3048
3049         hdev->adv_monitors_cnt++;
3050         monitor->handle = handle;
3051
3052         hci_update_background_scan(hdev);
3053
3054         return 0;
3055 }
3056
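/* Usage sketch (illustrative only): allocating and registering a monitor.
 * Real callers build the monitor and its pattern list from mgmt input;
 * the bare allocation shown here is hypothetical.
 *
 *	struct adv_monitor *m = kzalloc(sizeof(*m), GFP_KERNEL);
 *	int err;
 *
 *	if (!m)
 *		return -ENOMEM;
 *	INIT_LIST_HEAD(&m->patterns);
 *
 *	hci_dev_lock(hdev);
 *	err = hci_add_adv_monitor(hdev, m);
 *	hci_dev_unlock(hdev);
 *	if (err)
 *		hci_free_adv_monitor(m);
 */
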
3057 static int free_adv_monitor(int id, void *ptr, void *data)
3058 {
3059         struct hci_dev *hdev = data;
3060         struct adv_monitor *monitor = ptr;
3061
3062         idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3063         hci_free_adv_monitor(monitor);
3064         hdev->adv_monitors_cnt--;
3065
3066         return 0;
3067 }
3068
3069 /* This function requires the caller holds hdev->lock */
3070 int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle)
3071 {
3072         struct adv_monitor *monitor;
3073
3074         if (handle) {
3075                 monitor = idr_find(&hdev->adv_monitors_idr, handle);
3076                 if (!monitor)
3077                         return -ENOENT;
3078
3079                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3080                 hci_free_adv_monitor(monitor);
3081                 hdev->adv_monitors_cnt--;
3082         } else {
3083                 /* Remove all monitors if handle is 0. */
3084                 idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev);
3085         }
3086
3087         hci_update_background_scan(hdev);
3088
3089         return 0;
3090 }
3091
3092 /* This function requires the caller holds hdev->lock */
3093 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3094 {
3095         return !idr_is_empty(&hdev->adv_monitors_idr);
3096 }
3097
3098 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3099                                          bdaddr_t *bdaddr, u8 type)
3100 {
3101         struct bdaddr_list *b;
3102
3103         list_for_each_entry(b, bdaddr_list, list) {
3104                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3105                         return b;
3106         }
3107
3108         return NULL;
3109 }
3110
3111 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3112                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3113                                 u8 type)
3114 {
3115         struct bdaddr_list_with_irk *b;
3116
3117         list_for_each_entry(b, bdaddr_list, list) {
3118                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3119                         return b;
3120         }
3121
3122         return NULL;
3123 }
3124
3125 struct bdaddr_list_with_flags *
3126 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3127                                   bdaddr_t *bdaddr, u8 type)
3128 {
3129         struct bdaddr_list_with_flags *b;
3130
3131         list_for_each_entry(b, bdaddr_list, list) {
3132                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3133                         return b;
3134         }
3135
3136         return NULL;
3137 }
3138
3139 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3140 {
3141         struct bdaddr_list *b, *n;
3142
3143         list_for_each_entry_safe(b, n, bdaddr_list, list) {
3144                 list_del(&b->list);
3145                 kfree(b);
3146         }
3147 }
3148
3149 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3150 {
3151         struct bdaddr_list *entry;
3152
3153         if (!bacmp(bdaddr, BDADDR_ANY))
3154                 return -EBADF;
3155
3156         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3157                 return -EEXIST;
3158
3159         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3160         if (!entry)
3161                 return -ENOMEM;
3162
3163         bacpy(&entry->bdaddr, bdaddr);
3164         entry->bdaddr_type = type;
3165
3166         list_add(&entry->list, list);
3167
3168         return 0;
3169 }
3170
3171 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3172                                         u8 type, u8 *peer_irk, u8 *local_irk)
3173 {
3174         struct bdaddr_list_with_irk *entry;
3175
3176         if (!bacmp(bdaddr, BDADDR_ANY))
3177                 return -EBADF;
3178
3179         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3180                 return -EEXIST;
3181
3182         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3183         if (!entry)
3184                 return -ENOMEM;
3185
3186         bacpy(&entry->bdaddr, bdaddr);
3187         entry->bdaddr_type = type;
3188
3189         if (peer_irk)
3190                 memcpy(entry->peer_irk, peer_irk, 16);
3191
3192         if (local_irk)
3193                 memcpy(entry->local_irk, local_irk, 16);
3194
3195         list_add(&entry->list, list);
3196
3197         return 0;
3198 }
3199
3200 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3201                                    u8 type, u32 flags)
3202 {
3203         struct bdaddr_list_with_flags *entry;
3204
3205         if (!bacmp(bdaddr, BDADDR_ANY))
3206                 return -EBADF;
3207
3208         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3209                 return -EEXIST;
3210
3211         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3212         if (!entry)
3213                 return -ENOMEM;
3214
3215         bacpy(&entry->bdaddr, bdaddr);
3216         entry->bdaddr_type = type;
3217         entry->current_flags = flags;
3218
3219         list_add(&entry->list, list);
3220
3221         return 0;
3222 }
3223
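/* Usage sketch (illustrative only): the bdaddr list helpers implement a
 * keyed set over (bdaddr, type). BDADDR_ANY is rejected on add (-EBADF)
 * and clears the whole list on del.
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	if (err && err != -EEXIST)
 *		return err;
 *	...
 *	hci_bdaddr_list_del(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 */
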
3224 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3225 {
3226         struct bdaddr_list *entry;
3227
3228         if (!bacmp(bdaddr, BDADDR_ANY)) {
3229                 hci_bdaddr_list_clear(list);
3230                 return 0;
3231         }
3232
3233         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3234         if (!entry)
3235                 return -ENOENT;
3236
3237         list_del(&entry->list);
3238         kfree(entry);
3239
3240         return 0;
3241 }
3242
3243 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3244                                                         u8 type)
3245 {
3246         struct bdaddr_list_with_irk *entry;
3247
3248         if (!bacmp(bdaddr, BDADDR_ANY)) {
3249                 hci_bdaddr_list_clear(list);
3250                 return 0;
3251         }
3252
3253         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3254         if (!entry)
3255                 return -ENOENT;
3256
3257         list_del(&entry->list);
3258         kfree(entry);
3259
3260         return 0;
3261 }
3262
3263 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3264                                    u8 type)
3265 {
3266         struct bdaddr_list_with_flags *entry;
3267
3268         if (!bacmp(bdaddr, BDADDR_ANY)) {
3269                 hci_bdaddr_list_clear(list);
3270                 return 0;
3271         }
3272
3273         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3274         if (!entry)
3275                 return -ENOENT;
3276
3277         list_del(&entry->list);
3278         kfree(entry);
3279
3280         return 0;
3281 }
3282
3283 /* This function requires the caller holds hdev->lock */
3284 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3285                                                bdaddr_t *addr, u8 addr_type)
3286 {
3287         struct hci_conn_params *params;
3288
3289         list_for_each_entry(params, &hdev->le_conn_params, list) {
3290                 if (bacmp(&params->addr, addr) == 0 &&
3291                     params->addr_type == addr_type) {
3292                         return params;
3293                 }
3294         }
3295
3296         return NULL;
3297 }
3298
3299 /* This function requires the caller holds hdev->lock */
3300 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3301                                                   bdaddr_t *addr, u8 addr_type)
3302 {
3303         struct hci_conn_params *param;
3304
3305         switch (addr_type) {
3306         case ADDR_LE_DEV_PUBLIC_RESOLVED:
3307                 addr_type = ADDR_LE_DEV_PUBLIC;
3308                 break;
3309         case ADDR_LE_DEV_RANDOM_RESOLVED:
3310                 addr_type = ADDR_LE_DEV_RANDOM;
3311                 break;
3312         }
3313
3314         list_for_each_entry(param, list, action) {
3315                 if (bacmp(&param->addr, addr) == 0 &&
3316                     param->addr_type == addr_type)
3317                         return param;
3318         }
3319
3320         return NULL;
3321 }
3322
3323 /* This function requires the caller holds hdev->lock */
3324 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3325                                             bdaddr_t *addr, u8 addr_type)
3326 {
3327         struct hci_conn_params *params;
3328
3329         params = hci_conn_params_lookup(hdev, addr, addr_type);
3330         if (params)
3331                 return params;
3332
3333         params = kzalloc(sizeof(*params), GFP_KERNEL);
3334         if (!params) {
3335                 bt_dev_err(hdev, "out of memory");
3336                 return NULL;
3337         }
3338
3339         bacpy(&params->addr, addr);
3340         params->addr_type = addr_type;
3341
3342         list_add(&params->list, &hdev->le_conn_params);
3343         INIT_LIST_HEAD(&params->action);
3344
3345         params->conn_min_interval = hdev->le_conn_min_interval;
3346         params->conn_max_interval = hdev->le_conn_max_interval;
3347         params->conn_latency = hdev->le_conn_latency;
3348         params->supervision_timeout = hdev->le_supv_timeout;
3349         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3350
3351         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3352
3353         return params;
3354 }
3355
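/* Usage sketch (illustrative only): look up or create LE connection
 * parameters for a peer. Real callers (see hci_conn_params_set and
 * mgmt.c) additionally update the pend_le_conns/pend_le_reports action
 * lists and kick the background scan.
 *
 *	struct hci_conn_params *params;
 *
 *	hci_dev_lock(hdev);
 *	params = hci_conn_params_add(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC);
 *	if (params)
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *	hci_dev_unlock(hdev);
 */
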
3356 static void hci_conn_params_free(struct hci_conn_params *params)
3357 {
3358         if (params->conn) {
3359                 hci_conn_drop(params->conn);
3360                 hci_conn_put(params->conn);
3361         }
3362
3363         list_del(&params->action);
3364         list_del(&params->list);
3365         kfree(params);
3366 }
3367
3368 /* This function requires the caller holds hdev->lock */
3369 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3370 {
3371         struct hci_conn_params *params;
3372
3373         params = hci_conn_params_lookup(hdev, addr, addr_type);
3374         if (!params)
3375                 return;
3376
3377         hci_conn_params_free(params);
3378
3379         hci_update_background_scan(hdev);
3380
3381         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3382 }
3383
3384 /* This function requires the caller holds hdev->lock */
3385 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3386 {
3387         struct hci_conn_params *params, *tmp;
3388
3389         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3390                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3391                         continue;
3392
3393                 /* If trying to establish a one-time connection to a disabled
3394                  * device, leave the params, but mark them as just once.
3395                  */
3396                 if (params->explicit_connect) {
3397                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3398                         continue;
3399                 }
3400
3401                 list_del(&params->list);
3402                 kfree(params);
3403         }
3404
3405         BT_DBG("All LE disabled connection parameters were removed");
3406 }
3407
3408 /* This function requires the caller holds hdev->lock */
3409 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3410 {
3411         struct hci_conn_params *params, *tmp;
3412
3413         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3414                 hci_conn_params_free(params);
3415
3416         BT_DBG("All LE connection parameters were removed");
3417 }
3418
3419 /* Copy the Identity Address of the controller.
3420  *
3421  * If the controller has a public BD_ADDR, then by default use that one.
3422  * If this is a LE only controller without a public address, default to
3423  * the static random address.
3424  *
3425  * For debugging purposes it is possible to force controllers with a
3426  * public address to use the static random address instead.
3427  *
3428  * In case BR/EDR has been disabled on a dual-mode controller and
3429  * userspace has configured a static address, then that address
3430  * becomes the identity address instead of the public BR/EDR address.
3431  */
3432 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3433                                u8 *bdaddr_type)
3434 {
3435         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3436             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3437             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3438              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3439                 bacpy(bdaddr, &hdev->static_addr);
3440                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3441         } else {
3442                 bacpy(bdaddr, &hdev->bdaddr);
3443                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3444         }
3445 }
3446
3447 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3448 {
3449         int i;
3450
3451         for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3452                 clear_bit(i, hdev->suspend_tasks);
3453
3454         wake_up(&hdev->suspend_wait_q);
3455 }
3456
3457 static int hci_suspend_wait_event(struct hci_dev *hdev)
3458 {
3459 #define WAKE_COND                                                              \
3460         (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) ==           \
3461          __SUSPEND_NUM_TASKS)
3462
3463         int i;
3464         int ret = wait_event_timeout(hdev->suspend_wait_q,
3465                                      WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3466
3467         if (ret == 0) {
3468                 bt_dev_err(hdev, "Timed out waiting for suspend events");
3469                 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3470                         if (test_bit(i, hdev->suspend_tasks))
3471                                 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3472                         clear_bit(i, hdev->suspend_tasks);
3473                 }
3474
3475                 ret = -ETIMEDOUT;
3476         } else {
3477                 ret = 0;
3478         }
3479
3480         return ret;
3481 }
3482
3483 static void hci_prepare_suspend(struct work_struct *work)
3484 {
3485         struct hci_dev *hdev =
3486                 container_of(work, struct hci_dev, suspend_prepare);
3487
3488         hci_dev_lock(hdev);
3489         hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3490         hci_dev_unlock(hdev);
3491 }
3492
3493 static int hci_change_suspend_state(struct hci_dev *hdev,
3494                                     enum suspended_state next)
3495 {
3496         hdev->suspend_state_next = next;
3497         set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3498         queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3499         return hci_suspend_wait_event(hdev);
3500 }
3501
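/* Note on the handshake above (descriptive, added for clarity): the
 * notifier sets SUSPEND_PREPARE_NOTIFIER, queues suspend_prepare and
 * then blocks in hci_suspend_wait_event() until the request machinery
 * has cleared every bit in hdev->suspend_tasks, or until
 * SUSPEND_NOTIFIER_TIMEOUT expires and the wait gives up with
 * -ETIMEDOUT.
 */
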
3502 static void hci_clear_wake_reason(struct hci_dev *hdev)
3503 {
3504         hci_dev_lock(hdev);
3505
3506         hdev->wake_reason = 0;
3507         bacpy(&hdev->wake_addr, BDADDR_ANY);
3508         hdev->wake_addr_type = 0;
3509
3510         hci_dev_unlock(hdev);
3511 }
3512
3513 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3514                                 void *data)
3515 {
3516         struct hci_dev *hdev =
3517                 container_of(nb, struct hci_dev, suspend_notifier);
3518         int ret = 0;
3519         u8 state = BT_RUNNING;
3520
3521         /* If powering down, wait for completion. */
3522         if (mgmt_powering_down(hdev)) {
3523                 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3524                 ret = hci_suspend_wait_event(hdev);
3525                 if (ret)
3526                         goto done;
3527         }
3528
3529         /* Suspend notifier should only act on events when powered. */
3530         if (!hdev_is_powered(hdev))
3531                 goto done;
3532
3533         if (action == PM_SUSPEND_PREPARE) {
3534                 /* Suspend consists of two actions:
3535                  *  - First, disconnect everything and make the controller not
3536                  *    connectable (disabling scanning)
3537                  *  - Second, program event filter/whitelist and enable scan
3538                  */
3539                 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3540                 if (!ret)
3541                         state = BT_SUSPEND_DISCONNECT;
3542
3543                 /* Only configure whitelist if disconnect succeeded and wake
3544                  * isn't being prevented.
3545                  */
3546                 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3547                         ret = hci_change_suspend_state(hdev,
3548                                                 BT_SUSPEND_CONFIGURE_WAKE);
3549                         if (!ret)
3550                                 state = BT_SUSPEND_CONFIGURE_WAKE;
3551                 }
3552
3553                 hci_clear_wake_reason(hdev);
3554                 mgmt_suspending(hdev, state);
3555
3556         } else if (action == PM_POST_SUSPEND) {
3557                 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3558
3559                 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3560                               hdev->wake_addr_type);
3561         }
3562
3563 done:
3564         /* We always allow suspend even if suspend preparation failed and
3565          * attempt to recover in resume.
3566          */
3567         if (ret)
3568                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3569                            action, ret);
3570
3571         return NOTIFY_DONE;
3572 }
3573
3574 /* Alloc HCI device */
3575 struct hci_dev *hci_alloc_dev(void)
3576 {
3577         struct hci_dev *hdev;
3578
3579         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3580         if (!hdev)
3581                 return NULL;
3582
3583         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3584         hdev->esco_type = (ESCO_HV1);
3585         hdev->link_mode = (HCI_LM_ACCEPT);
3586         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3587         hdev->io_capability = 0x03;     /* No Input No Output */
3588         hdev->manufacturer = 0xffff;    /* Default to internal use */
3589         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3590         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3591         hdev->adv_instance_cnt = 0;
3592         hdev->cur_adv_instance = 0x00;
3593         hdev->adv_instance_timeout = 0;
3594
3595         hdev->sniff_max_interval = 800;
3596         hdev->sniff_min_interval = 80;
3597
3598         hdev->le_adv_channel_map = 0x07;
3599         hdev->le_adv_min_interval = 0x0800;
3600         hdev->le_adv_max_interval = 0x0800;
3601         hdev->le_scan_interval = 0x0060;
3602         hdev->le_scan_window = 0x0030;
3603         hdev->le_scan_int_suspend = 0x0400;
3604         hdev->le_scan_window_suspend = 0x0012;
3605         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3606         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3607         hdev->le_scan_int_connect = 0x0060;
3608         hdev->le_scan_window_connect = 0x0060;
3609         hdev->le_conn_min_interval = 0x0018;
3610         hdev->le_conn_max_interval = 0x0028;
3611         hdev->le_conn_latency = 0x0000;
3612         hdev->le_supv_timeout = 0x002a;
3613         hdev->le_def_tx_len = 0x001b;
3614         hdev->le_def_tx_time = 0x0148;
3615         hdev->le_max_tx_len = 0x001b;
3616         hdev->le_max_tx_time = 0x0148;
3617         hdev->le_max_rx_len = 0x001b;
3618         hdev->le_max_rx_time = 0x0148;
3619         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3620         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3621         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3622         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3623         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3624         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3625         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3626
3627         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3628         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3629         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3630         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3631         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3632         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3633
3634         /* default 1.28 sec page scan */
3635         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3636         hdev->def_page_scan_int = 0x0800;
3637         hdev->def_page_scan_window = 0x0012;
3638
3639         mutex_init(&hdev->lock);
3640         mutex_init(&hdev->req_lock);
3641
3642         INIT_LIST_HEAD(&hdev->mgmt_pending);
3643         INIT_LIST_HEAD(&hdev->blacklist);
3644         INIT_LIST_HEAD(&hdev->whitelist);
3645         INIT_LIST_HEAD(&hdev->uuids);
3646         INIT_LIST_HEAD(&hdev->link_keys);
3647         INIT_LIST_HEAD(&hdev->long_term_keys);
3648         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3649         INIT_LIST_HEAD(&hdev->remote_oob_data);
3650         INIT_LIST_HEAD(&hdev->le_white_list);
3651         INIT_LIST_HEAD(&hdev->le_resolv_list);
3652         INIT_LIST_HEAD(&hdev->le_conn_params);
3653         INIT_LIST_HEAD(&hdev->pend_le_conns);
3654         INIT_LIST_HEAD(&hdev->pend_le_reports);
3655         INIT_LIST_HEAD(&hdev->conn_hash.list);
3656         INIT_LIST_HEAD(&hdev->adv_instances);
3657         INIT_LIST_HEAD(&hdev->blocked_keys);
3658
3659         INIT_WORK(&hdev->rx_work, hci_rx_work);
3660         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3661         INIT_WORK(&hdev->tx_work, hci_tx_work);
3662         INIT_WORK(&hdev->power_on, hci_power_on);
3663         INIT_WORK(&hdev->error_reset, hci_error_reset);
3664         INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3665
3666         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3667
3668         skb_queue_head_init(&hdev->rx_q);
3669         skb_queue_head_init(&hdev->cmd_q);
3670         skb_queue_head_init(&hdev->raw_q);
3671
3672         init_waitqueue_head(&hdev->req_wait_q);
3673         init_waitqueue_head(&hdev->suspend_wait_q);
3674
3675         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3676
3677         hci_request_setup(hdev);
3678
3679         hci_init_sysfs(hdev);
3680         discovery_init(hdev);
3681
3682         return hdev;
3683 }
3684 EXPORT_SYMBOL(hci_alloc_dev);
3685
3686 /* Free HCI device */
3687 void hci_free_dev(struct hci_dev *hdev)
3688 {
3689         /* will be freed via device release */
3690         put_device(&hdev->dev);
3691 }
3692 EXPORT_SYMBOL(hci_free_dev);
3693
3694 /* Register HCI device */
3695 int hci_register_dev(struct hci_dev *hdev)
3696 {
3697         int id, error;
3698
3699         if (!hdev->open || !hdev->close || !hdev->send)
3700                 return -EINVAL;
3701
3702         /* Do not allow HCI_AMP devices to register at index 0,
3703          * so the index can be used as the AMP controller ID.
3704          */
3705         switch (hdev->dev_type) {
3706         case HCI_PRIMARY:
3707                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3708                 break;
3709         case HCI_AMP:
3710                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3711                 break;
3712         default:
3713                 return -EINVAL;
3714         }
3715
3716         if (id < 0)
3717                 return id;
3718
3719         sprintf(hdev->name, "hci%d", id);
3720         hdev->id = id;
3721
3722         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3723
3724         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3725         if (!hdev->workqueue) {
3726                 error = -ENOMEM;
3727                 goto err;
3728         }
3729
3730         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3731                                                       hdev->name);
3732         if (!hdev->req_workqueue) {
3733                 destroy_workqueue(hdev->workqueue);
3734                 error = -ENOMEM;
3735                 goto err;
3736         }
3737
3738         if (!IS_ERR_OR_NULL(bt_debugfs))
3739                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3740
3741         dev_set_name(&hdev->dev, "%s", hdev->name);
3742
3743         error = device_add(&hdev->dev);
3744         if (error < 0)
3745                 goto err_wqueue;
3746
3747         hci_leds_init(hdev);
3748
3749         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3750                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3751                                     hdev);
3752         if (hdev->rfkill) {
3753                 if (rfkill_register(hdev->rfkill) < 0) {
3754                         rfkill_destroy(hdev->rfkill);
3755                         hdev->rfkill = NULL;
3756                 }
3757         }
3758
3759         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3760                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3761
3762         hci_dev_set_flag(hdev, HCI_SETUP);
3763         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3764
3765         if (hdev->dev_type == HCI_PRIMARY) {
3766                 /* Assume BR/EDR support until proven otherwise (such as
3767                  * through reading supported features during init).
3768                  */
3769                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3770         }
3771
3772         write_lock(&hci_dev_list_lock);
3773         list_add(&hdev->list, &hci_dev_list);
3774         write_unlock(&hci_dev_list_lock);
3775
3776         /* Devices that are marked for raw-only usage are unconfigured
3777          * and should not be included in normal operation.
3778          */
3779         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3780                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3781
3782         hci_sock_dev_event(hdev, HCI_DEV_REG);
3783         hci_dev_hold(hdev);
3784
3785         hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3786         error = register_pm_notifier(&hdev->suspend_notifier);
3787         if (error)
3788                 goto err_wqueue;
3789
3790         queue_work(hdev->req_workqueue, &hdev->power_on);
3791
3792         idr_init(&hdev->adv_monitors_idr);
3793
3794         return id;
3795
3796 err_wqueue:
3797         destroy_workqueue(hdev->workqueue);
3798         destroy_workqueue(hdev->req_workqueue);
3799 err:
3800         ida_simple_remove(&hci_index_ida, hdev->id);
3801
3802         return error;
3803 }
3804 EXPORT_SYMBOL(hci_register_dev);
3805
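/* Driver-side sketch (illustrative only; see drivers/bluetooth for real
 * examples): a transport driver allocates an hdev, wires up its
 * callbacks and registers it. my_open/my_close/my_send are hypothetical.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */
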
3806 /* Unregister HCI device */
3807 void hci_unregister_dev(struct hci_dev *hdev)
3808 {
3809         int id;
3810
3811         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3812
3813         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3814
3815         id = hdev->id;
3816
3817         write_lock(&hci_dev_list_lock);
3818         list_del(&hdev->list);
3819         write_unlock(&hci_dev_list_lock);
3820
3821         cancel_work_sync(&hdev->power_on);
3822
3823         hci_suspend_clear_tasks(hdev);
3824         unregister_pm_notifier(&hdev->suspend_notifier);
3825         cancel_work_sync(&hdev->suspend_prepare);
3826
3827         hci_dev_do_close(hdev);
3828
3829         if (!test_bit(HCI_INIT, &hdev->flags) &&
3830             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3831             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3832                 hci_dev_lock(hdev);
3833                 mgmt_index_removed(hdev);
3834                 hci_dev_unlock(hdev);
3835         }
3836
3837         /* mgmt_index_removed should take care of emptying the
3838          * pending list */
3839         BUG_ON(!list_empty(&hdev->mgmt_pending));
3840
3841         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3842
3843         if (hdev->rfkill) {
3844                 rfkill_unregister(hdev->rfkill);
3845                 rfkill_destroy(hdev->rfkill);
3846         }
3847
3848         device_del(&hdev->dev);
3849
3850         debugfs_remove_recursive(hdev->debugfs);
3851         kfree_const(hdev->hw_info);
3852         kfree_const(hdev->fw_info);
3853
3854         destroy_workqueue(hdev->workqueue);
3855         destroy_workqueue(hdev->req_workqueue);
3856
3857         hci_dev_lock(hdev);
3858         hci_bdaddr_list_clear(&hdev->blacklist);
3859         hci_bdaddr_list_clear(&hdev->whitelist);
3860         hci_uuids_clear(hdev);
3861         hci_link_keys_clear(hdev);
3862         hci_smp_ltks_clear(hdev);
3863         hci_smp_irks_clear(hdev);
3864         hci_remote_oob_data_clear(hdev);
3865         hci_adv_instances_clear(hdev);
3866         hci_adv_monitors_clear(hdev);
3867         hci_bdaddr_list_clear(&hdev->le_white_list);
3868         hci_bdaddr_list_clear(&hdev->le_resolv_list);
3869         hci_conn_params_clear_all(hdev);
3870         hci_discovery_filter_clear(hdev);
3871         hci_blocked_keys_clear(hdev);
3872         hci_dev_unlock(hdev);
3873
3874         hci_dev_put(hdev);
3875
3876         ida_simple_remove(&hci_index_ida, id);
3877 }
3878 EXPORT_SYMBOL(hci_unregister_dev);
3879
3880 /* Suspend HCI device */
3881 int hci_suspend_dev(struct hci_dev *hdev)
3882 {
3883         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3884         return 0;
3885 }
3886 EXPORT_SYMBOL(hci_suspend_dev);
3887
3888 /* Resume HCI device */
3889 int hci_resume_dev(struct hci_dev *hdev)
3890 {
3891         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3892         return 0;
3893 }
3894 EXPORT_SYMBOL(hci_resume_dev);
3895
3896 /* Reset HCI device */
3897 int hci_reset_dev(struct hci_dev *hdev)
3898 {
3899         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3900         struct sk_buff *skb;
3901
3902         skb = bt_skb_alloc(3, GFP_ATOMIC);
3903         if (!skb)
3904                 return -ENOMEM;
3905
3906         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3907         skb_put_data(skb, hw_err, 3);
3908
3909         /* Send Hardware Error to upper stack */
3910         return hci_recv_frame(hdev, skb);
3911 }
3912 EXPORT_SYMBOL(hci_reset_dev);
3913
3914 /* Receive frame from HCI drivers */
3915 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3916 {
3917         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3918                       !test_bit(HCI_INIT, &hdev->flags))) {
3919                 kfree_skb(skb);
3920                 return -ENXIO;
3921         }
3922
3923         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3924             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3925             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
3926             hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
3927                 kfree_skb(skb);
3928                 return -EINVAL;
3929         }
3930
3931         /* Incoming skb */
3932         bt_cb(skb)->incoming = 1;
3933
3934         /* Time stamp */
3935         __net_timestamp(skb);
3936
3937         skb_queue_tail(&hdev->rx_q, skb);
3938         queue_work(hdev->workqueue, &hdev->rx_work);
3939
3940         return 0;
3941 }
3942 EXPORT_SYMBOL(hci_recv_frame);
3943
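/* Driver-side sketch (illustrative only): queuing a received HCI event
 * to the core, mirroring the pattern used by hci_reset_dev() above.
 * buf and count are hypothetical driver state.
 *
 *	struct sk_buff *skb = bt_skb_alloc(count, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, count);
 *	return hci_recv_frame(hdev, skb);
 */
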
3944 /* Receive diagnostic message from HCI drivers */
3945 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3946 {
3947         /* Mark as diagnostic packet */
3948         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3949
3950         /* Time stamp */
3951         __net_timestamp(skb);
3952
3953         skb_queue_tail(&hdev->rx_q, skb);
3954         queue_work(hdev->workqueue, &hdev->rx_work);
3955
3956         return 0;
3957 }
3958 EXPORT_SYMBOL(hci_recv_diag);
3959
3960 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3961 {
3962         va_list vargs;
3963
3964         va_start(vargs, fmt);
3965         kfree_const(hdev->hw_info);
3966         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3967         va_end(vargs);
3968 }
3969 EXPORT_SYMBOL(hci_set_hw_info);
3970
3971 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3972 {
3973         va_list vargs;
3974
3975         va_start(vargs, fmt);
3976         kfree_const(hdev->fw_info);
3977         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3978         va_end(vargs);
3979 }
3980 EXPORT_SYMBOL(hci_set_fw_info);
3981
3982 /* ---- Interface to upper protocols ---- */
3983
3984 int hci_register_cb(struct hci_cb *cb)
3985 {
3986         BT_DBG("%p name %s", cb, cb->name);
3987
3988         mutex_lock(&hci_cb_list_lock);
3989         list_add_tail(&cb->list, &hci_cb_list);
3990         mutex_unlock(&hci_cb_list_lock);
3991
3992         return 0;
3993 }
3994 EXPORT_SYMBOL(hci_register_cb);
3995
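/* Usage sketch (illustrative only): an upper protocol hooks into HCI
 * connection events. The callback names are hypothetical; compare the
 * l2cap_cb and sco_cb instances registered elsewhere in the stack.
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_connect_cfm,
 *		.disconn_cfm	= my_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 */
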
3996 int hci_unregister_cb(struct hci_cb *cb)
3997 {
3998         BT_DBG("%p name %s", cb, cb->name);
3999
4000         mutex_lock(&hci_cb_list_lock);
4001         list_del(&cb->list);
4002         mutex_unlock(&hci_cb_list_lock);
4003
4004         return 0;
4005 }
4006 EXPORT_SYMBOL(hci_unregister_cb);
4007
4008 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4009 {
4010         int err;
4011
4012         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4013                skb->len);
4014
4015         /* Time stamp */
4016         __net_timestamp(skb);
4017
4018         /* Send copy to monitor */
4019         hci_send_to_monitor(hdev, skb);
4020
4021         if (atomic_read(&hdev->promisc)) {
4022                 /* Send copy to the sockets */
4023                 hci_send_to_sock(hdev, skb);
4024         }
4025
4026         /* Get rid of skb owner, prior to sending to the driver. */
4027         skb_orphan(skb);
4028
4029         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4030                 kfree_skb(skb);
4031                 return;
4032         }
4033
4034         err = hdev->send(hdev, skb);
4035         if (err < 0) {
4036                 bt_dev_err(hdev, "sending frame failed (%d)", err);
4037                 kfree_skb(skb);
4038         }
4039 }
4040
4041 /* Send HCI command */
4042 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4043                  const void *param)
4044 {
4045         struct sk_buff *skb;
4046
4047         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4048
4049         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4050         if (!skb) {
4051                 bt_dev_err(hdev, "no memory for command");
4052                 return -ENOMEM;
4053         }
4054
4055         /* Stand-alone HCI commands must be flagged as
4056          * single-command requests.
4057          */
4058         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4059
4060         skb_queue_tail(&hdev->cmd_q, skb);
4061         queue_work(hdev->workqueue, &hdev->cmd_work);
4062
4063         return 0;
4064 }
4065
4066 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4067                    const void *param)
4068 {
4069         struct sk_buff *skb;
4070
4071         if (hci_opcode_ogf(opcode) != 0x3f) {
4072                 /* A controller receiving a command shall respond with either
4073                  * a Command Status Event or a Command Complete Event.
4074                  * Therefore, all standard HCI commands must be sent via the
4075                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4076                  * Some vendors do not comply with this rule for vendor-specific
4077                  * commands and do not return any event. We want to support
4078                  * unresponded commands for such cases only.
4079                  */
4080                 bt_dev_err(hdev, "unresponded command not supported");
4081                 return -EINVAL;
4082         }
4083
4084         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4085         if (!skb) {
4086                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4087                            opcode);
4088                 return -ENOMEM;
4089         }
4090
4091         hci_send_frame(hdev, skb);
4092
4093         return 0;
4094 }
4095 EXPORT_SYMBOL(__hci_cmd_send);
4096
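/* Usage sketch (illustrative only): firing an unresponded vendor command.
 * Only OGF 0x3f is accepted here; the OCF and parameter are hypothetical.
 *
 *	u8 param = 0x01;
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), &param);
 */
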
4097 /* Get data from the previously sent command */
4098 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4099 {
4100         struct hci_command_hdr *hdr;
4101
4102         if (!hdev->sent_cmd)
4103                 return NULL;
4104
4105         hdr = (void *) hdev->sent_cmd->data;
4106
4107         if (hdr->opcode != cpu_to_le16(opcode))
4108                 return NULL;
4109
4110         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4111
4112         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4113 }
4114
4115 /* Send HCI command and wait for command complete event */
4116 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4117                              const void *param, u32 timeout)
4118 {
4119         struct sk_buff *skb;
4120
4121         if (!test_bit(HCI_UP, &hdev->flags))
4122                 return ERR_PTR(-ENETDOWN);
4123
4124         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4125
4126         hci_req_sync_lock(hdev);
4127         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4128         hci_req_sync_unlock(hdev);
4129
4130         return skb;
4131 }
4132 EXPORT_SYMBOL(hci_cmd_sync);
4133
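/* Usage sketch (illustrative only): synchronously reading the local
 * version and releasing the returned event skb.
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data as struct hci_rp_read_local_version ...
 *	kfree_skb(skb);
 */
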
4134 /* Send ACL data */
4135 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4136 {
4137         struct hci_acl_hdr *hdr;
4138         int len = skb->len;
4139
4140         skb_push(skb, HCI_ACL_HDR_SIZE);
4141         skb_reset_transport_header(skb);
4142         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4143         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4144         hdr->dlen   = cpu_to_le16(len);
4145 }
4146
4147 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4148                           struct sk_buff *skb, __u16 flags)
4149 {
4150         struct hci_conn *conn = chan->conn;
4151         struct hci_dev *hdev = conn->hdev;
4152         struct sk_buff *list;
4153
4154         skb->len = skb_headlen(skb);
4155         skb->data_len = 0;
4156
4157         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4158
4159         switch (hdev->dev_type) {
4160         case HCI_PRIMARY:
4161                 hci_add_acl_hdr(skb, conn->handle, flags);
4162                 break;
4163         case HCI_AMP:
4164                 hci_add_acl_hdr(skb, chan->handle, flags);
4165                 break;
4166         default:
4167                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4168                 return;
4169         }
4170
4171         list = skb_shinfo(skb)->frag_list;
4172         if (!list) {
4173                 /* Non fragmented */
4174                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4175
4176                 skb_queue_tail(queue, skb);
4177         } else {
4178                 /* Fragmented */
4179                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4180
4181                 skb_shinfo(skb)->frag_list = NULL;
4182
4183                 /* Queue all fragments atomically. We need to use spin_lock_bh
4184                  * here because of 6LoWPAN links, as there this function is
4185                  * called from softirq and using normal spin lock could cause
4186                  * deadlocks.
4187                  */
4188                 spin_lock_bh(&queue->lock);
4189
4190                 __skb_queue_tail(queue, skb);
4191
4192                 flags &= ~ACL_START;
4193                 flags |= ACL_CONT;
4194                 do {
4195                         skb = list; list = list->next;
4196
4197                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4198                         hci_add_acl_hdr(skb, conn->handle, flags);
4199
4200                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4201
4202                         __skb_queue_tail(queue, skb);
4203                 } while (list);
4204
4205                 spin_unlock_bh(&queue->lock);
4206         }
4207 }
4208
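/* Note on fragmentation above (descriptive, added for clarity): for a
 * fragmented skb the remaining fragments arrive chained on
 * skb_shinfo(skb)->frag_list. The head fragment keeps the caller's
 * start flags while every continuation is re-flagged ACL_CONT before
 * being queued, so the controller can reassemble the L2CAP PDU in
 * order.
 */
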
4209 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4210 {
4211         struct hci_dev *hdev = chan->conn->hdev;
4212
4213         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4214
4215         hci_queue_acl(chan, &chan->data_q, skb, flags);
4216
4217         queue_work(hdev->workqueue, &hdev->tx_work);
4218 }
4219
4220 /* Send SCO data */
4221 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4222 {
4223         struct hci_dev *hdev = conn->hdev;
4224         struct hci_sco_hdr hdr;
4225
4226         BT_DBG("%s len %d", hdev->name, skb->len);
4227
4228         hdr.handle = cpu_to_le16(conn->handle);
4229         hdr.dlen   = skb->len;
4230
4231         skb_push(skb, HCI_SCO_HDR_SIZE);
4232         skb_reset_transport_header(skb);
4233         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4234
4235         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4236
4237         skb_queue_tail(&conn->data_q, skb);
4238         queue_work(hdev->workqueue, &hdev->tx_work);
4239 }
4240
4241 /* ---- HCI TX task (outgoing data) ---- */
4242
4243 /* HCI Connection scheduler */
4244 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4245                                      int *quote)
4246 {
4247         struct hci_conn_hash *h = &hdev->conn_hash;
4248         struct hci_conn *conn = NULL, *c;
4249         unsigned int num = 0, min = ~0;
4250
4251         /* We don't have to lock device here. Connections are always
4252          * added and removed with TX task disabled. */
4253
4254         rcu_read_lock();
4255
4256         list_for_each_entry_rcu(c, &h->list, list) {
4257                 if (c->type != type || skb_queue_empty(&c->data_q))
4258                         continue;
4259
4260                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4261                         continue;
4262
4263                 num++;
4264
4265                 if (c->sent < min) {
4266                         min  = c->sent;
4267                         conn = c;
4268                 }
4269
4270                 if (hci_conn_num(hdev, type) == num)
4271                         break;
4272         }
4273
4274         rcu_read_unlock();
4275
4276         if (conn) {
4277                 int cnt, q;
4278
4279                 switch (conn->type) {
4280                 case ACL_LINK:
4281                         cnt = hdev->acl_cnt;
4282                         break;
4283                 case SCO_LINK:
4284                 case ESCO_LINK:
4285                         cnt = hdev->sco_cnt;
4286                         break;
4287                 case LE_LINK:
4288                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4289                         break;
4290                 default:
4291                         cnt = 0;
4292                         bt_dev_err(hdev, "unknown link type %d", conn->type);
4293                 }
4294
4295                 q = cnt / num;
4296                 *quote = q ? q : 1;
4297         } else
4298                 *quote = 0;
4299
4300         BT_DBG("conn %p quote %d", conn, *quote);
4301         return conn;
4302 }
4303
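/* Worked example for the quota above (illustrative): with
 * hdev->acl_cnt == 8 and three ACL connections that have queued data,
 * the least-recently-served connection is picked and granted
 * 8 / 3 == 2 packets; a zero quotient is rounded up to 1 so a busy
 * link is never starved completely.
 */
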
4304 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4305 {
4306         struct hci_conn_hash *h = &hdev->conn_hash;
4307         struct hci_conn *c;
4308
4309         bt_dev_err(hdev, "link tx timeout");
4310
4311         rcu_read_lock();
4312
4313         /* Kill stalled connections */
4314         list_for_each_entry_rcu(c, &h->list, list) {
4315                 if (c->type == type && c->sent) {
4316                         bt_dev_err(hdev, "killing stalled connection %pMR",
4317                                    &c->dst);
4318                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4319                 }
4320         }
4321
4322         rcu_read_unlock();
4323 }
4324
4325 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4326                                       int *quote)
4327 {
4328         struct hci_conn_hash *h = &hdev->conn_hash;
4329         struct hci_chan *chan = NULL;
4330         unsigned int num = 0, min = ~0, cur_prio = 0;
4331         struct hci_conn *conn;
4332         int cnt, q, conn_num = 0;
4333
4334         BT_DBG("%s", hdev->name);
4335
4336         rcu_read_lock();
4337
4338         list_for_each_entry_rcu(conn, &h->list, list) {
4339                 struct hci_chan *tmp;
4340
4341                 if (conn->type != type)
4342                         continue;
4343
4344                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4345                         continue;
4346
4347                 conn_num++;
4348
4349                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4350                         struct sk_buff *skb;
4351
4352                         if (skb_queue_empty(&tmp->data_q))
4353                                 continue;
4354
4355                         skb = skb_peek(&tmp->data_q);
4356                         if (skb->priority < cur_prio)
4357                                 continue;
4358
4359                         if (skb->priority > cur_prio) {
4360                                 num = 0;
4361                                 min = ~0;
4362                                 cur_prio = skb->priority;
4363                         }
4364
4365                         num++;
4366
4367                         if (conn->sent < min) {
4368                                 min  = conn->sent;
4369                                 chan = tmp;
4370                         }
4371                 }
4372
4373                 if (hci_conn_num(hdev, type) == conn_num)
4374                         break;
4375         }
4376
4377         rcu_read_unlock();
4378
4379         if (!chan)
4380                 return NULL;
4381
4382         switch (chan->conn->type) {
4383         case ACL_LINK:
4384                 cnt = hdev->acl_cnt;
4385                 break;
4386         case AMP_LINK:
4387                 cnt = hdev->block_cnt;
4388                 break;
4389         case SCO_LINK:
4390         case ESCO_LINK:
4391                 cnt = hdev->sco_cnt;
4392                 break;
4393         case LE_LINK:
4394                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4395                 break;
4396         default:
4397                 cnt = 0;
4398                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4399         }
4400
4401         q = cnt / num;
4402         *quote = q ? q : 1;
4403         BT_DBG("chan %p quote %d", chan, *quote);
4404         return chan;
4405 }
4406
4407 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4408 {
4409         struct hci_conn_hash *h = &hdev->conn_hash;
4410         struct hci_conn *conn;
4411         int num = 0;
4412
4413         BT_DBG("%s", hdev->name);
4414
4415         rcu_read_lock();
4416
4417         list_for_each_entry_rcu(conn, &h->list, list) {
4418                 struct hci_chan *chan;
4419
4420                 if (conn->type != type)
4421                         continue;
4422
4423                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4424                         continue;
4425
4426                 num++;
4427
4428                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4429                         struct sk_buff *skb;
4430
4431                         if (chan->sent) {
4432                                 chan->sent = 0;
4433                                 continue;
4434                         }
4435
4436                         if (skb_queue_empty(&chan->data_q))
4437                                 continue;
4438
4439                         skb = skb_peek(&chan->data_q);
4440                         if (skb->priority >= HCI_PRIO_MAX - 1)
4441                                 continue;
4442
4443                         skb->priority = HCI_PRIO_MAX - 1;
4444
4445                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4446                                skb->priority);
4447                 }
4448
4449                 if (hci_conn_num(hdev, type) == num)
4450                         break;
4451         }
4452
4453         rcu_read_unlock();
4454 }
4456
4457 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4458 {
4459         /* Calculate count of blocks used by this packet */
4460         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4461 }
4462
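/* Worked example (illustrative): with hdev->block_len == 64, a 300 byte
 * ACL packet carries 300 - HCI_ACL_HDR_SIZE (4 bytes) == 296 bytes of
 * payload and thus occupies DIV_ROUND_UP(296, 64) == 5 controller
 * blocks.
 */
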
4463 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4464 {
4465         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4466                 /* ACL tx timeout must be longer than maximum
4467                  * link supervision timeout (40.9 seconds) */
4468                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4469                                        HCI_ACL_TX_TIMEOUT))
4470                         hci_link_tx_to(hdev, ACL_LINK);
4471         }
4472 }
4473
4474 /* Schedule SCO */
4475 static void hci_sched_sco(struct hci_dev *hdev)
4476 {
4477         struct hci_conn *conn;
4478         struct sk_buff *skb;
4479         int quote;
4480
4481         BT_DBG("%s", hdev->name);
4482
4483         if (!hci_conn_num(hdev, SCO_LINK))
4484                 return;
4485
4486         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4487                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4488                         BT_DBG("skb %p len %d", skb, skb->len);
4489                         hci_send_frame(hdev, skb);
4490
4491                         conn->sent++;
4492                         if (conn->sent == ~0)
4493                                 conn->sent = 0;
4494                 }
4495         }
4496 }
4497
4498 static void hci_sched_esco(struct hci_dev *hdev)
4499 {
4500         struct hci_conn *conn;
4501         struct sk_buff *skb;
4502         int quote;
4503
4504         BT_DBG("%s", hdev->name);
4505
4506         if (!hci_conn_num(hdev, ESCO_LINK))
4507                 return;
4508
4509         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4510                                                      &quote))) {
4511                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4512                         BT_DBG("skb %p len %d", skb, skb->len);
4513                         hci_send_frame(hdev, skb);
4514
4515                         conn->sent++;
4516                         if (conn->sent == ~0)
4517                                 conn->sent = 0;
4518                 }
4519         }
4520 }
4521
4522 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4523 {
4524         unsigned int cnt = hdev->acl_cnt;
4525         struct hci_chan *chan;
4526         struct sk_buff *skb;
4527         int quote;
4528
4529         __check_timeout(hdev, cnt);
4530
4531         while (hdev->acl_cnt &&
4532                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4533                 u32 priority = (skb_peek(&chan->data_q))->priority;
4534                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4535                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4536                                skb->len, skb->priority);
4537
4538                         /* Stop if priority has changed */
4539                         if (skb->priority < priority)
4540                                 break;
4541
4542                         skb = skb_dequeue(&chan->data_q);
4543
4544                         hci_conn_enter_active_mode(chan->conn,
4545                                                    bt_cb(skb)->force_active);
4546
4547                         hci_send_frame(hdev, skb);
4548                         hdev->acl_last_tx = jiffies;
4549
4550                         hdev->acl_cnt--;
4551                         chan->sent++;
4552                         chan->conn->sent++;
4553
4554                         /* Send pending SCO packets right away */
4555                         hci_sched_sco(hdev);
4556                         hci_sched_esco(hdev);
4557                 }
4558         }
4559
4560         if (cnt != hdev->acl_cnt)
4561                 hci_prio_recalculate(hdev, ACL_LINK);
4562 }
4563
4564 static void hci_sched_acl_blk(struct hci_dev *hdev)
4565 {
4566         unsigned int cnt = hdev->block_cnt;
4567         struct hci_chan *chan;
4568         struct sk_buff *skb;
4569         int quote;
4570         u8 type;
4571
4572         __check_timeout(hdev, cnt);
4573
4574         BT_DBG("%s", hdev->name);
4575
4576         if (hdev->dev_type == HCI_AMP)
4577                 type = AMP_LINK;
4578         else
4579                 type = ACL_LINK;
4580
4581         while (hdev->block_cnt > 0 &&
4582                (chan = hci_chan_sent(hdev, type, &quote))) {
4583                 u32 priority = (skb_peek(&chan->data_q))->priority;
4584                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4585                         int blocks;
4586
4587                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4588                                skb->len, skb->priority);
4589
4590                         /* Stop if priority has changed */
4591                         if (skb->priority < priority)
4592                                 break;
4593
4594                         skb = skb_dequeue(&chan->data_q);
4595
4596                         blocks = __get_blocks(hdev, skb);
4597                         if (blocks > hdev->block_cnt)
4598                                 return;
4599
4600                         hci_conn_enter_active_mode(chan->conn,
4601                                                    bt_cb(skb)->force_active);
4602
4603                         hci_send_frame(hdev, skb);
4604                         hdev->acl_last_tx = jiffies;
4605
4606                         hdev->block_cnt -= blocks;
4607                         quote -= blocks;
4608
4609                         chan->sent += blocks;
4610                         chan->conn->sent += blocks;
4611                 }
4612         }
4613
4614         if (cnt != hdev->block_cnt)
4615                 hci_prio_recalculate(hdev, type);
4616 }
4617
4618 static void hci_sched_acl(struct hci_dev *hdev)
4619 {
4620         BT_DBG("%s", hdev->name);
4621
4622         /* No ACL link over BR/EDR controller */
4623         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4624                 return;
4625
4626         /* No AMP link over AMP controller */
4627         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4628                 return;
4629
4630         switch (hdev->flow_ctl_mode) {
4631         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4632                 hci_sched_acl_pkt(hdev);
4633                 break;
4634
4635         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4636                 hci_sched_acl_blk(hdev);
4637                 break;
4638         }
4639 }
4640
4641 static void hci_sched_le(struct hci_dev *hdev)
4642 {
4643         struct hci_chan *chan;
4644         struct sk_buff *skb;
4645         int quote, cnt, tmp;
4646
4647         BT_DBG("%s", hdev->name);
4648
4649         if (!hci_conn_num(hdev, LE_LINK))
4650                 return;
4651
4652         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4653
4654         __check_timeout(hdev, cnt);
4655
4656         tmp = cnt;
4657         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4658                 u32 priority = (skb_peek(&chan->data_q))->priority;
4659                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4660                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4661                                skb->len, skb->priority);
4662
4663                         /* Stop if priority has changed */
4664                         if (skb->priority < priority)
4665                                 break;
4666
4667                         skb = skb_dequeue(&chan->data_q);
4668
4669                         hci_send_frame(hdev, skb);
4670                         hdev->le_last_tx = jiffies;
4671
4672                         cnt--;
4673                         chan->sent++;
4674                         chan->conn->sent++;
4675
4676                         /* Send pending SCO packets right away */
4677                         hci_sched_sco(hdev);
4678                         hci_sched_esco(hdev);
4679                 }
4680         }
4681
4682         if (hdev->le_pkts)
4683                 hdev->le_cnt = cnt;
4684         else
4685                 hdev->acl_cnt = cnt;
4686
4687         if (cnt != tmp)
4688                 hci_prio_recalculate(hdev, LE_LINK);
4689 }
4690
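/* TX work: run the per-link-type schedulers and flush the raw queue.
 * When the device is bound to a user channel the schedulers are
 * skipped, but raw packets are still sent to the driver.
 */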
4691 static void hci_tx_work(struct work_struct *work)
4692 {
4693         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4694         struct sk_buff *skb;
4695
4696         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4697                hdev->sco_cnt, hdev->le_cnt);
4698
4699         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4700                 /* Schedule queues and send frames to the HCI driver */
4701                 hci_sched_sco(hdev);
4702                 hci_sched_esco(hdev);
4703                 hci_sched_acl(hdev);
4704                 hci_sched_le(hdev);
4705         }
4706
4707         /* Send next queued raw (unknown type) packet */
4708         while ((skb = skb_dequeue(&hdev->raw_q)))
4709                 hci_send_frame(hdev, skb);
4710 }
4711
4712 /* ----- HCI RX task (incoming data processing) ----- */
4713
4714 /* ACL data packet */
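/* The 12-bit connection handle and the packet boundary/broadcast
 * flags share one 16-bit field; hci_handle() and hci_flags() split
 * them apart before the payload is passed up to L2CAP.
 */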
4715 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4716 {
4717         struct hci_acl_hdr *hdr = (void *) skb->data;
4718         struct hci_conn *conn;
4719         __u16 handle, flags;
4720
4721         skb_pull(skb, HCI_ACL_HDR_SIZE);
4722
4723         handle = __le16_to_cpu(hdr->handle);
4724         flags  = hci_flags(handle);
4725         handle = hci_handle(handle);
4726
4727         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4728                handle, flags);
4729
4730         hdev->stat.acl_rx++;
4731
4732         hci_dev_lock(hdev);
4733         conn = hci_conn_hash_lookup_handle(hdev, handle);
4734         hci_dev_unlock(hdev);
4735
4736         if (conn) {
4737                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4738
4739                 /* Send to upper protocol */
4740                 l2cap_recv_acldata(conn, skb, flags);
4741                 return;
4742         }
4743
4744         bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4745                    handle);
4746
4747         kfree_skb(skb);
4748 }
4749
4750 /* SCO data packet */
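/* For SCO the flag bits carry the packet status from erroneous data
 * reporting; they are handed to the SCO socket layer through
 * bt_cb(skb)->sco.pkt_status.
 */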
4751 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4752 {
4753         struct hci_sco_hdr *hdr = (void *) skb->data;
4754         struct hci_conn *conn;
4755         __u16 handle, flags;
4756
4757         skb_pull(skb, HCI_SCO_HDR_SIZE);
4758
4759         handle = __le16_to_cpu(hdr->handle);
4760         flags  = hci_flags(handle);
4761         handle = hci_handle(handle);
4762
4763         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4764                handle, flags);
4765
4766         hdev->stat.sco_rx++;
4767
4768         hci_dev_lock(hdev);
4769         conn = hci_conn_hash_lookup_handle(hdev, handle);
4770         hci_dev_unlock(hdev);
4771
4772         if (conn) {
4773                 /* Send to upper protocol */
4774                 bt_cb(skb)->sco.pkt_status = flags & 0x03;
4775                 sco_recv_scodata(conn, skb);
4776                 return;
4777         }
4778
4779         bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4780                    handle);
4781
4782         kfree_skb(skb);
4783 }
4784
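/* A request is complete when the next command in cmd_q (if any)
 * starts a new request, i.e. has HCI_REQ_START set.
 */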
4785 static bool hci_req_is_complete(struct hci_dev *hdev)
4786 {
4787         struct sk_buff *skb;
4788
4789         skb = skb_peek(&hdev->cmd_q);
4790         if (!skb)
4791                 return true;
4792
4793         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4794 }
4795
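/* Requeue a clone of the last sent command unless it was a reset.
 * This recovers controllers that generate a spontaneous Reset
 * Complete event during initialization.
 */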
4796 static void hci_resend_last(struct hci_dev *hdev)
4797 {
4798         struct hci_command_hdr *sent;
4799         struct sk_buff *skb;
4800         u16 opcode;
4801
4802         if (!hdev->sent_cmd)
4803                 return;
4804
4805         sent = (void *) hdev->sent_cmd->data;
4806         opcode = __le16_to_cpu(sent->opcode);
4807         if (opcode == HCI_OP_RESET)
4808                 return;
4809
4810         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4811         if (!skb)
4812                 return;
4813
4814         skb_queue_head(&hdev->cmd_q, skb);
4815         queue_work(hdev->workqueue, &hdev->cmd_work);
4816 }
4817
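/* Match a completed opcode against the currently sent command and,
 * if the request it belongs to has finished, hand back the request's
 * completion callback through req_complete/req_complete_skb.
 */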
4818 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4819                           hci_req_complete_t *req_complete,
4820                           hci_req_complete_skb_t *req_complete_skb)
4821 {
4822         struct sk_buff *skb;
4823         unsigned long flags;
4824
4825         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4826
4827         /* If the completed command doesn't match the last one that was
4828          * sent, it needs special handling.
4829          */
4830         if (!hci_sent_cmd_data(hdev, opcode)) {
4831                 /* Some CSR based controllers generate a spontaneous
4832                  * reset complete event during init and any pending
4833                  * command will never be completed. In such a case we
4834                  * need to resend whatever was the last sent
4835                  * command.
4836                  */
4837                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4838                         hci_resend_last(hdev);
4839
4840                 return;
4841         }
4842
4843         /* If we reach this point this event matches the last command sent */
4844         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4845
4846         /* If the command succeeded and there are still more commands in
4847          * this request, the request is not yet complete.
4848          */
4849         if (!status && !hci_req_is_complete(hdev))
4850                 return;
4851
4852         /* If this was the last command in a request, the complete
4853          * callback is found in hdev->sent_cmd instead of the
4854          * command queue (hdev->cmd_q).
4855          */
4856         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4857                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4858                 return;
4859         }
4860
4861         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4862                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4863                 return;
4864         }
4865
4866         /* Remove all pending commands belonging to this request */
4867         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4868         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4869                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4870                         __skb_queue_head(&hdev->cmd_q, skb);
4871                         break;
4872                 }
4873
4874                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4875                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4876                 else
4877                         *req_complete = bt_cb(skb)->hci.req_complete;
4878                 kfree_skb(skb);
4879         }
4880         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4881 }
4882
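/* RX work: drain rx_q, mirror every frame to the monitor (and to
 * promiscuous sockets) and then demultiplex it by packet type.
 */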
4883 static void hci_rx_work(struct work_struct *work)
4884 {
4885         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4886         struct sk_buff *skb;
4887
4888         BT_DBG("%s", hdev->name);
4889
4890         while ((skb = skb_dequeue(&hdev->rx_q))) {
4891                 /* Send copy to monitor */
4892                 hci_send_to_monitor(hdev, skb);
4893
4894                 if (atomic_read(&hdev->promisc)) {
4895                         /* Send copy to the sockets */
4896                         hci_send_to_sock(hdev, skb);
4897                 }
4898
4899                 /* If the device has been opened in HCI_USER_CHANNEL,
4900                  * userspace has exclusive access to the device.
4901                  * While the device is still in HCI_INIT, however, we
4902                  * need to keep processing packets so that the driver
4903                  * can complete its setup().
4904                  */
4905                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4906                     !test_bit(HCI_INIT, &hdev->flags)) {
4907                         kfree_skb(skb);
4908                         continue;
4909                 }
4910
4911                 if (test_bit(HCI_INIT, &hdev->flags)) {
4912                         /* Don't process data packets in this state. */
4913                         switch (hci_skb_pkt_type(skb)) {
4914                         case HCI_ACLDATA_PKT:
4915                         case HCI_SCODATA_PKT:
4916                         case HCI_ISODATA_PKT:
4917                                 kfree_skb(skb);
4918                                 continue;
4919                         }
4920                 }
4921
4922                 /* Process frame */
4923                 switch (hci_skb_pkt_type(skb)) {
4924                 case HCI_EVENT_PKT:
4925                         BT_DBG("%s Event packet", hdev->name);
4926                         hci_event_packet(hdev, skb);
4927                         break;
4928
4929                 case HCI_ACLDATA_PKT:
4930                         BT_DBG("%s ACL data packet", hdev->name);
4931                         hci_acldata_packet(hdev, skb);
4932                         break;
4933
4934                 case HCI_SCODATA_PKT:
4935                         BT_DBG("%s SCO data packet", hdev->name);
4936                         hci_scodata_packet(hdev, skb);
4937                         break;
4938
4939                 default:
4940                         kfree_skb(skb);
4941                         break;
4942                 }
4943         }
4944 }
4945
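/* CMD work: send the next queued command once the controller has a
 * command credit (cmd_cnt). A clone is kept in hdev->sent_cmd for
 * completion matching, and cmd_timer serves as a watchdog for a lost
 * Command Complete/Status event.
 */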
4946 static void hci_cmd_work(struct work_struct *work)
4947 {
4948         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4949         struct sk_buff *skb;
4950
4951         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4952                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4953
4954         /* Send queued commands */
4955         if (atomic_read(&hdev->cmd_cnt)) {
4956                 skb = skb_dequeue(&hdev->cmd_q);
4957                 if (!skb)
4958                         return;
4959
4960                 kfree_skb(hdev->sent_cmd);
4961
4962                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4963                 if (hdev->sent_cmd) {
4964                         if (hci_req_status_pend(hdev))
4965                                 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4966                         atomic_dec(&hdev->cmd_cnt);
4967                         hci_send_frame(hdev, skb);
4968                         if (test_bit(HCI_RESET, &hdev->flags))
4969                                 cancel_delayed_work(&hdev->cmd_timer);
4970                         else
4971                                 schedule_delayed_work(&hdev->cmd_timer,
4972                                                       HCI_CMD_TIMEOUT);
4973                 } else {
4974                         skb_queue_head(&hdev->cmd_q, skb);
4975                         queue_work(hdev->workqueue, &hdev->cmd_work);
4976                 }
4977         }
4978 }