Bluetooth: Add appearance to default scan rsp data
net/bluetooth/hci_request.c (linux-2.6-microblaze.git)
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}
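
/* Typical request lifecycle (illustrative sketch, not lifted from a real
 * caller): build the request on the stack, queue one or more commands and
 * then hand the whole batch to the command work queue.
 *
 *      struct hci_request req;
 *      u8 scan = SCAN_PAGE;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *      hci_req_run(&req, NULL);
 *
 * hci_req_add() only queues skbs on req->cmd_q; nothing reaches the
 * controller until hci_req_run() splices the queue onto hdev->cmd_q.
 */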

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
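
/* Illustrative use of the synchronous helpers above (the opcode and
 * response struct are real HCI definitions, but the surrounding caller
 * is hypothetical). __hci_cmd_sync() blocks until the matching Command
 * Complete event arrives or the timeout expires:
 *
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      (parse skb->data as struct hci_rp_read_local_version, then)
 *      kfree_skb(skb);
 *
 * Callers are normally serialized via hci_req_sync_lock() since these
 * helpers share the hdev->req_status/req_skb state.
 */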

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}
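
/* A request builder passed to __hci_req_sync() or hci_req_sync() has the
 * shape sketched below (SOME_OPCODE is a placeholder; update_scan()
 * further down in this file is a real instance of the pattern):
 *
 *      static int my_builder(struct hci_request *req, unsigned long opt)
 *      {
 *              u8 param = (u8)opt;
 *
 *              hci_req_add(req, SOME_OPCODE, sizeof(param), &param);
 *              return 0;
 *      }
 *
 * Returning non-zero aborts the request before anything is sent, while a
 * builder that queues no commands at all makes __hci_req_sync() succeed
 * with hci_status 0 thanks to the -ENODATA special case above.
 */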

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}
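
/* Layout of the skb built above: a 3-byte HCI command header followed by
 * plen parameter bytes. Worked example with Write Scan Enable
 * (HCI_OP_WRITE_SCAN_ENABLE == 0x0c1a) and the single parameter SCAN_PAGE:
 *
 *      skb->data: 1a 0c 01 02
 *                 opcode (little endian), plen, parameter
 */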

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure that proper values are set for the RSSI threshold and
         * UUID filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

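/* The list builders below emit standard EIR/AD structures: a length octet
 * covering the type octet plus the data, the type octet, then the payload.
 * Worked example for a single 16-bit Serial Port UUID (0x1101):
 *
 *      03 03 01 11
 *      length 3, EIR_UUID16_ALL (0x03), UUID 0x1101 little endian
 *
 * uuids_start[0] starts out as 1 (just the type octet) and is bumped by
 * the size of every UUID that gets appended.
 */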
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the white list.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of devices is larger than the number of
         * available white list entries in the controller, then
         * just abort and return a filter policy value that does
         * not use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using resolvable random addresses and
         * therefore has LE privacy enabled, controllers with Extended
         * Scanner Filter Policies support can also handle directed
         * advertising.
         *
         * So instead of using filter policies 0x00 (no white list)
         * and 0x01 (white list enabled) use the new filter policies
         * 0x02 (no white list) and 0x03 (white list enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}
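
/* Worked example of the two commands queued above, assuming the defaults
 * hdev->le_scan_interval == 0x0060 (60 ms) and hdev->le_scan_window ==
 * 0x0030 (30 ms) set at allocation time (both are tunable, so treat the
 * numbers as illustrative):
 *
 *      LE Set Scan Parameters: type 0x00 (passive), interval 0x0060,
 *                              window 0x0030, own_address_type, policy
 *      LE Set Scan Enable:     enable 0x01, filter_dup 0x01
 */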

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields.
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, u32 flags)
{
        /* If privacy is not enabled, don't use an RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled, use an RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled, don't use an RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use an RPA.
         */
        return true;
}
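
/* The decision above as a truth table (a restatement of the code, not new
 * behavior):
 *
 *      HCI_PRIVACY  HCI_LIMITED_PRIVACY  discov + bondable  ->  use RPA?
 *          off              -                    -                no
 *          on              off                   -                yes
 *          on              on                   yes               no
 *          on              on                   no                yes
 */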

void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        if (hci_conn_num(hdev, LE_LINK) > 0)
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t complete_len;
        size_t short_len;
        int max_len;

        max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
        complete_len = strlen(hdev->dev_name);
        short_len = strlen(hdev->short_name);

        /* no space left for name */
        if (max_len < 1)
                return ad_len;

        /* no name set */
        if (!complete_len)
                return ad_len;

        /* complete name fits and is no longer than the max short name length */
        if (complete_len <= max_len &&
            complete_len <= HCI_MAX_SHORT_NAME_LENGTH) {
                ptr[0] = complete_len + 1;
                ptr[1] = EIR_NAME_COMPLETE;
                memcpy(ptr + 2, hdev->dev_name, complete_len);

                return ad_len + complete_len + 2;
        }

        /* short name set and fits */
        if (short_len && short_len <= max_len) {
                ptr[0] = short_len + 1;
                ptr[1] = EIR_NAME_SHORT;
                memcpy(ptr + 2, hdev->short_name, short_len);

                return ad_len + short_len + 2;
        }

        /* no short name set so shorten complete name */
        if (!short_len) {
                ptr[0] = max_len + 1;
                ptr[1] = EIR_NAME_SHORT;
                memcpy(ptr + 2, hdev->dev_name, max_len);

                return ad_len + max_len + 2;
        }

        return ad_len;
}
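
/* Worked example (hypothetical name): with dev_name "Kernel-Test-Device"
 * (18 bytes), no short name set and ad_len already at 21, max_len is
 * 31 - 21 - 2 = 8, so the last branch above emits a shortened name:
 *
 *      09 08 'K' 'e' 'r' 'n' 'e' 'l' '-' 'T'
 *      length 9, EIR_NAME_SHORT (0x08), first 8 bytes of the name
 */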

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 scan_rsp_len = 0;

        if (hdev->appearance) {
                ptr[0] = 3;
                ptr[1] = EIR_APPEARANCE;
                put_unaligned_le16(hdev->appearance, ptr + 2);
                scan_rsp_len += 4;
        }

        return append_local_name(hdev, ptr + scan_rsp_len, scan_rsp_len);
}
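
/* Worked example of the appearance field added above: with
 * hdev->appearance set to 0x0341 (Heart Rate Belt), the scan response
 * data starts with
 *
 *      03 19 41 03
 *      length 3, EIR_APPEARANCE (0x19), appearance value little endian
 *
 * followed by whatever append_local_name() adds.
 */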

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
                ptr[0] = 3;
                ptr[1] = EIR_APPEARANCE;
                put_unaligned_le16(hdev->appearance, ptr + 2);
                scan_rsp_len += 4;
                ptr += 4;
        }

        memcpy(ptr, adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;
        ptr += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}
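
/* Worked example of the "Flags" field emitted above for an LE-only,
 * general-discoverable controller (LE_AD_GENERAL | LE_AD_NO_BREDR):
 *
 *      02 01 06
 *      length 2, EIR_FLAGS (0x01), flags value 0x06
 */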

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a
         * timeout in case further instances are added later on.
         *
         * If the remaining lifetime of the instance is more than the
         * duration then the timeout corresponds to the duration, otherwise
         * it will be reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without a time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}
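
/* Timeout bookkeeping example (hypothetical numbers): an instance added
 * with duration 25 s and timeout 60 s is scheduled in slices of
 * 25, 25 and finally 10 seconds, with remaining_time dropping
 * 60 -> 35 -> 10 -> 0. Once the last slice expires, adv_timeout_expire()
 * clears the instance since its remaining lifetime is zero.
 */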

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: The instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
                                struct hci_request *req, u8 instance,
                                bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * the current RPA has expired or something other than the
         * current RPA is in use, then generate a new one.
         */
        if (use_rpa) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without a resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor a static address is being used, so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}
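
/* Summary of the address selection above (a restatement of the code
 * paths, not new behavior):
 *
 *      use_rpa                        -> random address, RPA (regenerated
 *                                        whenever it has expired)
 *      require_privacy without RPA    -> random address, fresh NRPA
 *      static address forced, no
 *      public address, or LE-only
 *      with static address set        -> random address, hdev->static_addr
 *      otherwise                      -> public address, hdev->bdaddr
 */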
1505
1506 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1507 {
1508         struct bdaddr_list *b;
1509
1510         list_for_each_entry(b, &hdev->whitelist, list) {
1511                 struct hci_conn *conn;
1512
1513                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1514                 if (!conn)
1515                         return true;
1516
1517                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1518                         return true;
1519         }
1520
1521         return false;
1522 }
1523
1524 void __hci_req_update_scan(struct hci_request *req)
1525 {
1526         struct hci_dev *hdev = req->hdev;
1527         u8 scan;
1528
1529         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1530                 return;
1531
1532         if (!hdev_is_powered(hdev))
1533                 return;
1534
1535         if (mgmt_powering_down(hdev))
1536                 return;
1537
1538         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1539             disconnected_whitelist_entries(hdev))
1540                 scan = SCAN_PAGE;
1541         else
1542                 scan = SCAN_DISABLED;
1543
1544         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1545                 scan |= SCAN_INQUIRY;
1546
1547         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1548             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1549                 return;
1550
1551         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1552 }
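
/* For reference (a Bluetooth spec detail, not taken from this file): the
 * single parameter byte of Write Scan Enable is a bitmask of
 * SCAN_INQUIRY (0x01) and SCAN_PAGE (0x02). A device that is both
 * connectable and discoverable therefore sends 0x02 | 0x01 == 0x03,
 * while SCAN_DISABLED is 0x00.
 */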
1553
1554 static int update_scan(struct hci_request *req, unsigned long opt)
1555 {
1556         hci_dev_lock(req->hdev);
1557         __hci_req_update_scan(req);
1558         hci_dev_unlock(req->hdev);
1559         return 0;
1560 }
1561
1562 static void scan_update_work(struct work_struct *work)
1563 {
1564         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1565
1566         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1567 }
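
/* Illustrative sketch, not part of the original file: the container_of()
 * idiom used by the work handlers in this file. Given a pointer to an
 * embedded member, the enclosing structure is recovered by subtracting
 * the member's offset. A standalone C equivalent, with hypothetical
 * names:
 */
#include <stddef.h>

struct example_dev {
	int id;
	struct { int pending; } work;	/* hypothetical embedded work item */
};

#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Usage: a callback handed &dev->work can recover dev itself:
 *
 *	struct example_dev *dev =
 *		example_container_of(wp, struct example_dev, work);
 */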
1568
1569 static int connectable_update(struct hci_request *req, unsigned long opt)
1570 {
1571         struct hci_dev *hdev = req->hdev;
1572
1573         hci_dev_lock(hdev);
1574
1575         __hci_req_update_scan(req);
1576
1577         /* If BR/EDR is not enabled and we disable advertising as a
1578          * by-product of disabling connectable mode, we need to update
1579          * the advertising flags.
1580          */
1581         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1582                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1583
1584         /* Update the advertising parameters if necessary */
1585         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1586             !list_empty(&hdev->adv_instances))
1587                 __hci_req_enable_advertising(req);
1588
1589         __hci_update_background_scan(req);
1590
1591         hci_dev_unlock(hdev);
1592
1593         return 0;
1594 }
1595
1596 static void connectable_update_work(struct work_struct *work)
1597 {
1598         struct hci_dev *hdev = container_of(work, struct hci_dev,
1599                                             connectable_update);
1600         u8 status;
1601
1602         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1603         mgmt_set_connectable_complete(hdev, status);
1604 }
1605
1606 static u8 get_service_classes(struct hci_dev *hdev)
1607 {
1608         struct bt_uuid *uuid;
1609         u8 val = 0;
1610
1611         list_for_each_entry(uuid, &hdev->uuids, list)
1612                 val |= uuid->svc_hint;
1613
1614         return val;
1615 }
1616
1617 void __hci_req_update_class(struct hci_request *req)
1618 {
1619         struct hci_dev *hdev = req->hdev;
1620         u8 cod[3];
1621
1622         BT_DBG("%s", hdev->name);
1623
1624         if (!hdev_is_powered(hdev))
1625                 return;
1626
1627         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1628                 return;
1629
1630         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1631                 return;
1632
1633         cod[0] = hdev->minor_class;
1634         cod[1] = hdev->major_class;
1635         cod[2] = get_service_classes(hdev);
1636
1637         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1638                 cod[1] |= 0x20;
1639
1640         if (memcmp(cod, hdev->dev_class, 3) == 0)
1641                 return;
1642
1643         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1644 }
1645
1646 static void write_iac(struct hci_request *req)
1647 {
1648         struct hci_dev *hdev = req->hdev;
1649         struct hci_cp_write_current_iac_lap cp;
1650
1651         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1652                 return;
1653
1654         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1655                 /* Limited discoverable mode */
1656                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1657                 cp.iac_lap[0] = 0x00;   /* LIAC */
1658                 cp.iac_lap[1] = 0x8b;
1659                 cp.iac_lap[2] = 0x9e;
1660                 cp.iac_lap[3] = 0x33;   /* GIAC */
1661                 cp.iac_lap[4] = 0x8b;
1662                 cp.iac_lap[5] = 0x9e;
1663         } else {
1664                 /* General discoverable mode */
1665                 cp.num_iac = 1;
1666                 cp.iac_lap[0] = 0x33;   /* GIAC */
1667                 cp.iac_lap[1] = 0x8b;
1668                 cp.iac_lap[2] = 0x9e;
1669         }
1670
1671         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1672                     (cp.num_iac * 3) + 1, &cp);
1673 }
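
/* For reference (a Bluetooth spec detail, not taken from this file): the
 * Limited Inquiry Access Code LAP is 0x9e8b00 and the General Inquiry
 * Access Code LAP is 0x9e8b33. HCI carries each LAP least-significant
 * byte first, which is why the LIAC appears above as { 0x00, 0x8b, 0x9e }
 * and the GIAC as { 0x33, 0x8b, 0x9e }.
 */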
1674
1675 static int discoverable_update(struct hci_request *req, unsigned long opt)
1676 {
1677         struct hci_dev *hdev = req->hdev;
1678
1679         hci_dev_lock(hdev);
1680
1681         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1682                 write_iac(req);
1683                 __hci_req_update_scan(req);
1684                 __hci_req_update_class(req);
1685         }
1686
1687         /* Advertising instances don't use the global discoverable setting, so
1688          * only update AD if advertising was enabled using Set Advertising.
1689          */
1690         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1691                 __hci_req_update_adv_data(req, 0x00);
1692
1693                 /* Discoverable mode affects the local advertising
1694                  * address in limited privacy mode.
1695                  */
1696                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1697                         __hci_req_enable_advertising(req);
1698         }
1699
1700         hci_dev_unlock(hdev);
1701
1702         return 0;
1703 }
1704
1705 static void discoverable_update_work(struct work_struct *work)
1706 {
1707         struct hci_dev *hdev = container_of(work, struct hci_dev,
1708                                             discoverable_update);
1709         u8 status;
1710
1711         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1712         mgmt_set_discoverable_complete(hdev, status);
1713 }
1714
1715 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1716                       u8 reason)
1717 {
1718         switch (conn->state) {
1719         case BT_CONNECTED:
1720         case BT_CONFIG:
1721                 if (conn->type == AMP_LINK) {
1722                         struct hci_cp_disconn_phy_link cp;
1723
1724                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1725                         cp.reason = reason;
1726                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1727                                     &cp);
1728                 } else {
1729                         struct hci_cp_disconnect dc;
1730
1731                         dc.handle = cpu_to_le16(conn->handle);
1732                         dc.reason = reason;
1733                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1734                 }
1735
1736                 conn->state = BT_DISCONN;
1737
1738                 break;
1739         case BT_CONNECT:
1740                 if (conn->type == LE_LINK) {
1741                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1742                                 break;
1743                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1744                                     0, NULL);
1745                 } else if (conn->type == ACL_LINK) {
1746                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1747                                 break;
1748                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1749                                     6, &conn->dst);
1750                 }
1751                 break;
1752         case BT_CONNECT2:
1753                 if (conn->type == ACL_LINK) {
1754                         struct hci_cp_reject_conn_req rej;
1755
1756                         bacpy(&rej.bdaddr, &conn->dst);
1757                         rej.reason = reason;
1758
1759                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1760                                     sizeof(rej), &rej);
1761                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1762                         struct hci_cp_reject_sync_conn_req rej;
1763
1764                         bacpy(&rej.bdaddr, &conn->dst);
1765
1766                         /* SCO rejection has its own limited set of
1767                          * allowed error values (0x0D-0x0F), which isn't
1768                          * compatible with most values passed to this
1769                          * function. To be safe, hard-code one of the
1770                          * values that's suitable for SCO.
1771                          */
1772                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1773
1774                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1775                                     sizeof(rej), &rej);
1776                 }
1777                 break;
1778         default:
1779                 conn->state = BT_CLOSED;
1780                 break;
1781         }
1782 }
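
/* Summary of the state handling implemented above (descriptive only):
 *
 *	BT_CONNECTED/BT_CONFIG	-> Disconnect (or Disconnect Physical Link
 *				   for AMP), state moves to BT_DISCONN
 *	BT_CONNECT		-> LE Create Connection Cancel (LE) or
 *				   Create Connection Cancel (ACL)
 *	BT_CONNECT2		-> Reject Connection Request (ACL) or
 *				   Reject Synchronous Connection Request
 *				   (SCO/eSCO)
 *	anything else		-> no command, state moves to BT_CLOSED
 */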
1783
1784 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1785 {
1786         if (status)
1787                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1788 }
1789
1790 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1791 {
1792         struct hci_request req;
1793         int err;
1794
1795         hci_req_init(&req, conn->hdev);
1796
1797         __hci_abort_conn(&req, conn, reason);
1798
1799         err = hci_req_run(&req, abort_conn_complete);
1800         if (err && err != -ENODATA) {
1801                 BT_ERR("Failed to run HCI request: err %d", err);
1802                 return err;
1803         }
1804
1805         return 0;
1806 }
1807
1808 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1809 {
1810         hci_dev_lock(req->hdev);
1811         __hci_update_background_scan(req);
1812         hci_dev_unlock(req->hdev);
1813         return 0;
1814 }
1815
1816 static void bg_scan_update(struct work_struct *work)
1817 {
1818         struct hci_dev *hdev = container_of(work, struct hci_dev,
1819                                             bg_scan_update);
1820         struct hci_conn *conn;
1821         u8 status;
1822         int err;
1823
1824         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1825         if (!err)
1826                 return;
1827
1828         hci_dev_lock(hdev);
1829
1830         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1831         if (conn)
1832                 hci_le_conn_failed(conn, status);
1833
1834         hci_dev_unlock(hdev);
1835 }
1836
1837 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1838 {
1839         hci_req_add_le_scan_disable(req);
1840         return 0;
1841 }
1842
1843 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1844 {
1845         u8 length = opt;
1846         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1847         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1848         struct hci_cp_inquiry cp;
1849
1850         BT_DBG("%s", req->hdev->name);
1851
1852         hci_dev_lock(req->hdev);
1853         hci_inquiry_cache_flush(req->hdev);
1854         hci_dev_unlock(req->hdev);
1855
1856         memset(&cp, 0, sizeof(cp));
1857
1858         if (req->hdev->discovery.limited)
1859                 memcpy(&cp.lap, liac, sizeof(cp.lap));
1860         else
1861                 memcpy(&cp.lap, giac, sizeof(cp.lap));
1862
1863         cp.length = length;
1864
1865         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1866
1867         return 0;
1868 }
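
/* For reference (a Bluetooth spec detail, not taken from this file):
 * cp.length is expressed in units of 1.28 seconds, so e.g. a length of
 * 0x08 yields an inquiry lasting 8 * 1.28 = 10.24 seconds.
 */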
1869
1870 static void le_scan_disable_work(struct work_struct *work)
1871 {
1872         struct hci_dev *hdev = container_of(work, struct hci_dev,
1873                                             le_scan_disable.work);
1874         u8 status;
1875
1876         BT_DBG("%s", hdev->name);
1877
1878         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1879                 return;
1880
1881         cancel_delayed_work(&hdev->le_scan_restart);
1882
1883         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1884         if (status) {
1885                 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1886                 return;
1887         }
1888
1889         hdev->discovery.scan_start = 0;
1890
1891         /* If we were running an LE-only scan, change the discovery
1892          * state. If we were running both LE and BR/EDR inquiry
1893          * simultaneously, and BR/EDR inquiry is already finished, stop
1894          * discovery; otherwise BR/EDR inquiry will stop discovery when
1895          * it finishes. If we are resolving a remote device name, do not
1896          * change the discovery state.
1897          */
1898
1899         if (hdev->discovery.type == DISCOV_TYPE_LE)
1900                 goto discov_stopped;
1901
1902         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1903                 return;
1904
1905         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1906                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1907                     hdev->discovery.state != DISCOVERY_RESOLVING)
1908                         goto discov_stopped;
1909
1910                 return;
1911         }
1912
1913         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1914                      HCI_CMD_TIMEOUT, &status);
1915         if (status) {
1916                 BT_ERR("Inquiry failed: status 0x%02x", status);
1917                 goto discov_stopped;
1918         }
1919
1920         return;
1921
1922 discov_stopped:
1923         hci_dev_lock(hdev);
1924         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1925         hci_dev_unlock(hdev);
1926 }
1927
1928 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1929 {
1930         struct hci_dev *hdev = req->hdev;
1931         struct hci_cp_le_set_scan_enable cp;
1932
1933         /* If the controller is not scanning, we are done. */
1934         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1935                 return 0;
1936
1937         hci_req_add_le_scan_disable(req);
1938
1939         memset(&cp, 0, sizeof(cp));
1940         cp.enable = LE_SCAN_ENABLE;
1941         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1942         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1943
1944         return 0;
1945 }
1946
1947 static void le_scan_restart_work(struct work_struct *work)
1948 {
1949         struct hci_dev *hdev = container_of(work, struct hci_dev,
1950                                             le_scan_restart.work);
1951         unsigned long timeout, duration, scan_start, now;
1952         u8 status;
1953
1954         BT_DBG("%s", hdev->name);
1955
1956         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1957         if (status) {
1958                 BT_ERR("Failed to restart LE scan: status 0x%02x", status);
1959                 return;
1960         }
1961
1962         hci_dev_lock(hdev);
1963
1964         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1965             !hdev->discovery.scan_start)
1966                 goto unlock;
1967
1968         /* When the scan was started, hdev->le_scan_disable was queued to
1969          * run at scan_start + duration. That work was canceled during the
1970          * scan restart, so queue it again with the remaining timeout to
1971          * make sure the scan does not run indefinitely.
1972          */
1973         duration = hdev->discovery.scan_duration;
1974         scan_start = hdev->discovery.scan_start;
1975         now = jiffies;
1976         if (now - scan_start <= duration) {
1977                 int elapsed;
1978
1979                 if (now >= scan_start)
1980                         elapsed = now - scan_start;
1981                 else
1982                         elapsed = ULONG_MAX - scan_start + now;
1983
1984                 timeout = duration - elapsed;
1985         } else {
1986                 timeout = 0;
1987         }
1988
1989         queue_delayed_work(hdev->req_workqueue,
1990                            &hdev->le_scan_disable, timeout);
1991
1992 unlock:
1993         hci_dev_unlock(hdev);
1994 }
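
/* Worked example of the wraparound branch above (illustrative only): if
 * the jiffies counter wrapped so that scan_start == ULONG_MAX - 100 and
 * now == 50, then elapsed == ULONG_MAX - scan_start + now == 150, and
 * the disable work is requeued after duration - 150 jiffies.
 */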
1995
1996 static void disable_advertising(struct hci_request *req)
1997 {
1998         u8 enable = 0x00;
1999
2000         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
2001 }
2002
2003 static int active_scan(struct hci_request *req, unsigned long opt)
2004 {
2005         u16 interval = opt;
2006         struct hci_dev *hdev = req->hdev;
2007         struct hci_cp_le_set_scan_param param_cp;
2008         struct hci_cp_le_set_scan_enable enable_cp;
2009         u8 own_addr_type;
2010         int err;
2011
2012         BT_DBG("%s", hdev->name);
2013
2014         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2015                 hci_dev_lock(hdev);
2016
2017                 /* Don't let discovery abort an outgoing connection attempt
2018                  * that's using directed advertising.
2019                  */
2020                 if (hci_lookup_le_connect(hdev)) {
2021                         hci_dev_unlock(hdev);
2022                         return -EBUSY;
2023                 }
2024
2025                 cancel_adv_timeout(hdev);
2026                 hci_dev_unlock(hdev);
2027
2028                 disable_advertising(req);
2029         }
2030
2031         /* If the controller is scanning, background scanning is running.
2032          * Temporarily stop it so that the discovery scan parameters can
2033          * be set.
2034          */
2035         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2036                 hci_req_add_le_scan_disable(req);
2037
2038         /* All active scans will be done with either a resolvable private
2039          * address (when the privacy feature has been enabled) or a
2040          * non-resolvable private address.
2041          */
2042         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2043                                         &own_addr_type);
2044         if (err < 0)
2045                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2046
2047         memset(&param_cp, 0, sizeof(param_cp));
2048         param_cp.type = LE_SCAN_ACTIVE;
2049         param_cp.interval = cpu_to_le16(interval);
2050         param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2051         param_cp.own_address_type = own_addr_type;
2052
2053         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2054                     &param_cp);
2055
2056         memset(&enable_cp, 0, sizeof(enable_cp));
2057         enable_cp.enable = LE_SCAN_ENABLE;
2058         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2059
2060         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2061                     &enable_cp);
2062
2063         return 0;
2064 }
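
/* For reference (a Bluetooth spec detail, not taken from this file): LE
 * scan interval and window are expressed in units of 0.625 ms, so e.g.
 * an interval value of 0x0060 (96 decimal) corresponds to
 * 96 * 0.625 = 60 ms.
 */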
2065
2066 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2067 {
2068         int err;
2069
2070         BT_DBG("%s", req->hdev->name);
2071
2072         err = active_scan(req, opt);
2073         if (err)
2074                 return err;
2075
2076         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2077 }
2078
2079 static void start_discovery(struct hci_dev *hdev, u8 *status)
2080 {
2081         unsigned long timeout;
2082
2083         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2084
2085         switch (hdev->discovery.type) {
2086         case DISCOV_TYPE_BREDR:
2087                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2088                         hci_req_sync(hdev, bredr_inquiry,
2089                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2090                                      status);
2091                 return;
2092         case DISCOV_TYPE_INTERLEAVED:
2093                 /* When running simultaneous discovery, the LE scanning time
2094                  * should occupy the whole discovery time since BR/EDR inquiry
2095                  * and LE scanning are scheduled by the controller.
2096                  *
2097                  * Interleaved discovery, in comparison, runs BR/EDR
2098                  * inquiry and LE scanning sequentially with separate
2099                  * timeouts.
2100                  */
2101                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2102                              &hdev->quirks)) {
2103                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2104                         /* During simultaneous discovery, we double the LE
2105                          * scan interval to leave the controller some time
2106                          * to do BR/EDR inquiry.
2107                          */
2108                         hci_req_sync(hdev, interleaved_discov,
2109                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2110                                      status);
2111                         break;
2112                 }
2113
2114                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2115                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2116                              HCI_CMD_TIMEOUT, status);
2117                 break;
2118         case DISCOV_TYPE_LE:
2119                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2120                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2121                              HCI_CMD_TIMEOUT, status);
2122                 break;
2123         default:
2124                 *status = HCI_ERROR_UNSPECIFIED;
2125                 return;
2126         }
2127
2128         if (*status)
2129                 return;
2130
2131         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2132
2133         /* When service discovery is used and the controller has a
2134          * strict duplicate filter, it is important to remember the
2135          * start and duration of the scan. This is required for
2136          * restarting scanning during the discovery phase.
2137          */
2138         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2139             hdev->discovery.result_filtering) {
2140                 hdev->discovery.scan_start = jiffies;
2141                 hdev->discovery.scan_duration = timeout;
2142         }
2143
2144         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2145                            timeout);
2146 }
2147
2148 bool hci_req_stop_discovery(struct hci_request *req)
2149 {
2150         struct hci_dev *hdev = req->hdev;
2151         struct discovery_state *d = &hdev->discovery;
2152         struct hci_cp_remote_name_req_cancel cp;
2153         struct inquiry_entry *e;
2154         bool ret = false;
2155
2156         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2157
2158         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2159                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2160                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2161
2162                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2163                         cancel_delayed_work(&hdev->le_scan_disable);
2164                         hci_req_add_le_scan_disable(req);
2165                 }
2166
2167                 ret = true;
2168         } else {
2169                 /* Passive scanning */
2170                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2171                         hci_req_add_le_scan_disable(req);
2172                         ret = true;
2173                 }
2174         }
2175
2176         /* No further actions needed for LE-only discovery */
2177         if (d->type == DISCOV_TYPE_LE)
2178                 return ret;
2179
2180         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2181                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2182                                                      NAME_PENDING);
2183                 if (!e)
2184                         return ret;
2185
2186                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2187                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2188                             &cp);
2189                 ret = true;
2190         }
2191
2192         return ret;
2193 }
2194
2195 static int stop_discovery(struct hci_request *req, unsigned long opt)
2196 {
2197         hci_dev_lock(req->hdev);
2198         hci_req_stop_discovery(req);
2199         hci_dev_unlock(req->hdev);
2200
2201         return 0;
2202 }
2203
2204 static void discov_update(struct work_struct *work)
2205 {
2206         struct hci_dev *hdev = container_of(work, struct hci_dev,
2207                                             discov_update);
2208         u8 status = 0;
2209
2210         switch (hdev->discovery.state) {
2211         case DISCOVERY_STARTING:
2212                 start_discovery(hdev, &status);
2213                 mgmt_start_discovery_complete(hdev, status);
2214                 if (status)
2215                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2216                 else
2217                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2218                 break;
2219         case DISCOVERY_STOPPING:
2220                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2221                 mgmt_stop_discovery_complete(hdev, status);
2222                 if (!status)
2223                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2224                 break;
2225         case DISCOVERY_STOPPED:
2226         default:
2227                 return;
2228         }
2229 }
2230
2231 static void discov_off(struct work_struct *work)
2232 {
2233         struct hci_dev *hdev = container_of(work, struct hci_dev,
2234                                             discov_off.work);
2235
2236         BT_DBG("%s", hdev->name);
2237
2238         hci_dev_lock(hdev);
2239
2240         /* When the discoverable timeout triggers, just make sure the
2241          * limited discoverable flag is cleared. Even in the case of a
2242          * timeout triggered from general discoverable mode, it is safe
2243          * to unconditionally clear the flag.
2244          */
2245         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2246         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2247         hdev->discov_timeout = 0;
2248
2249         hci_dev_unlock(hdev);
2250
2251         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2252         mgmt_new_settings(hdev);
2253 }
2254
2255 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2256 {
2257         struct hci_dev *hdev = req->hdev;
2258         u8 link_sec;
2259
2260         hci_dev_lock(hdev);
2261
2262         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2263             !lmp_host_ssp_capable(hdev)) {
2264                 u8 mode = 0x01;
2265
2266                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2267
2268                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2269                         u8 support = 0x01;
2270
2271                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2272                                     sizeof(support), &support);
2273                 }
2274         }
2275
2276         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2277             lmp_bredr_capable(hdev)) {
2278                 struct hci_cp_write_le_host_supported cp;
2279
2280                 cp.le = 0x01;
2281                 cp.simul = 0x00;
2282
2283                 /* Check first if we already have the right
2284                  * host state (host features set)
2285                  */
2286                 if (cp.le != lmp_host_le_capable(hdev) ||
2287                     cp.simul != lmp_host_le_br_capable(hdev))
2288                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2289                                     sizeof(cp), &cp);
2290         }
2291
2292         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2293                 /* Make sure the controller has a good default for
2294                  * advertising data. This also applies to the case
2295                  * where BR/EDR was toggled during the AUTO_OFF phase.
2296                  */
2297                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2298                     list_empty(&hdev->adv_instances)) {
2299                         __hci_req_update_adv_data(req, 0x00);
2300                         __hci_req_update_scan_rsp_data(req, 0x00);
2301
2302                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2303                                 __hci_req_enable_advertising(req);
2304                 } else if (!list_empty(&hdev->adv_instances)) {
2305                         struct adv_info *adv_instance;
2306
2307                         adv_instance = list_first_entry(&hdev->adv_instances,
2308                                                         struct adv_info, list);
2309                         __hci_req_schedule_adv_instance(req,
2310                                                         adv_instance->instance,
2311                                                         true);
2312                 }
2313         }
2314
2315         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2316         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2317                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2318                             sizeof(link_sec), &link_sec);
2319
2320         if (lmp_bredr_capable(hdev)) {
2321                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2322                         __hci_req_write_fast_connectable(req, true);
2323                 else
2324                         __hci_req_write_fast_connectable(req, false);
2325                 __hci_req_update_scan(req);
2326                 __hci_req_update_class(req);
2327                 __hci_req_update_name(req);
2328                 __hci_req_update_eir(req);
2329         }
2330
2331         hci_dev_unlock(hdev);
2332         return 0;
2333 }
2334
2335 int __hci_req_hci_power_on(struct hci_dev *hdev)
2336 {
2337         /* Register the available SMP channels (BR/EDR and LE) only when
2338          * successfully powering on the controller. This late
2339          * registration is required so that LE SMP can clearly decide if
2340          * the public address or static address is used.
2341          */
2342         smp_register(hdev);
2343
2344         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2345                               NULL);
2346 }
2347
2348 void hci_request_setup(struct hci_dev *hdev)
2349 {
2350         INIT_WORK(&hdev->discov_update, discov_update);
2351         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2352         INIT_WORK(&hdev->scan_update, scan_update_work);
2353         INIT_WORK(&hdev->connectable_update, connectable_update_work);
2354         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2355         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2356         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2357         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2358         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2359 }
2360
2361 void hci_request_cancel_all(struct hci_dev *hdev)
2362 {
2363         hci_req_sync_cancel(hdev, ENODEV);
2364
2365         cancel_work_sync(&hdev->discov_update);
2366         cancel_work_sync(&hdev->bg_scan_update);
2367         cancel_work_sync(&hdev->scan_update);
2368         cancel_work_sync(&hdev->connectable_update);
2369         cancel_work_sync(&hdev->discoverable_update);
2370         cancel_delayed_work_sync(&hdev->discov_off);
2371         cancel_delayed_work_sync(&hdev->le_scan_disable);
2372         cancel_delayed_work_sync(&hdev->le_scan_restart);
2373
2374         if (hdev->adv_instance_timeout) {
2375                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2376                 hdev->adv_instance_timeout = 0;
2377         }
2378 }