Bluetooth: Use extra variable to make code more readable
net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
23
24 #include <linux/sched/signal.h>
25
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29
30 #include "smp.h"
31 #include "hci_request.h"
32
33 #define HCI_REQ_DONE      0
34 #define HCI_REQ_PEND      1
35 #define HCI_REQ_CANCELED  2
36
37 #define LE_SUSPEND_SCAN_WINDOW          0x0012
38 #define LE_SUSPEND_SCAN_INTERVAL        0x0060
39
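/* Initialize a request: empty command queue, owning hdev and no error
 * recorded yet. Must be called before queueing any commands on @req.
 */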
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
        return hdev->req_status == HCI_REQ_PEND;
}

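/* Splice the queued commands onto hdev->cmd_q and kick the command
 * worker. Only the last command of the request carries the completion
 * callback, so it runs once the whole request has finished.
 */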
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

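/* Completion callback for synchronous requests: record the result (and
 * the response skb, if any) and wake up the waiter blocked in
 * __hci_req_sync() or __hci_cmd_sync_ev().
 */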
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

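/* Send a single HCI command and wait up to @timeout jiffies for the
 * event @event (0x00 means the regular Command Complete/Status).
 * Returns the response skb on success or an ERR_PTR() on failure.
 */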
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}

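/* Allocate an skb for an HCI command packet: command header (opcode and
 * parameter length) followed by @plen bytes of @param. Returns NULL if
 * the allocation fails.
 */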
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

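/* Build the extended inquiry response data in @data (HCI_MAX_EIR_LENGTH
 * bytes): local name, TX power, Device ID and the 16/32/128-bit UUID
 * lists.
 */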
static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

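/* Queue the command to disable LE scanning, using the extended or the
 * legacy variant depending on controller support. Skipped while
 * scanning is paused for suspend.
 */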
void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
                                u8 bdaddr_type)
{
        struct hci_cp_le_del_from_white_list cp;

        cp.bdaddr_type = bdaddr_type;
        bacpy(&cp.bdaddr, bdaddr);

        bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
                             struct hci_conn_params *params, u8 *num_entries,
                             bool allow_rpa)
{
        struct hci_cp_le_add_to_white_list cp;
        struct hci_dev *hdev = req->hdev;

        /* Already in white list */
        if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
                                   params->addr_type))
                return 0;

        /* White list is full: caller must select the filter policy that
         * accepts all advertising.
         */
        if (*num_entries >= hdev->le_white_list_size)
                return -1;

        /* White list cannot be used with RPAs */
        if (!allow_rpa &&
            hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
                return -1;
        }

        /* During suspend, only wakeable devices can be in whitelist */
        if (hdev->suspended && !params->wakeable)
                return 0;

        *num_entries += 1;
        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

        return 0;
}

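/* Synchronize the controller white list with the pending connection and
 * report lists. Returns the scan filter policy to use: 0x01 if the
 * white list can be used, 0x00 if scanning must accept all advertising.
 */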
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 num_entries = 0;
        bool pend_conn, pend_report;
        /* We allow whitelisting even with RPAs in suspend. In the worst case,
         * we won't be able to wake from devices that use the Privacy 1.2
         * features. Additionally, once we support Privacy 1.2 and IRK
         * offloading, we can update this to also check for those conditions.
         */
        bool allow_rpa = hdev->suspended;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                                      &b->bdaddr,
                                                      b->bdaddr_type);
                pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                                        &b->bdaddr,
                                                        b->bdaddr_type);

                /* If the device is not likely to connect or report,
                 * remove it from the whitelist.
                 */
                if (!pend_conn && !pend_report) {
                        del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
                        continue;
                }

                /* White list cannot be used with RPAs */
                if (!allow_rpa &&
                    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        return 0x00;
                }

                num_entries++;
        }

        /* Now that all stale white list entries have been removed,
         * walk through the list of pending connections and ensure
         * that any new device gets programmed into the controller.
         *
         * If there are more devices than available white list
         * entries in the controller, then just abort and return the
         * filter policy value that does not use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (add_to_white_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space. Abort if space runs out.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (add_to_white_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

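/* Queue the commands to set the scan parameters and enable scanning,
 * using the extended variants when the controller supports them.
 * @interval and @window are in units of 0.625 ms.
 */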
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy)
{
        struct hci_dev *hdev = req->hdev;

        /* Use extended scanning if the controller supports the LE Set
         * Extended Scan Parameters and LE Set Extended Scan Enable
         * commands.
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}

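/* Queue the commands for passive background scanning: pick the own
 * address type, program the white list and start scanning with either
 * the normal or the reduced (suspend) duty cycle parameters.
 */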
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;
        u16 window, interval;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using resolvable random addresses, and
         * hence has LE privacy enabled, controllers that support the
         * Extended Scanner Filter Policies can also handle directed
         * advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        if (hdev->suspended) {
                window = LE_SUSPEND_SCAN_WINDOW;
                interval = LE_SUSPEND_SCAN_INTERVAL;
        } else {
                window = hdev->le_scan_window;
                interval = hdev->le_scan_interval;
        }

        bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
        hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
                           own_addr_type, filter_policy);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        /* Instance 0x00 always sets the local name */
        if (instance == 0x00)
                return 1;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
        struct hci_cp_set_event_filter f;

        memset(&f, 0, sizeof(f));
        f.flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

        /* Update page scan state (since we may have modified it when setting
         * the event filter).
         */
        __hci_req_update_scan(req);
}

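/* Program one Connection Setup event filter per wakeable device and
 * enable page scan only if at least one such filter was set.
 */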
static void hci_req_set_event_filter(struct hci_request *req)
{
        struct bdaddr_list *b;
        struct hci_cp_set_event_filter f;
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        /* Always clear event filter when starting */
        hci_req_clear_event_filter(req);

        list_for_each_entry(b, &hdev->wakeable, list) {
                memset(&f, 0, sizeof(f));
                bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
                f.flt_type = HCI_FLT_CONN_SETUP;
                f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
                f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

                bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
                hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
        }

        scan = !list_empty(&hdev->wakeable) ? SCAN_PAGE : SCAN_DISABLED;
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
        /* Can't change params without disabling first */
        hci_req_add_le_scan_disable(req);

        /* Configure params and enable scanning */
        hci_req_add_le_passive_scan(req);

        /* Block suspend notifier on response */
        set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
                   status);
        if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
            test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
                wake_up(&hdev->suspend_wait_q);
        }
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
        int old_state;
        struct hci_conn *conn;
        struct hci_request req;
        u8 page_scan;
        int disconnect_counter;

        if (next == hdev->suspend_state) {
                bt_dev_dbg(hdev, "Same state before and after: %d", next);
                goto done;
        }

        hdev->suspend_state = next;
        hci_req_init(&req, hdev);

        if (next == BT_SUSPEND_DISCONNECT) {
                /* Mark device as suspended */
                hdev->suspended = true;

                /* Pause discovery if not already stopped */
                old_state = hdev->discovery.state;
                if (old_state != DISCOVERY_STOPPED) {
                        set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
                        queue_work(hdev->req_workqueue, &hdev->discov_update);
                }

                hdev->discovery_paused = true;
                hdev->discovery_old_state = old_state;

                /* Stop advertising */
                old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
                if (old_state) {
                        set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
                        cancel_delayed_work(&hdev->discov_off);
                        queue_delayed_work(hdev->req_workqueue,
                                           &hdev->discov_off, 0);
                }

                hdev->advertising_paused = true;
                hdev->advertising_old_state = old_state;
                /* Disable page scan */
                page_scan = SCAN_DISABLED;
                hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

                /* Disable LE passive scan */
                hci_req_add_le_scan_disable(&req);

                /* Mark task needing completion */
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

                /* Prevent disconnects from causing scanning to be re-enabled */
                hdev->scanning_paused = true;

                /* Run commands before disconnecting */
                hci_req_run(&req, suspend_req_complete);

                disconnect_counter = 0;
                /* Soft disconnect everything (power off) */
                list_for_each_entry(conn, &hdev->conn_hash.list, list) {
                        hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
                        disconnect_counter++;
                }

                if (disconnect_counter > 0) {
                        bt_dev_dbg(hdev,
                                   "Had %d disconnects. Will wait on them",
                                   disconnect_counter);
                        set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
                }
        } else if (next == BT_SUSPEND_COMPLETE) {
                /* Unpause to take care of updating scanning params */
                hdev->scanning_paused = false;
                /* Enable event filter for paired devices */
                hci_req_set_event_filter(&req);
                /* Enable passive scan at lower duty cycle */
                hci_req_config_le_suspend_scan(&req);
                /* Pause scan changes again. */
                hdev->scanning_paused = true;
                hci_req_run(&req, suspend_req_complete);
        } else {
                hdev->suspended = false;
                hdev->scanning_paused = false;

                hci_req_clear_event_filter(&req);
                /* Reset passive/background scanning to normal */
                hci_req_config_le_suspend_scan(&req);

                /* Unpause advertising */
                hdev->advertising_paused = false;
                if (hdev->advertising_old_state) {
                        set_bit(SUSPEND_UNPAUSE_ADVERTISING,
                                hdev->suspend_tasks);
                        hci_dev_set_flag(hdev, HCI_ADVERTISING);
                        queue_work(hdev->req_workqueue,
                                   &hdev->discoverable_update);
                        hdev->advertising_old_state = 0;
                }

                /* Unpause discovery */
                hdev->discovery_paused = false;
                if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
                    hdev->discovery_old_state != DISCOVERY_STOPPING) {
                        set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
                        hci_discovery_set_state(hdev, DISCOVERY_STARTING);
                        queue_work(hdev->req_workqueue, &hdev->discov_update);
                }

                hci_req_run(&req, suspend_req_complete);
        }

        hdev->suspend_state = next;

done:
        clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
        wake_up(&hdev->suspend_wait_q);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Instance 0x00 always sets the local name */
        if (instance == 0x00)
                return 1;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        if (ext_adv_capable(req->hdev)) {
                struct hci_cp_le_set_ext_adv_enable cp;

                cp.enable = 0x00;
                /* Disable all sets since we only support one set at the moment */
                cp.num_of_sets = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
        } else {
                u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
        }
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
        /* If there is no connection we are OK to advertise. */
        if (hci_conn_num(hdev, LE_LINK) == 0)
                return true;

        /* Check le_states if there is any connection in slave role. */
        if (hdev->conn_hash.le_num_slave > 0) {
                /* Slave connection state and non connectable mode bit 20. */
                if (!connectable && !(hdev->le_states[2] & 0x10))
                        return false;

                /* Slave connection state and connectable mode bit 38
                 * and scannable bit 21.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x40) ||
                                    !(hdev->le_states[2] & 0x20)))
                        return false;
        }

        /* Check le_states if there is any connection in master role. */
        if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
                /* Master connection state and non connectable mode bit 18. */
                if (!connectable && !(hdev->le_states[2] & 0x02))
                        return false;

                /* Master connection state and connectable mode bit 35 and
                 * scannable 19.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x08) ||
                                    !(hdev->le_states[2] & 0x08)))
                        return false;
        }

        return true;
}

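/* Queue the commands to configure and enable legacy advertising, based
 * on the current advertising instance flags and the global connectable
 * and discoverable settings.
 */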
void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u16 adv_min_interval, adv_max_interval;
        u32 flags;

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));

        if (connectable) {
                cp.type = LE_ADV_IND;

                adv_min_interval = hdev->le_adv_min_interval;
                adv_max_interval = hdev->le_adv_max_interval;
        } else {
                if (get_cur_adv_instance_scan_rsp_len(hdev))
                        cp.type = LE_ADV_SCAN_IND;
                else
                        cp.type = LE_ADV_NONCONN_IND;

                if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
                    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                        adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
                        adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
                } else {
                        adv_min_interval = hdev->le_adv_min_interval;
                        adv_max_interval = hdev->le_adv_max_interval;
                }
        }

        cp.min_interval = cpu_to_le16(adv_min_interval);
        cp.max_interval = cpu_to_le16(adv_max_interval);
        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

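/* Append the device name to the advertising data at @ptr, preferring
 * the complete name if it fits, then the configured short name, then a
 * truncated copy of the complete name. Returns the new data length.
 */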
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t short_len;
        size_t complete_len;

        /* no space left for name (+ NULL + type + len) */
        if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
                return ad_len;

        /* use complete name if present and fits */
        complete_len = strlen(hdev->dev_name);
        if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
                return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
                                       hdev->dev_name, complete_len + 1);

        /* use short name if present */
        short_len = strlen(hdev->short_name);
        if (short_len)
                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
                                       hdev->short_name, short_len + 1);

        /* use shortened full name if present, we already know that name
         * is longer than HCI_MAX_SHORT_NAME_LENGTH
         */
        if (complete_len) {
                u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

                memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
                name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
                                       sizeof(name));
        }

        return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 scan_rsp_len = 0;

        if (hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

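/* Queue an update of the scan response data for @instance (0x00 means
 * the default response) if it differs from what is already programmed
 * into the controller.
 */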
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        if (ext_adv_capable(hdev)) {
                struct hci_cp_le_set_ext_scan_rsp_data cp;

                memset(&cp, 0, sizeof(cp));

                if (instance)
                        len = create_instance_scan_rsp_data(hdev, instance,
                                                            cp.data);
                else
                        len = create_default_scan_rsp_data(hdev, cp.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(cp.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
                hdev->scan_rsp_data_len = len;

                cp.handle = 0;
                cp.length = len;
                cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
                cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_rsp_data cp;

                memset(&cp, 0, sizeof(cp));

                if (instance)
                        len = create_instance_scan_rsp_data(hdev, instance,
                                                            cp.data);
                else
                        len = create_default_scan_rsp_data(hdev, cp.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(cp.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
                hdev->scan_rsp_data_len = len;

                cp.length = len;

                hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
        }
}

1481 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1482 {
1483         struct adv_info *adv_instance = NULL;
1484         u8 ad_len = 0, flags = 0;
1485         u32 instance_flags;
1486
1487         /* Return 0 when the current instance identifier is invalid. */
1488         if (instance) {
1489                 adv_instance = hci_find_adv_instance(hdev, instance);
1490                 if (!adv_instance)
1491                         return 0;
1492         }
1493
1494         instance_flags = get_adv_instance_flags(hdev, instance);
1495
1496         /* If the instance already has the flags set, skip adding
1497          * them again.
1498          */
1499         if (adv_instance && eir_get_data(adv_instance->adv_data,
1500                                          adv_instance->adv_data_len, EIR_FLAGS,
1501                                          NULL))
1502                 goto skip_flags;
1503
1504         /* The Add Advertising command allows userspace to set both the general
1505          * and limited discoverable flags.
1506          */
1507         if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1508                 flags |= LE_AD_GENERAL;
1509
1510         if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1511                 flags |= LE_AD_LIMITED;
1512
1513         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1514                 flags |= LE_AD_NO_BREDR;
1515
1516         if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1517                 /* If a discovery flag wasn't provided, simply use the global
1518                  * settings.
1519                  */
1520                 if (!flags)
1521                         flags |= mgmt_get_adv_discov_flags(hdev);
1522
1523                 /* If flags would still be empty, then there is no need to
1524          * include the "Flags" AD field.
1525                  */
1526                 if (flags) {
1527                         ptr[0] = 0x02;
1528                         ptr[1] = EIR_FLAGS;
1529                         ptr[2] = flags;
1530
1531                         ad_len += 3;
1532                         ptr += 3;
1533                 }
1534         }
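             /* A worked example (hypothetical flags value): with
              * flags = LE_AD_GENERAL | LE_AD_NO_BREDR (0x06), the three
              * octets written above would be:
              *
              *    0x02  AD length (type octet plus one octet of data)
              *    0x01  EIR_FLAGS
              *    0x06  General Discoverable, BR/EDR Not Supported
              */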
1535
1536 skip_flags:
1537         if (adv_instance) {
1538                 memcpy(ptr, adv_instance->adv_data,
1539                        adv_instance->adv_data_len);
1540                 ad_len += adv_instance->adv_data_len;
1541                 ptr += adv_instance->adv_data_len;
1542         }
1543
1544         if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1545                 s8 adv_tx_power;
1546
1547                 if (ext_adv_capable(hdev)) {
1548                         if (adv_instance)
1549                                 adv_tx_power = adv_instance->tx_power;
1550                         else
1551                                 adv_tx_power = hdev->adv_tx_power;
1552                 } else {
1553                         adv_tx_power = hdev->adv_tx_power;
1554                 }
1555
1556                 /* Provide Tx Power only if we can provide a valid value for it */
1557                 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1558                         ptr[0] = 0x02;
1559                         ptr[1] = EIR_TX_POWER;
1560                         ptr[2] = (u8)adv_tx_power;
1561
1562                         ad_len += 3;
1563                         ptr += 3;
1564                 }
1565         }
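             /* Example (assumed value): adv_tx_power = -12 dBm would be
              * encoded as the octet 0xf4 (two's complement), giving the
              * AD entry { 0x02, EIR_TX_POWER, 0xf4 }.
              */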
1566
1567         return ad_len;
1568 }
1569
1570 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1571 {
1572         struct hci_dev *hdev = req->hdev;
1573         u8 len;
1574
1575         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1576                 return;
1577
1578         if (ext_adv_capable(hdev)) {
1579                 struct hci_cp_le_set_ext_adv_data cp;
1580
1581                 memset(&cp, 0, sizeof(cp));
1582
1583                 len = create_instance_adv_data(hdev, instance, cp.data);
1584
1585                 /* There's nothing to do if the data hasn't changed */
1586                 if (hdev->adv_data_len == len &&
1587                     memcmp(cp.data, hdev->adv_data, len) == 0)
1588                         return;
1589
1590                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1591                 hdev->adv_data_len = len;
1592
1593                 cp.length = len;
1594                 cp.handle = 0;
1595                 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1596                 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1597
1598                 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1599         } else {
1600                 struct hci_cp_le_set_adv_data cp;
1601
1602                 memset(&cp, 0, sizeof(cp));
1603
1604                 len = create_instance_adv_data(hdev, instance, cp.data);
1605
1606                 /* There's nothing to do if the data hasn't changed */
1607                 if (hdev->adv_data_len == len &&
1608                     memcmp(cp.data, hdev->adv_data, len) == 0)
1609                         return;
1610
1611                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1612                 hdev->adv_data_len = len;
1613
1614                 cp.length = len;
1615
1616                 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1617         }
1618 }
1619
1620 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1621 {
1622         struct hci_request req;
1623
1624         hci_req_init(&req, hdev);
1625         __hci_req_update_adv_data(&req, instance);
1626
1627         return hci_req_run(&req, NULL);
1628 }
1629
1630 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1631 {
1632         BT_DBG("%s status %u", hdev->name, status);
1633 }
1634
1635 void hci_req_reenable_advertising(struct hci_dev *hdev)
1636 {
1637         struct hci_request req;
1638
1639         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1640             list_empty(&hdev->adv_instances))
1641                 return;
1642
1643         hci_req_init(&req, hdev);
1644
1645         if (hdev->cur_adv_instance) {
1646                 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1647                                                 true);
1648         } else {
1649                 if (ext_adv_capable(hdev)) {
1650                         __hci_req_start_ext_adv(&req, 0x00);
1651                 } else {
1652                         __hci_req_update_adv_data(&req, 0x00);
1653                         __hci_req_update_scan_rsp_data(&req, 0x00);
1654                         __hci_req_enable_advertising(&req);
1655                 }
1656         }
1657
1658         hci_req_run(&req, adv_enable_complete);
1659 }
1660
1661 static void adv_timeout_expire(struct work_struct *work)
1662 {
1663         struct hci_dev *hdev = container_of(work, struct hci_dev,
1664                                             adv_instance_expire.work);
1665
1666         struct hci_request req;
1667         u8 instance;
1668
1669         BT_DBG("%s", hdev->name);
1670
1671         hci_dev_lock(hdev);
1672
1673         hdev->adv_instance_timeout = 0;
1674
1675         instance = hdev->cur_adv_instance;
1676         if (instance == 0x00)
1677                 goto unlock;
1678
1679         hci_req_init(&req, hdev);
1680
1681         hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1682
1683         if (list_empty(&hdev->adv_instances))
1684                 __hci_req_disable_advertising(&req);
1685
1686         hci_req_run(&req, NULL);
1687
1688 unlock:
1689         hci_dev_unlock(hdev);
1690 }
1691
1692 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1693                            bool use_rpa, struct adv_info *adv_instance,
1694                            u8 *own_addr_type, bdaddr_t *rand_addr)
1695 {
1696         int err;
1697
1698         bacpy(rand_addr, BDADDR_ANY);
1699
1700         /* If privacy is enabled use a resolvable private address. If
1701          * current RPA has expired then generate a new one.
1702          */
1703         if (use_rpa) {
1704                 int to;
1705
1706                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1707
1708                 if (adv_instance) {
1709                         if (!adv_instance->rpa_expired &&
1710                             !bacmp(&adv_instance->random_addr, &hdev->rpa))
1711                                 return 0;
1712
1713                         adv_instance->rpa_expired = false;
1714                 } else {
1715                         if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1716                             !bacmp(&hdev->random_addr, &hdev->rpa))
1717                                 return 0;
1718                 }
1719
1720                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1721                 if (err < 0) {
1722                         bt_dev_err(hdev, "failed to generate new RPA");
1723                         return err;
1724                 }
1725
1726                 bacpy(rand_addr, &hdev->rpa);
1727
1728                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
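                     /* hdev->rpa_timeout is in seconds; with the default of
                      * 900 s (assuming HCI_DEFAULT_RPA_TIMEOUT is unchanged)
                      * a fresh RPA is generated roughly every 15 minutes.
                      */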
1729                 if (adv_instance)
1730                         queue_delayed_work(hdev->workqueue,
1731                                            &adv_instance->rpa_expired_cb, to);
1732                 else
1733                         queue_delayed_work(hdev->workqueue,
1734                                            &hdev->rpa_expired, to);
1735
1736                 return 0;
1737         }
1738
1739         /* In case of required privacy without a resolvable private
1740          * address, use a non-resolvable private address. This is useful
1741          * for non-connectable advertising.
1742          */
1743         if (require_privacy) {
1744                 bdaddr_t nrpa;
1745
1746                 while (true) {
1747                         /* The non-resolvable private address is generated
1748                          * from random six bytes with the two most significant
1749                          * bits cleared.
1750                          */
1751                         get_random_bytes(&nrpa, 6);
1752                         nrpa.b[5] &= 0x3f;
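                             /* Example: a random MSB of 0xff becomes 0x3f
                              * after the mask, so the two address-type bits
                              * are 00 and the address is a non-resolvable
                              * private one.
                              */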
1753
1754                         /* The non-resolvable private address shall not be
1755                          * equal to the public address.
1756                          */
1757                         if (bacmp(&hdev->bdaddr, &nrpa))
1758                                 break;
1759                 }
1760
1761                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1762                 bacpy(rand_addr, &nrpa);
1763
1764                 return 0;
1765         }
1766
1767         /* No privacy so use a public address. */
1768         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1769
1770         return 0;
1771 }
1772
1773 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1774 {
1775         hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1776 }
1777
1778 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1779 {
1780         struct hci_cp_le_set_ext_adv_params cp;
1781         struct hci_dev *hdev = req->hdev;
1782         bool connectable;
1783         u32 flags;
1784         bdaddr_t random_addr;
1785         u8 own_addr_type;
1786         int err;
1787         struct adv_info *adv_instance;
1788         bool secondary_adv;
1789         /* In ext adv set param, the interval is 3 octets (0x800 = 1.28 s) */
1790         const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1791
1792         if (instance > 0) {
1793                 adv_instance = hci_find_adv_instance(hdev, instance);
1794                 if (!adv_instance)
1795                         return -EINVAL;
1796         } else {
1797                 adv_instance = NULL;
1798         }
1799
1800         flags = get_adv_instance_flags(hdev, instance);
1801
1802         /* If the "connectable" instance flag was not set, then choose between
1803          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1804          */
1805         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1806                       mgmt_get_connectable(hdev);
1807
1808         if (!is_advertising_allowed(hdev, connectable))
1809                 return -EPERM;
1810
1811         /* Set require_privacy to true only when non-connectable
1812          * advertising is used. In that case it is fine to use a
1813          * non-resolvable private address.
1814          */
1815         err = hci_get_random_address(hdev, !connectable,
1816                                      adv_use_rpa(hdev, flags), adv_instance,
1817                                      &own_addr_type, &random_addr);
1818         if (err < 0)
1819                 return err;
1820
1821         memset(&cp, 0, sizeof(cp));
1822
1823         memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1824         memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1825
1826         secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1827
1828         if (connectable) {
1829                 if (secondary_adv)
1830                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1831                 else
1832                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1833         } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1834                 if (secondary_adv)
1835                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1836                 else
1837                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1838         } else {
1839                 if (secondary_adv)
1840                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1841                 else
1842                         cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1843         }
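             /* A rough summary of the mapping above, using the legacy
              * advertising PDU names:
              *
              *    connectable            -> ADV_IND
              *    scannable (scan rsp)   -> ADV_SCAN_IND
              *    otherwise              -> ADV_NONCONN_IND
              *
              * with the corresponding extended event properties used
              * instead when a secondary PHY is requested.
              */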
1844
1845         cp.own_addr_type = own_addr_type;
1846         cp.channel_map = hdev->le_adv_channel_map;
1847         cp.tx_power = 127; /* 0x7f: host has no preference */
1848         cp.handle = instance;
1849
1850         if (flags & MGMT_ADV_FLAG_SEC_2M) {
1851                 cp.primary_phy = HCI_ADV_PHY_1M;
1852                 cp.secondary_phy = HCI_ADV_PHY_2M;
1853         } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1854                 cp.primary_phy = HCI_ADV_PHY_CODED;
1855                 cp.secondary_phy = HCI_ADV_PHY_CODED;
1856         } else {
1857                 /* In all other cases use 1M */
1858                 cp.primary_phy = HCI_ADV_PHY_1M;
1859                 cp.secondary_phy = HCI_ADV_PHY_1M;
1860         }
1861
1862         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1863
1864         if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1865             bacmp(&random_addr, BDADDR_ANY)) {
1866                 struct hci_cp_le_set_adv_set_rand_addr cp;
1867
1868                 /* Check if random address needs to be updated */
1869                 if (adv_instance) {
1870                         if (!bacmp(&random_addr, &adv_instance->random_addr))
1871                                 return 0;
1872                 } else {
1873                         if (!bacmp(&random_addr, &hdev->random_addr))
1874                                 return 0;
1875                 }
1876
1877                 memset(&cp, 0, sizeof(cp));
1878
1879                 cp.handle = 0;
1880                 bacpy(&cp.bdaddr, &random_addr);
1881
1882                 hci_req_add(req,
1883                             HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1884                             sizeof(cp), &cp);
1885         }
1886
1887         return 0;
1888 }
1889
1890 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1891 {
1892         struct hci_dev *hdev = req->hdev;
1893         struct hci_cp_le_set_ext_adv_enable *cp;
1894         struct hci_cp_ext_adv_set *adv_set;
1895         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1]; /* one adv set */
1896         struct adv_info *adv_instance;
1897
1898         if (instance > 0) {
1899                 adv_instance = hci_find_adv_instance(hdev, instance);
1900                 if (!adv_instance)
1901                         return -EINVAL;
1902         } else {
1903                 adv_instance = NULL;
1904         }
1905
1906         cp = (void *) data;
1907         adv_set = (void *) cp->data;
1908
1909         memset(cp, 0, sizeof(*cp));
1910
1911         cp->enable = 0x01;
1912         cp->num_of_sets = 0x01;
1913
1914         memset(adv_set, 0, sizeof(*adv_set));
1915
1916         adv_set->handle = instance;
1917
1918         /* Set duration per instance since controller is responsible for
1919          * scheduling it.
1920          */
1921         if (adv_instance && adv_instance->duration) {
1922                 u16 duration = adv_instance->duration * MSEC_PER_SEC;
1923
1924                 /* Time = N * 10 ms */
1925                 adv_set->duration = cpu_to_le16(duration / 10);
1926         }
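             /* Worked example (hypothetical instance): a duration of 2
              * seconds gives 2 * MSEC_PER_SEC = 2000 ms, encoded as
              * 2000 / 10 = 200 (Time = 200 * 10 ms = 2 s).
              */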
1927
1928         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1929                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1930                     data);
1931
1932         return 0;
1933 }
1934
1935 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1936 {
1937         struct hci_dev *hdev = req->hdev;
1938         int err;
1939
1940         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1941                 __hci_req_disable_advertising(req);
1942
1943         err = __hci_req_setup_ext_adv_instance(req, instance);
1944         if (err < 0)
1945                 return err;
1946
1947         __hci_req_update_scan_rsp_data(req, instance);
1948         __hci_req_enable_ext_advertising(req, instance);
1949
1950         return 0;
1951 }
1952
1953 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1954                                     bool force)
1955 {
1956         struct hci_dev *hdev = req->hdev;
1957         struct adv_info *adv_instance = NULL;
1958         u16 timeout;
1959
1960         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1961             list_empty(&hdev->adv_instances))
1962                 return -EPERM;
1963
1964         if (hdev->adv_instance_timeout)
1965                 return -EBUSY;
1966
1967         adv_instance = hci_find_adv_instance(hdev, instance);
1968         if (!adv_instance)
1969                 return -ENOENT;
1970
1971         /* A zero timeout means unlimited advertising. As long as there is
1972          * only one instance, duration should be ignored. We still set a timeout
1973          * in case further instances are being added later on.
1974          *
1975          * If the remaining lifetime of the instance is more than the duration
1976          * then the timeout corresponds to the duration, otherwise it will be
1977          * reduced to the remaining instance lifetime.
1978          */
1979         if (adv_instance->timeout == 0 ||
1980             adv_instance->duration <= adv_instance->remaining_time)
1981                 timeout = adv_instance->duration;
1982         else
1983                 timeout = adv_instance->remaining_time;
1984
1985         /* The remaining time is being reduced unless the instance is being
1986          * advertised without time limit.
1987          */
1988         if (adv_instance->timeout)
1989                 adv_instance->remaining_time =
1990                                 adv_instance->remaining_time - timeout;
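             /* Example (assumed values): with duration = 10 s and
              * remaining_time = 25 s, the instance advertises for 10 s
              * and 15 s of lifetime remain; with remaining_time = 4 s,
              * the timeout is capped at 4 s and the remaining lifetime
              * drops to zero.
              */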
1991
1992         /* Only use work for scheduling instances with legacy advertising */
1993         if (!ext_adv_capable(hdev)) {
1994                 hdev->adv_instance_timeout = timeout;
1995                 queue_delayed_work(hdev->req_workqueue,
1996                            &hdev->adv_instance_expire,
1997                            msecs_to_jiffies(timeout * 1000));
1998         }
1999
2000         /* If we're just re-scheduling the same instance again then do not
2001          * execute any HCI commands. This happens when a single instance is
2002          * being advertised.
2003          */
2004         if (!force && hdev->cur_adv_instance == instance &&
2005             hci_dev_test_flag(hdev, HCI_LE_ADV))
2006                 return 0;
2007
2008         hdev->cur_adv_instance = instance;
2009         if (ext_adv_capable(hdev)) {
2010                 __hci_req_start_ext_adv(req, instance);
2011         } else {
2012                 __hci_req_update_adv_data(req, instance);
2013                 __hci_req_update_scan_rsp_data(req, instance);
2014                 __hci_req_enable_advertising(req);
2015         }
2016
2017         return 0;
2018 }
2019
2020 static void cancel_adv_timeout(struct hci_dev *hdev)
2021 {
2022         if (hdev->adv_instance_timeout) {
2023                 hdev->adv_instance_timeout = 0;
2024                 cancel_delayed_work(&hdev->adv_instance_expire);
2025         }
2026 }
2027
2028 /* For a single instance:
2029  * - force == true: The instance will be removed even when its remaining
2030  *   lifetime is not zero.
2031  * - force == false: The instance will be deactivated but kept stored unless
2032  *   the remaining lifetime is zero.
2033  *
2034  * For instance == 0x00:
2035  * - force == true: All instances will be removed regardless of their timeout
2036  *   setting.
2037  * - force == false: Only instances that have a timeout will be removed.
2038  */
2039 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2040                                 struct hci_request *req, u8 instance,
2041                                 bool force)
2042 {
2043         struct adv_info *adv_instance, *n, *next_instance = NULL;
2044         int err;
2045         u8 rem_inst;
2046
2047         /* Cancel any timeout concerning the removed instance(s). */
2048         if (!instance || hdev->cur_adv_instance == instance)
2049                 cancel_adv_timeout(hdev);
2050
2051         /* Get the next instance to advertise BEFORE we remove
2052          * the current one. This can be the same instance again
2053          * if there is only one instance.
2054          */
2055         if (instance && hdev->cur_adv_instance == instance)
2056                 next_instance = hci_get_next_instance(hdev, instance);
2057
2058         if (instance == 0x00) {
2059                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2060                                          list) {
2061                         if (!(force || adv_instance->timeout))
2062                                 continue;
2063
2064                         rem_inst = adv_instance->instance;
2065                         err = hci_remove_adv_instance(hdev, rem_inst);
2066                         if (!err)
2067                                 mgmt_advertising_removed(sk, hdev, rem_inst);
2068                 }
2069         } else {
2070                 adv_instance = hci_find_adv_instance(hdev, instance);
2071
2072                 if (force || (adv_instance && adv_instance->timeout &&
2073                               !adv_instance->remaining_time)) {
2074                         /* Don't advertise a removed instance. */
2075                         if (next_instance &&
2076                             next_instance->instance == instance)
2077                                 next_instance = NULL;
2078
2079                         err = hci_remove_adv_instance(hdev, instance);
2080                         if (!err)
2081                                 mgmt_advertising_removed(sk, hdev, instance);
2082                 }
2083         }
2084
2085         if (!req || !hdev_is_powered(hdev) ||
2086             hci_dev_test_flag(hdev, HCI_ADVERTISING))
2087                 return;
2088
2089         if (next_instance)
2090                 __hci_req_schedule_adv_instance(req, next_instance->instance,
2091                                                 false);
2092 }
2093
2094 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2095 {
2096         struct hci_dev *hdev = req->hdev;
2097
2098         /* If we're advertising or initiating an LE connection we can't
2099          * go ahead and change the random address at this time. This is
2100          * because the eventual initiator address used for the
2101          * subsequently created connection will be undefined (some
2102          * controllers use the new address and others the one we had
2103          * when the operation started).
2104          *
2105          * In this kind of scenario skip the update and let the random
2106          * address be updated at the next cycle.
2107          */
2108         if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2109             hci_lookup_le_connect(hdev)) {
2110                 BT_DBG("Deferring random address update");
2111                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2112                 return;
2113         }
2114
2115         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2116 }
2117
2118 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2119                               bool use_rpa, u8 *own_addr_type)
2120 {
2121         struct hci_dev *hdev = req->hdev;
2122         int err;
2123
2124         /* If privacy is enabled, use a resolvable private address. If
2125          * the current RPA has expired or something other than the
2126          * current RPA is in use, then generate a new one.
2127          */
2128         if (use_rpa) {
2129                 int to;
2130
2131                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2132
2133                 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2134                     !bacmp(&hdev->random_addr, &hdev->rpa))
2135                         return 0;
2136
2137                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2138                 if (err < 0) {
2139                         bt_dev_err(hdev, "failed to generate new RPA");
2140                         return err;
2141                 }
2142
2143                 set_random_addr(req, &hdev->rpa);
2144
2145                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2146                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2147
2148                 return 0;
2149         }
2150
2151         /* In case of required privacy without a resolvable private
2152          * address, use a non-resolvable private address. This is useful
2153          * for active scanning and non-connectable advertising.
2154          */
2155         if (require_privacy) {
2156                 bdaddr_t nrpa;
2157
2158                 while (true) {
2159                         /* The non-resolvable private address is generated
2160                          * from random six bytes with the two most significant
2161                          * bits cleared.
2162                          */
2163                         get_random_bytes(&nrpa, 6);
2164                         nrpa.b[5] &= 0x3f;
2165
2166                         /* The non-resolvable private address shall not be
2167                          * equal to the public address.
2168                          */
2169                         if (bacmp(&hdev->bdaddr, &nrpa))
2170                                 break;
2171                 }
2172
2173                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2174                 set_random_addr(req, &nrpa);
2175                 return 0;
2176         }
2177
2178         /* If forcing static address is in use or there is no public
2179          * address, use the static address as random address (but skip
2180          * the HCI command if the current random address is already the
2181          * static one).
2182          *
2183          * In case BR/EDR has been disabled on a dual-mode controller
2184          * and a static address has been configured, then use that
2185          * address instead of the public BR/EDR address.
2186          */
2187         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2188             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2189             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2190              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2191                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2192                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2193                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2194                                     &hdev->static_addr);
2195                 return 0;
2196         }
2197
2198         /* Neither privacy nor static address is being used so use a
2199          * public address.
2200          */
2201         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2202
2203         return 0;
2204 }
2205
2206 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2207 {
2208         struct bdaddr_list *b;
2209
2210         list_for_each_entry(b, &hdev->whitelist, list) {
2211                 struct hci_conn *conn;
2212
2213                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2214                 if (!conn)
2215                         return true;
2216
2217                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2218                         return true;
2219         }
2220
2221         return false;
2222 }
2223
2224 void __hci_req_update_scan(struct hci_request *req)
2225 {
2226         struct hci_dev *hdev = req->hdev;
2227         u8 scan;
2228
2229         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2230                 return;
2231
2232         if (!hdev_is_powered(hdev))
2233                 return;
2234
2235         if (mgmt_powering_down(hdev))
2236                 return;
2237
2238         if (hdev->scanning_paused)
2239                 return;
2240
2241         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2242             disconnected_whitelist_entries(hdev))
2243                 scan = SCAN_PAGE;
2244         else
2245                 scan = SCAN_DISABLED;
2246
2247         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2248                 scan |= SCAN_INQUIRY;
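             /* For reference, Write Scan Enable takes SCAN_DISABLED (0x00),
              * SCAN_INQUIRY (0x01) and SCAN_PAGE (0x02), so a connectable
              * and discoverable device ends up writing 0x03.
              */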
2249
2250         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2251             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2252                 return;
2253
2254         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2255 }
2256
2257 static int update_scan(struct hci_request *req, unsigned long opt)
2258 {
2259         hci_dev_lock(req->hdev);
2260         __hci_req_update_scan(req);
2261         hci_dev_unlock(req->hdev);
2262         return 0;
2263 }
2264
2265 static void scan_update_work(struct work_struct *work)
2266 {
2267         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2268
2269         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2270 }
2271
2272 static int connectable_update(struct hci_request *req, unsigned long opt)
2273 {
2274         struct hci_dev *hdev = req->hdev;
2275
2276         hci_dev_lock(hdev);
2277
2278         __hci_req_update_scan(req);
2279
2280         /* If BR/EDR is not enabled and we disable advertising as a
2281          * by-product of disabling connectable, we need to update the
2282          * advertising flags.
2283          */
2284         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2285                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2286
2287         /* Update the advertising parameters if necessary */
2288         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2289             !list_empty(&hdev->adv_instances)) {
2290                 if (ext_adv_capable(hdev))
2291                         __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2292                 else
2293                         __hci_req_enable_advertising(req);
2294         }
2295
2296         __hci_update_background_scan(req);
2297
2298         hci_dev_unlock(hdev);
2299
2300         return 0;
2301 }
2302
2303 static void connectable_update_work(struct work_struct *work)
2304 {
2305         struct hci_dev *hdev = container_of(work, struct hci_dev,
2306                                             connectable_update);
2307         u8 status;
2308
2309         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2310         mgmt_set_connectable_complete(hdev, status);
2311 }
2312
2313 static u8 get_service_classes(struct hci_dev *hdev)
2314 {
2315         struct bt_uuid *uuid;
2316         u8 val = 0;
2317
2318         list_for_each_entry(uuid, &hdev->uuids, list)
2319                 val |= uuid->svc_hint;
2320
2321         return val;
2322 }
2323
2324 void __hci_req_update_class(struct hci_request *req)
2325 {
2326         struct hci_dev *hdev = req->hdev;
2327         u8 cod[3];
2328
2329         BT_DBG("%s", hdev->name);
2330
2331         if (!hdev_is_powered(hdev))
2332                 return;
2333
2334         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2335                 return;
2336
2337         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2338                 return;
2339
2340         cod[0] = hdev->minor_class;
2341         cod[1] = hdev->major_class;
2342         cod[2] = get_service_classes(hdev);
2343
2344         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2345                 cod[1] |= 0x20;
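             /* cod[] holds the Class of Device least-significant octet
              * first, so setting bit 5 of cod[1] sets CoD bit 13, the
              * Limited Discoverable Mode service class bit.
              */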
2346
2347         if (memcmp(cod, hdev->dev_class, 3) == 0)
2348                 return;
2349
2350         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2351 }
2352
2353 static void write_iac(struct hci_request *req)
2354 {
2355         struct hci_dev *hdev = req->hdev;
2356         struct hci_cp_write_current_iac_lap cp;
2357
2358         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2359                 return;
2360
2361         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2362                 /* Limited discoverable mode */
2363                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2364                 cp.iac_lap[0] = 0x00;   /* LIAC */
2365                 cp.iac_lap[1] = 0x8b;
2366                 cp.iac_lap[2] = 0x9e;
2367                 cp.iac_lap[3] = 0x33;   /* GIAC */
2368                 cp.iac_lap[4] = 0x8b;
2369                 cp.iac_lap[5] = 0x9e;
2370         } else {
2371                 /* General discoverable mode */
2372                 cp.num_iac = 1;
2373                 cp.iac_lap[0] = 0x33;   /* GIAC */
2374                 cp.iac_lap[1] = 0x8b;
2375                 cp.iac_lap[2] = 0x9e;
2376         }
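             /* The byte triplets above are the assigned inquiry access
              * code LAPs in little-endian order: 0x9e8b00 (LIAC) and
              * 0x9e8b33 (GIAC).
              */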
2377
2378         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2379                     (cp.num_iac * 3) + 1, &cp);
2380 }
2381
2382 static int discoverable_update(struct hci_request *req, unsigned long opt)
2383 {
2384         struct hci_dev *hdev = req->hdev;
2385
2386         hci_dev_lock(hdev);
2387
2388         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2389                 write_iac(req);
2390                 __hci_req_update_scan(req);
2391                 __hci_req_update_class(req);
2392         }
2393
2394         /* Advertising instances don't use the global discoverable setting, so
2395          * only update AD if advertising was enabled using Set Advertising.
2396          */
2397         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2398                 __hci_req_update_adv_data(req, 0x00);
2399
2400                 /* Discoverable mode affects the local advertising
2401                  * address in limited privacy mode.
2402                  */
2403                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2404                         if (ext_adv_capable(hdev))
2405                                 __hci_req_start_ext_adv(req, 0x00);
2406                         else
2407                                 __hci_req_enable_advertising(req);
2408                 }
2409         }
2410
2411         hci_dev_unlock(hdev);
2412
2413         return 0;
2414 }
2415
2416 static void discoverable_update_work(struct work_struct *work)
2417 {
2418         struct hci_dev *hdev = container_of(work, struct hci_dev,
2419                                             discoverable_update);
2420         u8 status;
2421
2422         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2423         mgmt_set_discoverable_complete(hdev, status);
2424 }
2425
2426 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2427                       u8 reason)
2428 {
2429         switch (conn->state) {
2430         case BT_CONNECTED:
2431         case BT_CONFIG:
2432                 if (conn->type == AMP_LINK) {
2433                         struct hci_cp_disconn_phy_link cp;
2434
2435                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2436                         cp.reason = reason;
2437                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2438                                     &cp);
2439                 } else {
2440                         struct hci_cp_disconnect dc;
2441
2442                         dc.handle = cpu_to_le16(conn->handle);
2443                         dc.reason = reason;
2444                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2445                 }
2446
2447                 conn->state = BT_DISCONN;
2448
2449                 break;
2450         case BT_CONNECT:
2451                 if (conn->type == LE_LINK) {
2452                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2453                                 break;
2454                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2455                                     0, NULL);
2456                 } else if (conn->type == ACL_LINK) {
2457                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2458                                 break;
2459                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2460                                     6, &conn->dst);
2461                 }
2462                 break;
2463         case BT_CONNECT2:
2464                 if (conn->type == ACL_LINK) {
2465                         struct hci_cp_reject_conn_req rej;
2466
2467                         bacpy(&rej.bdaddr, &conn->dst);
2468                         rej.reason = reason;
2469
2470                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2471                                     sizeof(rej), &rej);
2472                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2473                         struct hci_cp_reject_sync_conn_req rej;
2474
2475                         bacpy(&rej.bdaddr, &conn->dst);
2476
2477                         /* SCO rejection has its own limited set of
2478                          * allowed error values (0x0D-0x0F) which isn't
2479                          * compatible with most values passed to this
2480                          * function. To be safe hard-code one of the
2481                          * values that's suitable for SCO.
2482                          */
2483                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2484
2485                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2486                                     sizeof(rej), &rej);
2487                 }
2488                 break;
2489         default:
2490                 conn->state = BT_CLOSED;
2491                 break;
2492         }
2493 }
2494
2495 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2496 {
2497         if (status)
2498                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2499 }
2500
2501 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2502 {
2503         struct hci_request req;
2504         int err;
2505
2506         hci_req_init(&req, conn->hdev);
2507
2508         __hci_abort_conn(&req, conn, reason);
2509
2510         err = hci_req_run(&req, abort_conn_complete);
2511         if (err && err != -ENODATA) {
2512                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2513                 return err;
2514         }
2515
2516         return 0;
2517 }
2518
2519 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2520 {
2521         hci_dev_lock(req->hdev);
2522         __hci_update_background_scan(req);
2523         hci_dev_unlock(req->hdev);
2524         return 0;
2525 }
2526
2527 static void bg_scan_update(struct work_struct *work)
2528 {
2529         struct hci_dev *hdev = container_of(work, struct hci_dev,
2530                                             bg_scan_update);
2531         struct hci_conn *conn;
2532         u8 status;
2533         int err;
2534
2535         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2536         if (!err)
2537                 return;
2538
2539         hci_dev_lock(hdev);
2540
2541         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2542         if (conn)
2543                 hci_le_conn_failed(conn, status);
2544
2545         hci_dev_unlock(hdev);
2546 }
2547
2548 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2549 {
2550         hci_req_add_le_scan_disable(req);
2551         return 0;
2552 }
2553
2554 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2555 {
2556         u8 length = opt;
2557         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2558         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2559         struct hci_cp_inquiry cp;
2560
2561         BT_DBG("%s", req->hdev->name);
2562
2563         hci_dev_lock(req->hdev);
2564         hci_inquiry_cache_flush(req->hdev);
2565         hci_dev_unlock(req->hdev);
2566
2567         memset(&cp, 0, sizeof(cp));
2568
2569         if (req->hdev->discovery.limited)
2570                 memcpy(&cp.lap, liac, sizeof(cp.lap));
2571         else
2572                 memcpy(&cp.lap, giac, sizeof(cp.lap));
2573
2574         cp.length = length; /* Time = length * 1.28 s */
2575
2576         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2577
2578         return 0;
2579 }
2580
2581 static void le_scan_disable_work(struct work_struct *work)
2582 {
2583         struct hci_dev *hdev = container_of(work, struct hci_dev,
2584                                             le_scan_disable.work);
2585         u8 status;
2586
2587         BT_DBG("%s", hdev->name);
2588
2589         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2590                 return;
2591
2592         cancel_delayed_work(&hdev->le_scan_restart);
2593
2594         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2595         if (status) {
2596                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2597                            status);
2598                 return;
2599         }
2600
2601         hdev->discovery.scan_start = 0;
2602
2603         /* If we were running LE only scan, change discovery state. If
2604          * we were running both LE and BR/EDR inquiry simultaneously,
2605          * and BR/EDR inquiry is already finished, stop discovery,
2606          * otherwise BR/EDR inquiry will stop discovery when finished.
2607          * If a remote device name is being resolved, do not change
2608          * the discovery state.
2609          */
2610
2611         if (hdev->discovery.type == DISCOV_TYPE_LE)
2612                 goto discov_stopped;
2613
2614         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2615                 return;
2616
2617         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2618                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2619                     hdev->discovery.state != DISCOVERY_RESOLVING)
2620                         goto discov_stopped;
2621
2622                 return;
2623         }
2624
2625         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2626                      HCI_CMD_TIMEOUT, &status);
2627         if (status) {
2628                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2629                 goto discov_stopped;
2630         }
2631
2632         return;
2633
2634 discov_stopped:
2635         hci_dev_lock(hdev);
2636         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2637         hci_dev_unlock(hdev);
2638 }
2639
2640 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2641 {
2642         struct hci_dev *hdev = req->hdev;
2643
2644         /* If controller is not scanning we are done. */
2645         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2646                 return 0;
2647
2648         hci_req_add_le_scan_disable(req);
2649
2650         if (use_ext_scan(hdev)) {
2651                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2652
2653                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2654                 ext_enable_cp.enable = LE_SCAN_ENABLE;
2655                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2656
2657                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2658                             sizeof(ext_enable_cp), &ext_enable_cp);
2659         } else {
2660                 struct hci_cp_le_set_scan_enable cp;
2661
2662                 memset(&cp, 0, sizeof(cp));
2663                 cp.enable = LE_SCAN_ENABLE;
2664                 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2665                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2666         }
2667
2668         return 0;
2669 }
2670
2671 static void le_scan_restart_work(struct work_struct *work)
2672 {
2673         struct hci_dev *hdev = container_of(work, struct hci_dev,
2674                                             le_scan_restart.work);
2675         unsigned long timeout, duration, scan_start, now;
2676         u8 status;
2677
2678         BT_DBG("%s", hdev->name);
2679
2680         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2681         if (status) {
2682                 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2683                            status);
2684                 return;
2685         }
2686
2687         hci_dev_lock(hdev);
2688
2689         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2690             !hdev->discovery.scan_start)
2691                 goto unlock;
2692
2693         /* When the scan was started, hdev->le_scan_disable was queued to
2694          * run duration after scan_start. During scan restart this job has
2695          * been canceled, and we need to queue it again with the remaining
2696          * timeout to make sure that the scan does not run indefinitely.
2697          */
2698         duration = hdev->discovery.scan_duration;
2699         scan_start = hdev->discovery.scan_start;
2700         now = jiffies;
2701         if (now - scan_start <= duration) {
2702                 int elapsed;
2703
2704                 if (now >= scan_start)
2705                         elapsed = now - scan_start;
2706                 else
2707                         elapsed = ULONG_MAX - scan_start + now;
2708
2709                 timeout = duration - elapsed;
2710         } else {
2711                 timeout = 0;
2712         }
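             /* Example with assumed numbers: for duration = 500 jiffies
              * and 200 jiffies elapsed since scan_start, the disable work
              * is re-queued 300 jiffies from now; once the full duration
              * has passed, timeout is 0 and it runs immediately. The else
              * branch above handles jiffies wraparound when computing
              * elapsed.
              */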
2713
2714         queue_delayed_work(hdev->req_workqueue,
2715                            &hdev->le_scan_disable, timeout);
2716
2717 unlock:
2718         hci_dev_unlock(hdev);
2719 }
2720
2721 static int active_scan(struct hci_request *req, unsigned long opt)
2722 {
2723         uint16_t interval = opt;
2724         struct hci_dev *hdev = req->hdev;
2725         u8 own_addr_type;
2726         /* White list is not used for discovery */
2727         u8 filter_policy = 0x00;
2728         int err;
2729
2730         BT_DBG("%s", hdev->name);
2731
2732         /* If controller is scanning, it means the background scanning is
2733          * running. Thus, we should temporarily stop it in order to set the
2734          * discovery scanning parameters.
2735          */
2736         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2737                 hci_req_add_le_scan_disable(req);
2738
2739         /* All active scans will be done with either a resolvable private
2740          * address (when privacy feature has been enabled) or non-resolvable
2741          * private address.
2742          */
2743         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2744                                         &own_addr_type);
2745         if (err < 0)
2746                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2747
2748         hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2749                            own_addr_type, filter_policy);
2750         return 0;
2751 }
2752
2753 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2754 {
2755         int err;
2756
2757         BT_DBG("%s", req->hdev->name);
2758
2759         err = active_scan(req, opt);
2760         if (err)
2761                 return err;
2762
2763         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2764 }
2765
2766 static void start_discovery(struct hci_dev *hdev, u8 *status)
2767 {
2768         unsigned long timeout;
2769
2770         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2771
2772         switch (hdev->discovery.type) {
2773         case DISCOV_TYPE_BREDR:
2774                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2775                         hci_req_sync(hdev, bredr_inquiry,
2776                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2777                                      status);
2778                 return;
2779         case DISCOV_TYPE_INTERLEAVED:
2780                 /* When running simultaneous discovery, the LE scanning time
2781                  * should occupy the whole discovery time since BR/EDR inquiry
2782                  * and LE scanning are scheduled by the controller.
2783                  *
2784                  * For interleaving discovery in comparison, BR/EDR inquiry
2785                  * and LE scanning are done sequentially with separate
2786                  * timeouts.
2787                  */
2788                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2789                              &hdev->quirks)) {
2790                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2791                         /* During simultaneous discovery, we double LE scan
2792                          * interval. We must leave some time for the controller
2793                          * to do BR/EDR inquiry.
2794                          */
2795                         hci_req_sync(hdev, interleaved_discov,
2796                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2797                                      status);
2798                         break;
2799                 }
2800
2801                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2802                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2803                              HCI_CMD_TIMEOUT, status);
2804                 break;
2805         case DISCOV_TYPE_LE:
2806                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2807                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2808                              HCI_CMD_TIMEOUT, status);
2809                 break;
2810         default:
2811                 *status = HCI_ERROR_UNSPECIFIED;
2812                 return;
2813         }
2814
2815         if (*status)
2816                 return;
2817
2818         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2819
2820         /* When service discovery is used and the controller has a
2821          * strict duplicate filter, it is important to remember the
2822          * start and duration of the scan. This is required for
2823          * restarting scanning during the discovery phase.
2824          */
2825         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2826                      hdev->discovery.result_filtering) {
2827                 hdev->discovery.scan_start = jiffies;
2828                 hdev->discovery.scan_duration = timeout;
2829         }
2830
2831         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2832                            timeout);
2833 }
2834
2835 bool hci_req_stop_discovery(struct hci_request *req)
2836 {
2837         struct hci_dev *hdev = req->hdev;
2838         struct discovery_state *d = &hdev->discovery;
2839         struct hci_cp_remote_name_req_cancel cp;
2840         struct inquiry_entry *e;
2841         bool ret = false;
2842
2843         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2844
2845         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2846                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2847                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2848
2849                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2850                         cancel_delayed_work(&hdev->le_scan_disable);
2851                         hci_req_add_le_scan_disable(req);
2852                 }
2853
2854                 ret = true;
2855         } else {
2856                 /* Passive scanning */
2857                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2858                         hci_req_add_le_scan_disable(req);
2859                         ret = true;
2860                 }
2861         }
2862
2863         /* No further actions needed for LE-only discovery */
2864         if (d->type == DISCOV_TYPE_LE)
2865                 return ret;
2866
2867         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2868                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2869                                                      NAME_PENDING);
2870                 if (!e)
2871                         return ret;
2872
2873                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2874                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2875                             &cp);
2876                 ret = true;
2877         }
2878
2879         return ret;
2880 }
2881
2882 static int stop_discovery(struct hci_request *req, unsigned long opt)
2883 {
2884         hci_dev_lock(req->hdev);
2885         hci_req_stop_discovery(req);
2886         hci_dev_unlock(req->hdev);
2887
2888         return 0;
2889 }
2890
2891 static void discov_update(struct work_struct *work)
2892 {
2893         struct hci_dev *hdev = container_of(work, struct hci_dev,
2894                                             discov_update);
2895         u8 status = 0;
2896
2897         switch (hdev->discovery.state) {
2898         case DISCOVERY_STARTING:
2899                 start_discovery(hdev, &status);
2900                 mgmt_start_discovery_complete(hdev, status);
2901                 if (status)
2902                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2903                 else
2904                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2905                 break;
2906         case DISCOVERY_STOPPING:
2907                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2908                 mgmt_stop_discovery_complete(hdev, status);
2909                 if (!status)
2910                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2911                 break;
2912         case DISCOVERY_STOPPED:
2913         default:
2914                 return;
2915         }
2916 }
2917
2918 static void discov_off(struct work_struct *work)
2919 {
2920         struct hci_dev *hdev = container_of(work, struct hci_dev,
2921                                             discov_off.work);
2922
2923         BT_DBG("%s", hdev->name);
2924
2925         hci_dev_lock(hdev);
2926
2927         /* When discoverable timeout triggers, then just make sure
2928          * the limited discoverable flag is cleared. Even in the case
2929          * of a timeout triggered from general discoverable, it is
2930          * safe to unconditionally clear the flag.
2931          */
2932         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2933         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2934         hdev->discov_timeout = 0;
2935
2936         hci_dev_unlock(hdev);
2937
2938         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2939         mgmt_new_settings(hdev);
2940 }
2941
2942 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2943 {
2944         struct hci_dev *hdev = req->hdev;
2945         u8 link_sec;
2946
2947         hci_dev_lock(hdev);
2948
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            !lmp_host_ssp_capable(hdev)) {
                u8 mode = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

                if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
                        u8 support = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                                    sizeof(support), &support);
                }
        }

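        /* Enable LE in the host features on dual-mode controllers; the
         * (deprecated) simultaneous LE and BR/EDR mode stays disabled.
         */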
        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
            lmp_bredr_capable(hdev)) {
                struct hci_cp_write_le_host_supported cp;

                cp.le = 0x01;
                cp.simul = 0x00;

                /* Check first if we already have the right
                 * host state (host features set)
                 */
                if (cp.le != lmp_host_le_capable(hdev) ||
                    cp.simul != lmp_host_le_br_capable(hdev))
                        hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
                                    sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                /* Make sure the controller has a good default for
                 * advertising data. This also applies to the case
                 * where BR/EDR was toggled during the AUTO_OFF phase.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
                    list_empty(&hdev->adv_instances)) {
                        int err;

                        if (ext_adv_capable(hdev)) {
                                err = __hci_req_setup_ext_adv_instance(req,
                                                                       0x00);
                                if (!err)
                                        __hci_req_update_scan_rsp_data(req,
                                                                       0x00);
                        } else {
                                err = 0;
                                __hci_req_update_adv_data(req, 0x00);
                                __hci_req_update_scan_rsp_data(req, 0x00);
                        }

                        if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
                                if (!ext_adv_capable(hdev))
                                        __hci_req_enable_advertising(req);
                                else if (!err)
                                        __hci_req_enable_ext_advertising(req,
                                                                         0x00);
                        }
                } else if (!list_empty(&hdev->adv_instances)) {
                        struct adv_info *adv_instance;

                        adv_instance = list_first_entry(&hdev->adv_instances,
                                                        struct adv_info, list);
                        __hci_req_schedule_adv_instance(req,
                                                        adv_instance->instance,
                                                        true);
                }
        }

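        /* Sync the controller's authentication setting with the
         * HCI_LINK_SECURITY flag; only send the command when the two
         * actually differ.
         */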
        link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
                            sizeof(link_sec), &link_sec);

        if (lmp_bredr_capable(hdev)) {
                bool fast_connectable;

                fast_connectable = hci_dev_test_flag(hdev,
                                                     HCI_FAST_CONNECTABLE);
                __hci_req_write_fast_connectable(req, fast_connectable);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
                __hci_req_update_name(req);
                __hci_req_update_eir(req);
        }

        hci_dev_unlock(hdev);
        return 0;
}

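/* Finalize the power-on sequence: register SMP and run
 * powered_update_hci() synchronously against the controller.
 */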
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
        /* Register the available SMP channels (BR/EDR and LE) only when
         * successfully powering on the controller. This late
         * registration is required so that LE SMP can clearly decide if
         * the public address or static address is used.
         */
        smp_register(hdev);

        return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
                              NULL);
}

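/* A sketch of the expected use of __hci_req_hci_power_on() (an
 * assumption; the actual call site lives in the core's power-on path,
 * not in this file):
 *
 *	err = __hci_req_hci_power_on(hdev);
 *	if (err < 0)
 *		bt_dev_err(hdev, "power on failed (%d)", err);
 */

/* Wire up all request-related work items for a freshly allocated
 * controller. Each handler defined earlier in this file is attached to
 * its corresponding member of struct hci_dev here.
 */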
void hci_request_setup(struct hci_dev *hdev)
{
        INIT_WORK(&hdev->discov_update, discov_update);
        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
        INIT_WORK(&hdev->scan_update, scan_update_work);
        INIT_WORK(&hdev->connectable_update, connectable_update_work);
        INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
        INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
        INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

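/* Tear-down counterpart of hci_request_setup(): abort any pending
 * synchronous request with ENODEV and flush all request-related work.
 */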
void hci_request_cancel_all(struct hci_dev *hdev)
{
        hci_req_sync_cancel(hdev, ENODEV);

        cancel_work_sync(&hdev->discov_update);
        cancel_work_sync(&hdev->bg_scan_update);
        cancel_work_sync(&hdev->scan_update);
        cancel_work_sync(&hdev->connectable_update);
        cancel_work_sync(&hdev->discoverable_update);
        cancel_delayed_work_sync(&hdev->discov_off);
        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work_sync(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }
}
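
/* How setup and tear-down are expected to pair up (a sketch, assuming
 * the usual controller life cycle in the core):
 *
 *	hci_request_setup(hdev);	// once, when the controller is created
 *	...
 *	hci_request_cancel_all(hdev);	// when the controller goes down
 */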