Bluetooth: use configured default params for active scans
net/bluetooth/hci_request.c  [linux-2.6-microblaze.git]
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3
4    Copyright (C) 2014 Intel Corporation
5
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License version 2 as
8    published by the Free Software Foundation;
9
10    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21    SOFTWARE IS DISCLAIMED.
22 */
23
24 #include <linux/sched/signal.h>
25
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29
30 #include "smp.h"
31 #include "hci_request.h"
32
33 #define HCI_REQ_DONE      0
34 #define HCI_REQ_PEND      1
35 #define HCI_REQ_CANCELED  2
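/* Lifecycle of a synchronous request: hdev->req_status is set to
 * HCI_REQ_PEND when the request is issued, moves to HCI_REQ_DONE once
 * hci_req_sync_complete() sees the result, or to HCI_REQ_CANCELED when
 * hci_req_sync_cancel() aborts the wait.
 */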
36
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38 {
39         skb_queue_head_init(&req->cmd_q);
40         req->hdev = hdev;
41         req->err = 0;
42 }
43
44 void hci_req_purge(struct hci_request *req)
45 {
46         skb_queue_purge(&req->cmd_q);
47 }
48
49 bool hci_req_status_pend(struct hci_dev *hdev)
50 {
51         return hdev->req_status == HCI_REQ_PEND;
52 }
53
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55                    hci_req_complete_skb_t complete_skb)
56 {
57         struct hci_dev *hdev = req->hdev;
58         struct sk_buff *skb;
59         unsigned long flags;
60
61         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
62
63         /* If an error occurred during request building, remove all HCI
64          * commands queued on the HCI request queue.
65          */
66         if (req->err) {
67                 skb_queue_purge(&req->cmd_q);
68                 return req->err;
69         }
70
71         /* Do not allow empty requests */
72         if (skb_queue_empty(&req->cmd_q))
73                 return -ENODATA;
74
75         skb = skb_peek_tail(&req->cmd_q);
76         if (complete) {
77                 bt_cb(skb)->hci.req_complete = complete;
78         } else if (complete_skb) {
79                 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80                 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81         }
82
83         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
86
87         queue_work(hdev->workqueue, &hdev->cmd_work);
88
89         return 0;
90 }
91
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
93 {
94         return req_run(req, complete, NULL);
95 }
96
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
98 {
99         return req_run(req, NULL, complete);
100 }
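/* A minimal usage sketch (illustrative only; the callback and the choice
 * of opcode are placeholders): build a request, queue one command and
 * run it asynchronously.
 *
 *	static void my_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "opcode 0x%4.4x status 0x%2.2x", opcode, status);
 *	}
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 */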
101
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103                                   struct sk_buff *skb)
104 {
105         BT_DBG("%s result 0x%2.2x", hdev->name, result);
106
107         if (hdev->req_status == HCI_REQ_PEND) {
108                 hdev->req_result = result;
109                 hdev->req_status = HCI_REQ_DONE;
110                 if (skb)
111                         hdev->req_skb = skb_get(skb);
112                 wake_up_interruptible(&hdev->req_wait_q);
113         }
114 }
115
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
117 {
118         BT_DBG("%s err 0x%2.2x", hdev->name, err);
119
120         if (hdev->req_status == HCI_REQ_PEND) {
121                 hdev->req_result = err;
122                 hdev->req_status = HCI_REQ_CANCELED;
123                 wake_up_interruptible(&hdev->req_wait_q);
124         }
125 }
126
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128                                   const void *param, u8 event, u32 timeout)
129 {
130         struct hci_request req;
131         struct sk_buff *skb;
132         int err = 0;
133
134         BT_DBG("%s", hdev->name);
135
136         hci_req_init(&req, hdev);
137
138         hci_req_add_ev(&req, opcode, plen, param, event);
139
140         hdev->req_status = HCI_REQ_PEND;
141
142         err = hci_req_run_skb(&req, hci_req_sync_complete);
143         if (err < 0)
144                 return ERR_PTR(err);
145
146         err = wait_event_interruptible_timeout(hdev->req_wait_q,
147                         hdev->req_status != HCI_REQ_PEND, timeout);
148
149         if (err == -ERESTARTSYS)
150                 return ERR_PTR(-EINTR);
151
152         switch (hdev->req_status) {
153         case HCI_REQ_DONE:
154                 err = -bt_to_errno(hdev->req_result);
155                 break;
156
157         case HCI_REQ_CANCELED:
158                 err = -hdev->req_result;
159                 break;
160
161         default:
162                 err = -ETIMEDOUT;
163                 break;
164         }
165
166         hdev->req_status = hdev->req_result = 0;
167         skb = hdev->req_skb;
168         hdev->req_skb = NULL;
169
170         BT_DBG("%s end: err %d", hdev->name, err);
171
172         if (err < 0) {
173                 kfree_skb(skb);
174                 return ERR_PTR(err);
175         }
176
177         if (!skb)
178                 return ERR_PTR(-ENODATA);
179
180         return skb;
181 }
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
183
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185                                const void *param, u32 timeout)
186 {
187         return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
188 }
189 EXPORT_SYMBOL(__hci_cmd_sync);
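/* Illustrative sketch (opcode chosen arbitrarily): send a command
 * synchronously and consume the Command Complete parameters.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */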
190
191 /* Execute request and wait for completion. */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
193                                                      unsigned long opt),
194                    unsigned long opt, u32 timeout, u8 *hci_status)
195 {
196         struct hci_request req;
197         int err = 0;
198
199         BT_DBG("%s start", hdev->name);
200
201         hci_req_init(&req, hdev);
202
203         hdev->req_status = HCI_REQ_PEND;
204
205         err = func(&req, opt);
206         if (err) {
207                 if (hci_status)
208                         *hci_status = HCI_ERROR_UNSPECIFIED;
209                 return err;
210         }
211
212         err = hci_req_run_skb(&req, hci_req_sync_complete);
213         if (err < 0) {
214                 hdev->req_status = 0;
215
216                 /* ENODATA means the HCI request command queue is empty.
217                  * This can happen when a request with conditionals doesn't
218                  * trigger any commands to be sent. This is normal behavior
219                  * and should not trigger an error return.
220                  */
221                 if (err == -ENODATA) {
222                         if (hci_status)
223                                 *hci_status = 0;
224                         return 0;
225                 }
226
227                 if (hci_status)
228                         *hci_status = HCI_ERROR_UNSPECIFIED;
229
230                 return err;
231         }
232
233         err = wait_event_interruptible_timeout(hdev->req_wait_q,
234                         hdev->req_status != HCI_REQ_PEND, timeout);
235
236         if (err == -ERESTARTSYS)
237                 return -EINTR;
238
239         switch (hdev->req_status) {
240         case HCI_REQ_DONE:
241                 err = -bt_to_errno(hdev->req_result);
242                 if (hci_status)
243                         *hci_status = hdev->req_result;
244                 break;
245
246         case HCI_REQ_CANCELED:
247                 err = -hdev->req_result;
248                 if (hci_status)
249                         *hci_status = HCI_ERROR_UNSPECIFIED;
250                 break;
251
252         default:
253                 err = -ETIMEDOUT;
254                 if (hci_status)
255                         *hci_status = HCI_ERROR_UNSPECIFIED;
256                 break;
257         }
258
259         kfree_skb(hdev->req_skb);
260         hdev->req_skb = NULL;
261         hdev->req_status = hdev->req_result = 0;
262
263         BT_DBG("%s end: err %d", hdev->name, err);
264
265         return err;
266 }
267
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
269                                                   unsigned long opt),
270                  unsigned long opt, u32 timeout, u8 *hci_status)
271 {
272         int ret;
273
274         if (!test_bit(HCI_UP, &hdev->flags))
275                 return -ENETDOWN;
276
277         /* Serialize all requests */
278         hci_req_sync_lock(hdev);
279         ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
280         hci_req_sync_unlock(hdev);
281
282         return ret;
283 }
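/* Illustrative sketch of the synchronous variant; build_my_req is a
 * placeholder request builder. hci_req_sync() serializes against other
 * sync requests and waits for the final command to complete.
 *
 *	static int build_my_req(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	int err = hci_req_sync(hdev, build_my_req, 0, HCI_CMD_TIMEOUT,
 *			       &status);
 */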
284
285 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
286                                 const void *param)
287 {
288         int len = HCI_COMMAND_HDR_SIZE + plen;
289         struct hci_command_hdr *hdr;
290         struct sk_buff *skb;
291
292         skb = bt_skb_alloc(len, GFP_ATOMIC);
293         if (!skb)
294                 return NULL;
295
296         hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
297         hdr->opcode = cpu_to_le16(opcode);
298         hdr->plen   = plen;
299
300         if (plen)
301                 skb_put_data(skb, param, plen);
302
303         BT_DBG("skb len %d", skb->len);
304
305         hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
306         hci_skb_opcode(skb) = opcode;
307
308         return skb;
309 }
310
311 /* Queue a command to an asynchronous HCI request */
312 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
313                     const void *param, u8 event)
314 {
315         struct hci_dev *hdev = req->hdev;
316         struct sk_buff *skb;
317
318         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
319
320         /* If an error occurred during request building, there is no point in
321          * queueing the HCI command. We can simply return.
322          */
323         if (req->err)
324                 return;
325
326         skb = hci_prepare_cmd(hdev, opcode, plen, param);
327         if (!skb) {
328                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
329                            opcode);
330                 req->err = -ENOMEM;
331                 return;
332         }
333
334         if (skb_queue_empty(&req->cmd_q))
335                 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
336
337         bt_cb(skb)->hci.req_event = event;
338
339         skb_queue_tail(&req->cmd_q, skb);
340 }
341
342 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
343                  const void *param)
344 {
345         hci_req_add_ev(req, opcode, plen, param, 0);
346 }
347
348 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
349 {
350         struct hci_dev *hdev = req->hdev;
351         struct hci_cp_write_page_scan_activity acp;
352         u8 type;
353
354         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
355                 return;
356
357         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
358                 return;
359
360         if (enable) {
361                 type = PAGE_SCAN_TYPE_INTERLACED;
362
363                 /* 160 msec page scan interval */
364                 acp.interval = cpu_to_le16(0x0100);
365         } else {
366                 type = hdev->def_page_scan_type;
367                 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
368         }
369
370         acp.window = cpu_to_le16(hdev->def_page_scan_window);
371
372         if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
373             __cpu_to_le16(hdev->page_scan_window) != acp.window)
374                 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
375                             sizeof(acp), &acp);
376
377         if (hdev->page_scan_type != type)
378                 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
379 }
380
381 /* This function controls the background scanning based on hdev->pend_le_conns
382  * list. If there are pending LE connections we start the background scanning,
383  * otherwise we stop it.
384  *
385  * This function requires that the caller holds hdev->lock.
386  */
387 static void __hci_update_background_scan(struct hci_request *req)
388 {
389         struct hci_dev *hdev = req->hdev;
390
391         if (!test_bit(HCI_UP, &hdev->flags) ||
392             test_bit(HCI_INIT, &hdev->flags) ||
393             hci_dev_test_flag(hdev, HCI_SETUP) ||
394             hci_dev_test_flag(hdev, HCI_CONFIG) ||
395             hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
396             hci_dev_test_flag(hdev, HCI_UNREGISTER))
397                 return;
398
399         /* No point in doing scanning if LE support hasn't been enabled */
400         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
401                 return;
402
403         /* If discovery is active don't interfere with it */
404         if (hdev->discovery.state != DISCOVERY_STOPPED)
405                 return;
406
407         /* Reset RSSI and UUID filters when starting background scanning
408          * since these filters are meant for service discovery only.
409          *
410          * The Start Discovery and Start Service Discovery operations
411  * take care of setting proper values for the RSSI threshold and
412  * the UUID filter list. So it is safe to just reset them here.
413          */
414         hci_discovery_filter_clear(hdev);
415
416         BT_DBG("%s ADV monitoring is %s", hdev->name,
417                hci_is_adv_monitoring(hdev) ? "on" : "off");
418
419         if (list_empty(&hdev->pend_le_conns) &&
420             list_empty(&hdev->pend_le_reports) &&
421             !hci_is_adv_monitoring(hdev)) {
422                 /* If there are no pending LE connections, no devices
423                  * to be scanned for and no ADV monitors, we should stop
424                  * the background scanning.
425                  */
426
427                 /* If controller is not scanning we are done. */
428                 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
429                         return;
430
431                 hci_req_add_le_scan_disable(req);
432
433                 BT_DBG("%s stopping background scanning", hdev->name);
434         } else {
435                 /* If there is at least one pending LE connection, we should
436                  * keep the background scan running.
437                  */
438
439                 /* If controller is connecting, we should not start scanning
440                  * since some controllers are not able to scan and connect at
441                  * the same time.
442                  */
443                 if (hci_lookup_le_connect(hdev))
444                         return;
445
446                 /* If controller is currently scanning, we stop it to ensure we
447                  * don't miss any advertising (due to duplicates filter).
448                  */
449                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
450                         hci_req_add_le_scan_disable(req);
451
452                 hci_req_add_le_passive_scan(req);
453
454                 BT_DBG("%s starting background scanning", hdev->name);
455         }
456 }
457
458 void __hci_req_update_name(struct hci_request *req)
459 {
460         struct hci_dev *hdev = req->hdev;
461         struct hci_cp_write_local_name cp;
462
463         memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
464
465         hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
466 }
467
468 #define PNP_INFO_SVCLASS_ID             0x1200
469
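/* Each EIR field is encoded as [length][type][data], where the length
 * byte covers the type byte plus the data. The helpers below emit the
 * 16-bit, 32-bit and 128-bit service class UUID lists in that format.
 */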
470 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
471 {
472         u8 *ptr = data, *uuids_start = NULL;
473         struct bt_uuid *uuid;
474
475         if (len < 4)
476                 return ptr;
477
478         list_for_each_entry(uuid, &hdev->uuids, list) {
479                 u16 uuid16;
480
481                 if (uuid->size != 16)
482                         continue;
483
484                 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
485                 if (uuid16 < 0x1100)
486                         continue;
487
488                 if (uuid16 == PNP_INFO_SVCLASS_ID)
489                         continue;
490
491                 if (!uuids_start) {
492                         uuids_start = ptr;
493                         uuids_start[0] = 1;
494                         uuids_start[1] = EIR_UUID16_ALL;
495                         ptr += 2;
496                 }
497
498                 /* Stop if not enough space to put next UUID */
499                 if ((ptr - data) + sizeof(u16) > len) {
500                         uuids_start[1] = EIR_UUID16_SOME;
501                         break;
502                 }
503
504                 *ptr++ = (uuid16 & 0x00ff);
505                 *ptr++ = (uuid16 & 0xff00) >> 8;
506                 uuids_start[0] += sizeof(uuid16);
507         }
508
509         return ptr;
510 }
511
512 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
513 {
514         u8 *ptr = data, *uuids_start = NULL;
515         struct bt_uuid *uuid;
516
517         if (len < 6)
518                 return ptr;
519
520         list_for_each_entry(uuid, &hdev->uuids, list) {
521                 if (uuid->size != 32)
522                         continue;
523
524                 if (!uuids_start) {
525                         uuids_start = ptr;
526                         uuids_start[0] = 1;
527                         uuids_start[1] = EIR_UUID32_ALL;
528                         ptr += 2;
529                 }
530
531                 /* Stop if not enough space to put next UUID */
532                 if ((ptr - data) + sizeof(u32) > len) {
533                         uuids_start[1] = EIR_UUID32_SOME;
534                         break;
535                 }
536
537                 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
538                 ptr += sizeof(u32);
539                 uuids_start[0] += sizeof(u32);
540         }
541
542         return ptr;
543 }
544
545 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
546 {
547         u8 *ptr = data, *uuids_start = NULL;
548         struct bt_uuid *uuid;
549
550         if (len < 18)
551                 return ptr;
552
553         list_for_each_entry(uuid, &hdev->uuids, list) {
554                 if (uuid->size != 128)
555                         continue;
556
557                 if (!uuids_start) {
558                         uuids_start = ptr;
559                         uuids_start[0] = 1;
560                         uuids_start[1] = EIR_UUID128_ALL;
561                         ptr += 2;
562                 }
563
564                 /* Stop if not enough space to put next UUID */
565                 if ((ptr - data) + 16 > len) {
566                         uuids_start[1] = EIR_UUID128_SOME;
567                         break;
568                 }
569
570                 memcpy(ptr, uuid->uuid, 16);
571                 ptr += 16;
572                 uuids_start[0] += 16;
573         }
574
575         return ptr;
576 }
577
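/* Builds the extended inquiry response data: device name (complete or
 * shortened), TX power, Device ID and the service class UUID lists.
 * For example, a complete local name "BlueZ" is emitted as the bytes
 * 06 09 42 6c 75 65 5a: length 0x06, type 0x09 (EIR_NAME_COMPLETE)
 * and the five name bytes.
 */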
578 static void create_eir(struct hci_dev *hdev, u8 *data)
579 {
580         u8 *ptr = data;
581         size_t name_len;
582
583         name_len = strlen(hdev->dev_name);
584
585         if (name_len > 0) {
586                 /* EIR Data type */
587                 if (name_len > 48) {
588                         name_len = 48;
589                         ptr[1] = EIR_NAME_SHORT;
590                 } else
591                         ptr[1] = EIR_NAME_COMPLETE;
592
593                 /* EIR Data length */
594                 ptr[0] = name_len + 1;
595
596                 memcpy(ptr + 2, hdev->dev_name, name_len);
597
598                 ptr += (name_len + 2);
599         }
600
601         if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
602                 ptr[0] = 2;
603                 ptr[1] = EIR_TX_POWER;
604                 ptr[2] = (u8) hdev->inq_tx_power;
605
606                 ptr += 3;
607         }
608
609         if (hdev->devid_source > 0) {
610                 ptr[0] = 9;
611                 ptr[1] = EIR_DEVICE_ID;
612
613                 put_unaligned_le16(hdev->devid_source, ptr + 2);
614                 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
615                 put_unaligned_le16(hdev->devid_product, ptr + 6);
616                 put_unaligned_le16(hdev->devid_version, ptr + 8);
617
618                 ptr += 10;
619         }
620
621         ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
622         ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
623         ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
624 }
625
626 void __hci_req_update_eir(struct hci_request *req)
627 {
628         struct hci_dev *hdev = req->hdev;
629         struct hci_cp_write_eir cp;
630
631         if (!hdev_is_powered(hdev))
632                 return;
633
634         if (!lmp_ext_inq_capable(hdev))
635                 return;
636
637         if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
638                 return;
639
640         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
641                 return;
642
643         memset(&cp, 0, sizeof(cp));
644
645         create_eir(hdev, cp.data);
646
647         if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
648                 return;
649
650         memcpy(hdev->eir, cp.data, sizeof(cp.data));
651
652         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
653 }
654
655 void hci_req_add_le_scan_disable(struct hci_request *req)
656 {
657         struct hci_dev *hdev = req->hdev;
658
659         if (hdev->scanning_paused) {
660                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
661                 return;
662         }
663
664         if (use_ext_scan(hdev)) {
665                 struct hci_cp_le_set_ext_scan_enable cp;
666
667                 memset(&cp, 0, sizeof(cp));
668                 cp.enable = LE_SCAN_DISABLE;
669                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
670                             &cp);
671         } else {
672                 struct hci_cp_le_set_scan_enable cp;
673
674                 memset(&cp, 0, sizeof(cp));
675                 cp.enable = LE_SCAN_DISABLE;
676                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
677         }
678 }
679
680 static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
681                                 u8 bdaddr_type)
682 {
683         struct hci_cp_le_del_from_white_list cp;
684
685         cp.bdaddr_type = bdaddr_type;
686         bacpy(&cp.bdaddr, bdaddr);
687
688         bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
689                    cp.bdaddr_type);
690         hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
691 }
692
693 /* Adds a connection to the white list if needed. On error, returns -1. */
694 static int add_to_white_list(struct hci_request *req,
695                              struct hci_conn_params *params, u8 *num_entries,
696                              bool allow_rpa)
697 {
698         struct hci_cp_le_add_to_white_list cp;
699         struct hci_dev *hdev = req->hdev;
700
701         /* Already in white list */
702         if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
703                                    params->addr_type))
704                 return 0;
705
706         /* Select filter policy to accept all advertising */
707         if (*num_entries >= hdev->le_white_list_size)
708                 return -1;
709
710         /* White list cannot be used with RPAs */
711         if (!allow_rpa &&
712             hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
713                 return -1;
714         }
715
716         /* During suspend, only wakeable devices can be in the whitelist */
717         if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
718                                                    params->current_flags))
719                 return 0;
720
721         *num_entries += 1;
722         cp.bdaddr_type = params->addr_type;
723         bacpy(&cp.bdaddr, &params->addr);
724
725         bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
726                    cp.bdaddr_type);
727         hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
728
729         return 0;
730 }
731
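/* Synchronizes the controller white list with hdev->pend_le_conns and
 * hdev->pend_le_reports and returns the scan filter policy to use:
 * 0x01 when the white list can be used, 0x00 (accept all) otherwise.
 */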
732 static u8 update_white_list(struct hci_request *req)
733 {
734         struct hci_dev *hdev = req->hdev;
735         struct hci_conn_params *params;
736         struct bdaddr_list *b;
737         u8 num_entries = 0;
738         bool pend_conn, pend_report;
739         /* We allow whitelisting even with RPAs in suspend. In the worst case,
740          * we won't be able to wake from devices that use the privacy 1.2
741          * features. Additionally, once we support privacy 1.2 and IRK
742          * offloading, we can update this to also check for those conditions.
743          */
744         bool allow_rpa = hdev->suspended;
745
746         /* Go through the current white list programmed into the
747          * controller one by one and check if that address is still
748          * in the list of pending connections or list of devices to
749          * report. If not present in either list, then queue the
750          * command to remove it from the controller.
751          */
752         list_for_each_entry(b, &hdev->le_white_list, list) {
753                 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
754                                                       &b->bdaddr,
755                                                       b->bdaddr_type);
756                 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
757                                                         &b->bdaddr,
758                                                         b->bdaddr_type);
759
760                 /* If the device is not likely to connect or report,
761                  * remove it from the whitelist.
762                  */
763                 if (!pend_conn && !pend_report) {
764                         del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
765                         continue;
766                 }
767
768                 /* White list cannot be used with RPAs */
769                 if (!allow_rpa &&
770                     hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
771                         return 0x00;
772                 }
773
774                 num_entries++;
775         }
776
777         /* Since all no longer valid white list entries have been
778          * removed, walk through the list of pending connections
779          * and ensure that any new device gets programmed into
780          * the controller.
781          *
782          * If the list of devices is larger than the number of
783          * available white list entries in the controller, then
784          * just abort and return a filter policy value that does
785          * not use the white list.
786          */
787         list_for_each_entry(params, &hdev->pend_le_conns, action) {
788                 if (add_to_white_list(req, params, &num_entries, allow_rpa))
789                         return 0x00;
790         }
791
792         /* After adding all new pending connections, walk through
793          * the list of pending reports and also add these to the
794          * white list if there is still space. Abort if space runs out.
795          */
796         list_for_each_entry(params, &hdev->pend_le_reports, action) {
797                 if (add_to_white_list(req, params, &num_entries, allow_rpa))
798                         return 0x00;
799         }
800
801         /* Once controller offloading of advertisement monitoring is in
802          * place, the condition below should also check for MSFT
803          * extension support.
804          */
805         if (!idr_is_empty(&hdev->adv_monitors_idr))
806                 return 0x00;
807
808         /* Select filter policy to use white list */
809         return 0x01;
810 }
811
812 static bool scan_use_rpa(struct hci_dev *hdev)
813 {
814         return hci_dev_test_flag(hdev, HCI_PRIVACY);
815 }
816
817 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
818                                u16 window, u8 own_addr_type, u8 filter_policy)
819 {
820         struct hci_dev *hdev = req->hdev;
821
822         if (hdev->scanning_paused) {
823                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
824                 return;
825         }
826
827         /* Use extended scanning if the controller supports the extended
828          * scan parameters and extended scan enable commands.
829          */
830         if (use_ext_scan(hdev)) {
831                 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
832                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
833                 struct hci_cp_le_scan_phy_params *phy_params;
834                 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
835                 u32 plen;
836
837                 ext_param_cp = (void *)data;
838                 phy_params = (void *)ext_param_cp->data;
839
840                 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
841                 ext_param_cp->own_addr_type = own_addr_type;
842                 ext_param_cp->filter_policy = filter_policy;
843
844                 plen = sizeof(*ext_param_cp);
845
846                 if (scan_1m(hdev) || scan_2m(hdev)) {
847                         ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
848
849                         memset(phy_params, 0, sizeof(*phy_params));
850                         phy_params->type = type;
851                         phy_params->interval = cpu_to_le16(interval);
852                         phy_params->window = cpu_to_le16(window);
853
854                         plen += sizeof(*phy_params);
855                         phy_params++;
856                 }
857
858                 if (scan_coded(hdev)) {
859                         ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
860
861                         memset(phy_params, 0, sizeof(*phy_params));
862                         phy_params->type = type;
863                         phy_params->interval = cpu_to_le16(interval);
864                         phy_params->window = cpu_to_le16(window);
865
866                         plen += sizeof(*phy_params);
867                         phy_params++;
868                 }
869
870                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
871                             plen, ext_param_cp);
872
873                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
874                 ext_enable_cp.enable = LE_SCAN_ENABLE;
875                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
876
877                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
878                             sizeof(ext_enable_cp), &ext_enable_cp);
879         } else {
880                 struct hci_cp_le_set_scan_param param_cp;
881                 struct hci_cp_le_set_scan_enable enable_cp;
882
883                 memset(&param_cp, 0, sizeof(param_cp));
884                 param_cp.type = type;
885                 param_cp.interval = cpu_to_le16(interval);
886                 param_cp.window = cpu_to_le16(window);
887                 param_cp.own_address_type = own_addr_type;
888                 param_cp.filter_policy = filter_policy;
889                 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
890                             &param_cp);
891
892                 memset(&enable_cp, 0, sizeof(enable_cp));
893                 enable_cp.enable = LE_SCAN_ENABLE;
894                 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
895                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
896                             &enable_cp);
897         }
898 }
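/* Both interval and window are expressed in units of 0.625 ms; for
 * example, an interval of 0x0060 (96 * 0.625 ms = 60 ms) with a window
 * of 0x0030 (48 * 0.625 ms = 30 ms) means the controller listens for
 * 30 ms out of every 60 ms.
 */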
899
900 void hci_req_add_le_passive_scan(struct hci_request *req)
901 {
902         struct hci_dev *hdev = req->hdev;
903         u8 own_addr_type;
904         u8 filter_policy;
905         u16 window, interval;
906
907         if (hdev->scanning_paused) {
908                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
909                 return;
910         }
911
912         /* Set require_privacy to false since no SCAN_REQ are sent
913          * during passive scanning. Not using a non-resolvable address
914          * here is important so that peer devices using direct
915          * advertising with our address will be correctly reported
916          * by the controller.
917          */
918         if (hci_update_random_address(req, false, scan_use_rpa(hdev),
919                                       &own_addr_type))
920                 return;
921
922         /* Adding or removing entries from the white list must
923          * happen before enabling scanning. The controller does
924          * not allow white list modification while scanning.
925          */
926         filter_policy = update_white_list(req);
927
928         /* When the controller is using resolvable random addresses and
929          * thus has LE privacy enabled, controllers that support the
930          * Extended Scanner Filter Policies can also handle directed
931          * advertising.
932          *
933          * So instead of using filter policies 0x00 (no whitelist)
934          * and 0x01 (whitelist enabled) use the new filter policies
935          * 0x02 (no whitelist) and 0x03 (whitelist enabled).
936          */
937         if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
938             (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
939                 filter_policy |= 0x02;
940
941         if (hdev->suspended) {
942                 window = hdev->le_scan_window_suspend;
943                 interval = hdev->le_scan_int_suspend;
944         } else {
945                 window = hdev->le_scan_window;
946                 interval = hdev->le_scan_interval;
947         }
948
949         bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
950         hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
951                            own_addr_type, filter_policy);
952 }
953
954 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
955 {
956         struct adv_info *adv_instance;
957
958         /* Instance 0x00 always sets the local name */
959         if (instance == 0x00)
960                 return 1;
961
962         adv_instance = hci_find_adv_instance(hdev, instance);
963         if (!adv_instance)
964                 return 0;
965
966         /* TODO: Take into account the "appearance" and "local-name" flags here.
967          * These are currently being ignored as they are not supported.
968          */
969         return adv_instance->scan_rsp_len;
970 }
971
972 static void hci_req_clear_event_filter(struct hci_request *req)
973 {
974         struct hci_cp_set_event_filter f;
975
976         memset(&f, 0, sizeof(f));
977         f.flt_type = HCI_FLT_CLEAR_ALL;
978         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
979
980         /* Update page scan state (since we may have modified it when setting
981          * the event filter).
982          */
983         __hci_req_update_scan(req);
984 }
985
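/* Programs a Connection Setup event filter with auto-accept for every
 * wakeable device on the whitelist, and enables page scan only if at
 * least one such filter was added.
 */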
986 static void hci_req_set_event_filter(struct hci_request *req)
987 {
988         struct bdaddr_list_with_flags *b;
989         struct hci_cp_set_event_filter f;
990         struct hci_dev *hdev = req->hdev;
991         u8 scan = SCAN_DISABLED;
992
993         /* Always clear event filter when starting */
994         hci_req_clear_event_filter(req);
995
996         list_for_each_entry(b, &hdev->whitelist, list) {
997                 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
998                                         b->current_flags))
999                         continue;
1000
1001                 memset(&f, 0, sizeof(f));
1002                 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1003                 f.flt_type = HCI_FLT_CONN_SETUP;
1004                 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1005                 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1006
1007                 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1008                 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1009                 scan = SCAN_PAGE;
1010         }
1011
1012         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1013 }
1014
1015 static void hci_req_config_le_suspend_scan(struct hci_request *req)
1016 {
1017         /* Before changing params, disable scan if enabled */
1018         if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
1019                 hci_req_add_le_scan_disable(req);
1020
1021         /* Configure params and enable scanning */
1022         hci_req_add_le_passive_scan(req);
1023
1024         /* Block suspend notifier on response */
1025         set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
1026 }
1027
1028 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1029 {
1030         bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1031                    status);
1032         if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1033             test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1034                 wake_up(&hdev->suspend_wait_q);
1035         }
1036 }
1037
1038 /* Call with hci_dev_lock */
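/* Walks the suspend state machine: BT_SUSPEND_DISCONNECT pauses
 * discovery and advertising, disables scanning and soft-disconnects all
 * links; BT_SUSPEND_CONFIGURE_WAKE programs event filters and a suspend
 * scan for wakeable devices; any other state restores normal operation.
 */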
1039 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1040 {
1041         int old_state;
1042         struct hci_conn *conn;
1043         struct hci_request req;
1044         u8 page_scan;
1045         int disconnect_counter;
1046
1047         if (next == hdev->suspend_state) {
1048                 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1049                 goto done;
1050         }
1051
1052         hdev->suspend_state = next;
1053         hci_req_init(&req, hdev);
1054
1055         if (next == BT_SUSPEND_DISCONNECT) {
1056                 /* Mark device as suspended */
1057                 hdev->suspended = true;
1058
1059                 /* Pause discovery if not already stopped */
1060                 old_state = hdev->discovery.state;
1061                 if (old_state != DISCOVERY_STOPPED) {
1062                         set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1063                         hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1064                         queue_work(hdev->req_workqueue, &hdev->discov_update);
1065                 }
1066
1067                 hdev->discovery_paused = true;
1068                 hdev->discovery_old_state = old_state;
1069
1070                 /* Stop advertising */
1071                 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1072                 if (old_state) {
1073                         set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1074                         cancel_delayed_work(&hdev->discov_off);
1075                         queue_delayed_work(hdev->req_workqueue,
1076                                            &hdev->discov_off, 0);
1077                 }
1078
1079                 hdev->advertising_paused = true;
1080                 hdev->advertising_old_state = old_state;
1081                 /* Disable page scan */
1082                 page_scan = SCAN_DISABLED;
1083                 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1084
1085                 /* Disable LE passive scan if enabled */
1086                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1087                         hci_req_add_le_scan_disable(&req);
1088
1089                 /* Mark task needing completion */
1090                 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1091
1092                 /* Prevent disconnects from causing scanning to be re-enabled */
1093                 hdev->scanning_paused = true;
1094
1095                 /* Run commands before disconnecting */
1096                 hci_req_run(&req, suspend_req_complete);
1097
1098                 disconnect_counter = 0;
1099                 /* Soft disconnect everything (power off) */
1100                 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1101                         hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1102                         disconnect_counter++;
1103                 }
1104
1105                 if (disconnect_counter > 0) {
1106                         bt_dev_dbg(hdev,
1107                                    "Had %d disconnects. Will wait on them",
1108                                    disconnect_counter);
1109                         set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1110                 }
1111         } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1112                 /* Unpause to take care of updating scanning params */
1113                 hdev->scanning_paused = false;
1114                 /* Enable event filter for paired devices */
1115                 hci_req_set_event_filter(&req);
1116                 /* Enable passive scan at lower duty cycle */
1117                 hci_req_config_le_suspend_scan(&req);
1118                 /* Pause scan changes again. */
1119                 hdev->scanning_paused = true;
1120                 hci_req_run(&req, suspend_req_complete);
1121         } else {
1122                 hdev->suspended = false;
1123                 hdev->scanning_paused = false;
1124
1125                 hci_req_clear_event_filter(&req);
1126                 /* Reset passive/background scanning to normal */
1127                 hci_req_config_le_suspend_scan(&req);
1128
1129                 /* Unpause advertising */
1130                 hdev->advertising_paused = false;
1131                 if (hdev->advertising_old_state) {
1132                         set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1133                                 hdev->suspend_tasks);
1134                         hci_dev_set_flag(hdev, HCI_ADVERTISING);
1135                         queue_work(hdev->req_workqueue,
1136                                    &hdev->discoverable_update);
1137                         hdev->advertising_old_state = 0;
1138                 }
1139
1140                 /* Unpause discovery */
1141                 hdev->discovery_paused = false;
1142                 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1143                     hdev->discovery_old_state != DISCOVERY_STOPPING) {
1144                         set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1145                         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1146                         queue_work(hdev->req_workqueue, &hdev->discov_update);
1147                 }
1148
1149                 hci_req_run(&req, suspend_req_complete);
1150         }
1151
1152         hdev->suspend_state = next;
1153
1154 done:
1155         clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1156         wake_up(&hdev->suspend_wait_q);
1157 }
1158
1159 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
1160 {
1161         u8 instance = hdev->cur_adv_instance;
1162         struct adv_info *adv_instance;
1163
1164         /* Instance 0x00 always sets the local name */
1165         if (instance == 0x00)
1166                 return 1;
1167
1168         adv_instance = hci_find_adv_instance(hdev, instance);
1169         if (!adv_instance)
1170                 return 0;
1171
1172         /* TODO: Take into account the "appearance" and "local-name" flags here.
1173          * These are currently being ignored as they are not supported.
1174          */
1175         return adv_instance->scan_rsp_len;
1176 }
1177
1178 void __hci_req_disable_advertising(struct hci_request *req)
1179 {
1180         if (ext_adv_capable(req->hdev)) {
1181                 struct hci_cp_le_set_ext_adv_enable cp;
1182
1183                 cp.enable = 0x00;
1184                 /* Disable all sets since we only support one set at the moment */
1185                 cp.num_of_sets = 0x00;
1186
1187                 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
1188         } else {
1189                 u8 enable = 0x00;
1190
1191                 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1192         }
1193 }
1194
1195 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1196 {
1197         u32 flags;
1198         struct adv_info *adv_instance;
1199
1200         if (instance == 0x00) {
1201                 /* Instance 0 always manages the "Tx Power" and "Flags"
1202                  * fields
1203                  */
1204                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1205
1206                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1207                  * corresponds to the "connectable" instance flag.
1208                  */
1209                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1210                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1211
1212                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1213                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1214                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1215                         flags |= MGMT_ADV_FLAG_DISCOV;
1216
1217                 return flags;
1218         }
1219
1220         adv_instance = hci_find_adv_instance(hdev, instance);
1221
1222         /* Return 0 when we got an invalid instance identifier. */
1223         if (!adv_instance)
1224                 return 0;
1225
1226         return adv_instance->flags;
1227 }
1228
1229 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1230 {
1231         /* If privacy is not enabled don't use RPA */
1232         if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1233                 return false;
1234
1235         /* If basic privacy mode is enabled use RPA */
1236         if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1237                 return true;
1238
1239         /* If limited privacy mode is enabled don't use RPA if we're
1240          * both discoverable and bondable.
1241          */
1242         if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1243             hci_dev_test_flag(hdev, HCI_BONDABLE))
1244                 return false;
1245
1246         /* We're neither bondable nor discoverable in the limited
1247          * privacy mode, therefore use RPA.
1248          */
1249         return true;
1250 }
1251
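/* hdev->le_states holds the bitmask reported by the HCI LE Read
 * Supported States command; the byte/bit tests below check whether the
 * controller supports advertising while in the slave or master
 * connection state.
 */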
1252 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1253 {
1254         /* If there is no connection we are OK to advertise. */
1255         if (hci_conn_num(hdev, LE_LINK) == 0)
1256                 return true;
1257
1258         /* Check le_states if there is any connection in slave role. */
1259         if (hdev->conn_hash.le_num_slave > 0) {
1260                 /* Slave connection state and non connectable mode bit 20. */
1261                 if (!connectable && !(hdev->le_states[2] & 0x10))
1262                         return false;
1263
1264                 /* Slave connection state and connectable mode bit 38
1265                  * and scannable bit 21.
1266                  */
1267                 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1268                                     !(hdev->le_states[2] & 0x20)))
1269                         return false;
1270         }
1271
1272         /* Check le_states if there is any connection in master role. */
1273         if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1274                 /* Master connection state and non connectable mode bit 18. */
1275                 if (!connectable && !(hdev->le_states[2] & 0x02))
1276                         return false;
1277
1278                 /* Master connection state and connectable mode bit 35 and
1279                  * scannable 19.
1280                  */
1281                 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1282                                     !(hdev->le_states[2] & 0x08)))
1283                         return false;
1284         }
1285
1286         return true;
1287 }
1288
1289 void __hci_req_enable_advertising(struct hci_request *req)
1290 {
1291         struct hci_dev *hdev = req->hdev;
1292         struct hci_cp_le_set_adv_param cp;
1293         u8 own_addr_type, enable = 0x01;
1294         bool connectable;
1295         u16 adv_min_interval, adv_max_interval;
1296         u32 flags;
1297
1298         flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1299
1300         /* If the "connectable" instance flag was not set, then choose between
1301          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1302          */
1303         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1304                       mgmt_get_connectable(hdev);
1305
1306         if (!is_advertising_allowed(hdev, connectable))
1307                 return;
1308
1309         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1310                 __hci_req_disable_advertising(req);
1311
1312         /* Clear the HCI_LE_ADV bit temporarily so that
1313          * hci_update_random_address() knows that it's safe to go ahead
1314          * and write a new random address. The flag will be set back on
1315          * as soon as the SET_ADV_ENABLE HCI command completes.
1316          */
1317         hci_dev_clear_flag(hdev, HCI_LE_ADV);
1318
1319         /* Set require_privacy to true only when non-connectable
1320          * advertising is used. In that case it is fine to use a
1321          * non-resolvable private address.
1322          */
1323         if (hci_update_random_address(req, !connectable,
1324                                       adv_use_rpa(hdev, flags),
1325                                       &own_addr_type) < 0)
1326                 return;
1327
1328         memset(&cp, 0, sizeof(cp));
1329
1330         if (connectable) {
1331                 cp.type = LE_ADV_IND;
1332
1333                 adv_min_interval = hdev->le_adv_min_interval;
1334                 adv_max_interval = hdev->le_adv_max_interval;
1335         } else {
1336                 if (get_cur_adv_instance_scan_rsp_len(hdev))
1337                         cp.type = LE_ADV_SCAN_IND;
1338                 else
1339                         cp.type = LE_ADV_NONCONN_IND;
1340
1341                 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1342                     hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1343                         adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1344                         adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1345                 } else {
1346                         adv_min_interval = hdev->le_adv_min_interval;
1347                         adv_max_interval = hdev->le_adv_max_interval;
1348                 }
1349         }
1350
1351         cp.min_interval = cpu_to_le16(adv_min_interval);
1352         cp.max_interval = cpu_to_le16(adv_max_interval);
1353         cp.own_address_type = own_addr_type;
1354         cp.channel_map = hdev->le_adv_channel_map;
1355
1356         hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1357
1358         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1359 }
1360
1361 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1362 {
1363         size_t short_len;
1364         size_t complete_len;
1365
1366         /* no space left for name (+ NULL + type + len) */
1367         if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1368                 return ad_len;
1369
1370         /* use complete name if present and fits */
1371         complete_len = strlen(hdev->dev_name);
1372         if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1373                 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1374                                        hdev->dev_name, complete_len + 1);
1375
1376         /* use short name if present */
1377         short_len = strlen(hdev->short_name);
1378         if (short_len)
1379                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1380                                        hdev->short_name, short_len + 1);
1381
1382         /* use shortened full name if present, we already know that name
1383          * is longer than HCI_MAX_SHORT_NAME_LENGTH
1384          */
1385         if (complete_len) {
1386                 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1387
1388                 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1389                 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1390
1391                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1392                                        sizeof(name));
1393         }
1394
1395         return ad_len;
1396 }
1397
1398 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1399 {
1400         return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1401 }
1402
1403 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1404 {
1405         u8 scan_rsp_len = 0;
1406
1407         if (hdev->appearance) {
1408                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1409         }
1410
1411         return append_local_name(hdev, ptr, scan_rsp_len);
1412 }
1413
1414 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1415                                         u8 *ptr)
1416 {
1417         struct adv_info *adv_instance;
1418         u32 instance_flags;
1419         u8 scan_rsp_len = 0;
1420
1421         adv_instance = hci_find_adv_instance(hdev, instance);
1422         if (!adv_instance)
1423                 return 0;
1424
1425         instance_flags = adv_instance->flags;
1426
1427         if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1428                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1429         }
1430
1431         memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1432                adv_instance->scan_rsp_len);
1433
1434         scan_rsp_len += adv_instance->scan_rsp_len;
1435
1436         if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1437                 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1438
1439         return scan_rsp_len;
1440 }
1441
1442 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1443 {
1444         struct hci_dev *hdev = req->hdev;
1445         u8 len;
1446
1447         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1448                 return;
1449
1450         if (ext_adv_capable(hdev)) {
1451                 struct hci_cp_le_set_ext_scan_rsp_data cp;
1452
1453                 memset(&cp, 0, sizeof(cp));
1454
1455                 if (instance)
1456                         len = create_instance_scan_rsp_data(hdev, instance,
1457                                                             cp.data);
1458                 else
1459                         len = create_default_scan_rsp_data(hdev, cp.data);
1460
1461                 if (hdev->scan_rsp_data_len == len &&
1462                     !memcmp(cp.data, hdev->scan_rsp_data, len))
1463                         return;
1464
1465                 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1466                 hdev->scan_rsp_data_len = len;
1467
1468                 cp.handle = instance;
1469                 cp.length = len;
1470                 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1471                 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1472
1473                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1474                             &cp);
1475         } else {
1476                 struct hci_cp_le_set_scan_rsp_data cp;
1477
1478                 memset(&cp, 0, sizeof(cp));
1479
1480                 if (instance)
1481                         len = create_instance_scan_rsp_data(hdev, instance,
1482                                                             cp.data);
1483                 else
1484                         len = create_default_scan_rsp_data(hdev, cp.data);
1485
1486                 if (hdev->scan_rsp_data_len == len &&
1487                     !memcmp(cp.data, hdev->scan_rsp_data, len))
1488                         return;
1489
1490                 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1491                 hdev->scan_rsp_data_len = len;
1492
1493                 cp.length = len;
1494
1495                 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1496         }
1497 }
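
/* Illustrative sketch (not from the original file): for a device with
 * appearance 0x0341 and complete name "hrs", the default scan response
 * assembled above would consist of two EIR structures:
 *
 *	03 19 41 03		len=3, EIR_APPEARANCE (0x19), 0x0341 LE
 *	04 09 68 72 73		len=4, EIR_NAME_COMPLETE (0x09), "hrs"
 *
 * eir_append_le16() and eir_append_data() emit the length byte, the EIR
 * type and the payload, and return the updated total length.
 */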
1498
1499 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1500 {
1501         struct adv_info *adv_instance = NULL;
1502         u8 ad_len = 0, flags = 0;
1503         u32 instance_flags;
1504
1505         /* Return 0 when the current instance identifier is invalid. */
1506         if (instance) {
1507                 adv_instance = hci_find_adv_instance(hdev, instance);
1508                 if (!adv_instance)
1509                         return 0;
1510         }
1511
1512         instance_flags = get_adv_instance_flags(hdev, instance);
1513
1514         /* If the instance already has the flags set, skip adding
1515          * them again.
1516          */
1517         if (adv_instance && eir_get_data(adv_instance->adv_data,
1518                                          adv_instance->adv_data_len, EIR_FLAGS,
1519                                          NULL))
1520                 goto skip_flags;
1521
1522         /* The Add Advertising command allows userspace to set both the general
1523          * and limited discoverable flags.
1524          */
1525         if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1526                 flags |= LE_AD_GENERAL;
1527
1528         if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1529                 flags |= LE_AD_LIMITED;
1530
1531         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1532                 flags |= LE_AD_NO_BREDR;
1533
1534         if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1535                 /* If a discovery flag wasn't provided, simply use the global
1536                  * settings.
1537                  */
1538                 if (!flags)
1539                         flags |= mgmt_get_adv_discov_flags(hdev);
1540
1541                 /* If the flags would still be empty, then there is no
1542                  * need to include the "Flags" AD field.
1543                  */
1544                 if (flags) {
1545                         ptr[0] = 0x02;
1546                         ptr[1] = EIR_FLAGS;
1547                         ptr[2] = flags;
1548
1549                         ad_len += 3;
1550                         ptr += 3;
1551                 }
1552         }
1553
1554 skip_flags:
1555         if (adv_instance) {
1556                 memcpy(ptr, adv_instance->adv_data,
1557                        adv_instance->adv_data_len);
1558                 ad_len += adv_instance->adv_data_len;
1559                 ptr += adv_instance->adv_data_len;
1560         }
1561
1562         if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1563                 s8 adv_tx_power;
1564
1565                 if (ext_adv_capable(hdev)) {
1566                         if (adv_instance)
1567                                 adv_tx_power = adv_instance->tx_power;
1568                         else
1569                                 adv_tx_power = hdev->adv_tx_power;
1570                 } else {
1571                         adv_tx_power = hdev->adv_tx_power;
1572                 }
1573
1574                 /* Provide Tx Power only if we can provide a valid value for it */
1575                 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1576                         ptr[0] = 0x02;
1577                         ptr[1] = EIR_TX_POWER;
1578                         ptr[2] = (u8)adv_tx_power;
1579
1580                         ad_len += 3;
1581                         ptr += 3;
1582                 }
1583         }
1584
1585         return ad_len;
1586 }
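
/* Illustrative sketch (values assumed): an LE-only, general-discoverable
 * instance with MGMT_ADV_FLAG_TX_POWER and a 0 dBm TX power would yield:
 *
 *	02 01 06		len=2, EIR_FLAGS (0x01),
 *				LE_AD_GENERAL | LE_AD_NO_BREDR
 *	<instance adv_data, if any>
 *	02 0a 00		len=2, EIR_TX_POWER (0x0a), 0 dBm
 */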
1587
1588 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1589 {
1590         struct hci_dev *hdev = req->hdev;
1591         u8 len;
1592
1593         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1594                 return;
1595
1596         if (ext_adv_capable(hdev)) {
1597                 struct hci_cp_le_set_ext_adv_data cp;
1598
1599                 memset(&cp, 0, sizeof(cp));
1600
1601                 len = create_instance_adv_data(hdev, instance, cp.data);
1602
1603                 /* There's nothing to do if the data hasn't changed */
1604                 if (hdev->adv_data_len == len &&
1605                     memcmp(cp.data, hdev->adv_data, len) == 0)
1606                         return;
1607
1608                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1609                 hdev->adv_data_len = len;
1610
1611                 cp.length = len;
1612                 cp.handle = instance;
1613                 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1614                 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1615
1616                 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1617         } else {
1618                 struct hci_cp_le_set_adv_data cp;
1619
1620                 memset(&cp, 0, sizeof(cp));
1621
1622                 len = create_instance_adv_data(hdev, instance, cp.data);
1623
1624                 /* There's nothing to do if the data hasn't changed */
1625                 if (hdev->adv_data_len == len &&
1626                     memcmp(cp.data, hdev->adv_data, len) == 0)
1627                         return;
1628
1629                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1630                 hdev->adv_data_len = len;
1631
1632                 cp.length = len;
1633
1634                 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1635         }
1636 }
1637
1638 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1639 {
1640         struct hci_request req;
1641
1642         hci_req_init(&req, hdev);
1643         __hci_req_update_adv_data(&req, instance);
1644
1645         return hci_req_run(&req, NULL);
1646 }
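
/* Usage sketch (assumed caller context): mgmt code can refresh the
 * advertising data of the current instance with this one-off request:
 *
 *	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
 *
 * The request runs asynchronously; the NULL completion callback above
 * means nobody waits for the command status.
 */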
1647
1648 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1649 {
1650         BT_DBG("%s status %u", hdev->name, status);
1651 }
1652
1653 void hci_req_reenable_advertising(struct hci_dev *hdev)
1654 {
1655         struct hci_request req;
1656
1657         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1658             list_empty(&hdev->adv_instances))
1659                 return;
1660
1661         hci_req_init(&req, hdev);
1662
1663         if (hdev->cur_adv_instance) {
1664                 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1665                                                 true);
1666         } else {
1667                 if (ext_adv_capable(hdev)) {
1668                         __hci_req_start_ext_adv(&req, 0x00);
1669                 } else {
1670                         __hci_req_update_adv_data(&req, 0x00);
1671                         __hci_req_update_scan_rsp_data(&req, 0x00);
1672                         __hci_req_enable_advertising(&req);
1673                 }
1674         }
1675
1676         hci_req_run(&req, adv_enable_complete);
1677 }
1678
1679 static void adv_timeout_expire(struct work_struct *work)
1680 {
1681         struct hci_dev *hdev = container_of(work, struct hci_dev,
1682                                             adv_instance_expire.work);
1683
1684         struct hci_request req;
1685         u8 instance;
1686
1687         BT_DBG("%s", hdev->name);
1688
1689         hci_dev_lock(hdev);
1690
1691         hdev->adv_instance_timeout = 0;
1692
1693         instance = hdev->cur_adv_instance;
1694         if (instance == 0x00)
1695                 goto unlock;
1696
1697         hci_req_init(&req, hdev);
1698
1699         hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1700
1701         if (list_empty(&hdev->adv_instances))
1702                 __hci_req_disable_advertising(&req);
1703
1704         hci_req_run(&req, NULL);
1705
1706 unlock:
1707         hci_dev_unlock(hdev);
1708 }
1709
1710 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1711                            bool use_rpa, struct adv_info *adv_instance,
1712                            u8 *own_addr_type, bdaddr_t *rand_addr)
1713 {
1714         int err;
1715
1716         bacpy(rand_addr, BDADDR_ANY);
1717
1718         /* If privacy is enabled use a resolvable private address. If
1719          * current RPA has expired then generate a new one.
1720          */
1721         if (use_rpa) {
1722                 int to;
1723
1724                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1725
1726                 if (adv_instance) {
1727                         if (!adv_instance->rpa_expired &&
1728                             !bacmp(&adv_instance->random_addr, &hdev->rpa))
1729                                 return 0;
1730
1731                         adv_instance->rpa_expired = false;
1732                 } else {
1733                         if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1734                             !bacmp(&hdev->random_addr, &hdev->rpa))
1735                                 return 0;
1736                 }
1737
1738                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1739                 if (err < 0) {
1740                         bt_dev_err(hdev, "failed to generate new RPA");
1741                         return err;
1742                 }
1743
1744                 bacpy(rand_addr, &hdev->rpa);
1745
1746                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1747                 if (adv_instance)
1748                         queue_delayed_work(hdev->workqueue,
1749                                            &adv_instance->rpa_expired_cb, to);
1750                 else
1751                         queue_delayed_work(hdev->workqueue,
1752                                            &hdev->rpa_expired, to);
1753
1754                 return 0;
1755         }
1756
1757         /* In case of required privacy without resolvable private address,
1758          * use a non-resolvable private address. This is useful for
1759          * non-connectable advertising.
1760          */
1761         if (require_privacy) {
1762                 bdaddr_t nrpa;
1763
1764                 while (true) {
1765                         /* The non-resolvable private address is generated
1766                          * from random six bytes with the two most significant
1767                          * bits cleared.
1768                          */
1769                         get_random_bytes(&nrpa, 6);
1770                         nrpa.b[5] &= 0x3f;
1771
1772                         /* The non-resolvable private address shall not be
1773                          * equal to the public address.
1774                          */
1775                         if (bacmp(&hdev->bdaddr, &nrpa))
1776                                 break;
1777                 }
1778
1779                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1780                 bacpy(rand_addr, &nrpa);
1781
1782                 return 0;
1783         }
1784
1785         /* No privacy so use a public address. */
1786         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1787
1788         return 0;
1789 }
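
/* Summary of the address selection above (informational):
 *
 *	use_rpa			random address, (re)generated RPA
 *	require_privacy only	random address, fresh NRPA
 *	otherwise		public address, rand_addr left BDADDR_ANY
 */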
1790
1791 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1792 {
1793         hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1794 }
1795
1796 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1797 {
1798         struct hci_cp_le_set_ext_adv_params cp;
1799         struct hci_dev *hdev = req->hdev;
1800         bool connectable;
1801         u32 flags;
1802         bdaddr_t random_addr;
1803         u8 own_addr_type;
1804         int err;
1805         struct adv_info *adv_instance;
1806         bool secondary_adv;
1807
1808         if (instance > 0) {
1809                 adv_instance = hci_find_adv_instance(hdev, instance);
1810                 if (!adv_instance)
1811                         return -EINVAL;
1812         } else {
1813                 adv_instance = NULL;
1814         }
1815
1816         flags = get_adv_instance_flags(hdev, instance);
1817
1818         /* If the "connectable" instance flag was not set, then choose between
1819          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1820          */
1821         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1822                       mgmt_get_connectable(hdev);
1823
1824         if (!is_advertising_allowed(hdev, connectable))
1825                 return -EPERM;
1826
1827         /* Set require_privacy to true only when non-connectable
1828          * advertising is used. In that case it is fine to use a
1829          * non-resolvable private address.
1830          */
1831         err = hci_get_random_address(hdev, !connectable,
1832                                      adv_use_rpa(hdev, flags), adv_instance,
1833                                      &own_addr_type, &random_addr);
1834         if (err < 0)
1835                 return err;
1836
1837         memset(&cp, 0, sizeof(cp));
1838
1839         /* In the ext adv params command the interval is 3 octets */
1840         hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1841         hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1842
1843         secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1844
1845         if (connectable) {
1846                 if (secondary_adv)
1847                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1848                 else
1849                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1850         } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1851                 if (secondary_adv)
1852                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1853                 else
1854                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1855         } else {
1856                 if (secondary_adv)
1857                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1858                 else
1859                         cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1860         }
1861
1862         cp.own_addr_type = own_addr_type;
1863         cp.channel_map = hdev->le_adv_channel_map;
1864         cp.tx_power = 127; /* 0x7f: host has no preference */
1865         cp.handle = instance;
1866
1867         if (flags & MGMT_ADV_FLAG_SEC_2M) {
1868                 cp.primary_phy = HCI_ADV_PHY_1M;
1869                 cp.secondary_phy = HCI_ADV_PHY_2M;
1870         } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1871                 cp.primary_phy = HCI_ADV_PHY_CODED;
1872                 cp.secondary_phy = HCI_ADV_PHY_CODED;
1873         } else {
1874                 /* In all other cases use 1M */
1875                 cp.primary_phy = HCI_ADV_PHY_1M;
1876                 cp.secondary_phy = HCI_ADV_PHY_1M;
1877         }
1878
1879         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1880
1881         if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1882             bacmp(&random_addr, BDADDR_ANY)) {
1883                 struct hci_cp_le_set_adv_set_rand_addr cp;
1884
1885                 /* Check if the random address needs to be updated */
1886                 if (adv_instance) {
1887                         if (!bacmp(&random_addr, &adv_instance->random_addr))
1888                                 return 0;
1889                 } else {
1890                         if (!bacmp(&random_addr, &hdev->random_addr))
1891                                 return 0;
1892                 }
1893
1894                 memset(&cp, 0, sizeof(cp));
1895
1896                 cp.handle = instance;
1897                 bacpy(&cp.bdaddr, &random_addr);
1898
1899                 hci_req_add(req,
1900                             HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1901                             sizeof(cp), &cp);
1902         }
1903
1904         return 0;
1905 }
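
/* Summary of the event-properties selection above (informational):
 *
 *	connectable		ADV_IND (legacy) / connectable ext adv
 *	scan rsp data present	ADV_SCAN_IND (legacy) / scannable ext adv
 *	otherwise		ADV_NONCONN_IND (legacy) / non-conn ext adv
 *
 * When MGMT_ADV_FLAG_SEC_2M or MGMT_ADV_FLAG_SEC_CODED is set, the
 * non-legacy PDU variants are chosen so that the secondary PHY can
 * differ from 1M.
 */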
1906
1907 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1908 {
1909         struct hci_dev *hdev = req->hdev;
1910         struct hci_cp_le_set_ext_adv_enable *cp;
1911         struct hci_cp_ext_adv_set *adv_set;
1912         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1913         struct adv_info *adv_instance;
1914
1915         if (instance > 0) {
1916                 adv_instance = hci_find_adv_instance(hdev, instance);
1917                 if (!adv_instance)
1918                         return -EINVAL;
1919         } else {
1920                 adv_instance = NULL;
1921         }
1922
1923         cp = (void *) data;
1924         adv_set = (void *) cp->data;
1925
1926         memset(cp, 0, sizeof(*cp));
1927
1928         cp->enable = 0x01;
1929         cp->num_of_sets = 0x01;
1930
1931         memset(adv_set, 0, sizeof(*adv_set));
1932
1933         adv_set->handle = instance;
1934
1935         /* Set duration per instance since controller is responsible for
1936          * scheduling it.
1937          */
1938         if (adv_instance && adv_instance->duration) {
1939                 u16 duration = adv_instance->duration * MSEC_PER_SEC;
1940
1941                 /* Time = N * 10 ms */
1942                 adv_set->duration = cpu_to_le16(duration / 10);
1943         }
1944
1945         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1946                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1947                     data);
1948
1949         return 0;
1950 }
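
/* Illustrative example: an instance with a 2 second duration maps to
 * the controller duration field as follows:
 *
 *	duration = 2 * MSEC_PER_SEC;			2000 ms
 *	adv_set->duration = cpu_to_le16(2000 / 10);	200 * 10 ms units
 */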
1951
1952 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1953 {
1954         struct hci_dev *hdev = req->hdev;
1955         int err;
1956
1957         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1958                 __hci_req_disable_advertising(req);
1959
1960         err = __hci_req_setup_ext_adv_instance(req, instance);
1961         if (err < 0)
1962                 return err;
1963
1964         __hci_req_update_scan_rsp_data(req, instance);
1965         __hci_req_enable_ext_advertising(req, instance);
1966
1967         return 0;
1968 }
1969
1970 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1971                                     bool force)
1972 {
1973         struct hci_dev *hdev = req->hdev;
1974         struct adv_info *adv_instance = NULL;
1975         u16 timeout;
1976
1977         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1978             list_empty(&hdev->adv_instances))
1979                 return -EPERM;
1980
1981         if (hdev->adv_instance_timeout)
1982                 return -EBUSY;
1983
1984         adv_instance = hci_find_adv_instance(hdev, instance);
1985         if (!adv_instance)
1986                 return -ENOENT;
1987
1988         /* A zero timeout means unlimited advertising. As long as there is
1989          * only one instance, duration should be ignored. We still set a timeout
1990          * in case further instances are being added later on.
1991          *
1992          * If the remaining lifetime of the instance is more than the duration
1993          * then the timeout corresponds to the duration, otherwise it will be
1994          * reduced to the remaining instance lifetime.
1995          */
1996         if (adv_instance->timeout == 0 ||
1997             adv_instance->duration <= adv_instance->remaining_time)
1998                 timeout = adv_instance->duration;
1999         else
2000                 timeout = adv_instance->remaining_time;
2001
2002         /* The remaining time is being reduced unless the instance is being
2003          * advertised without time limit.
2004          */
2005         if (adv_instance->timeout)
2006                 adv_instance->remaining_time =
2007                                 adv_instance->remaining_time - timeout;
2008
2009         /* Only use work for scheduling instances with legacy advertising */
2010         if (!ext_adv_capable(hdev)) {
2011                 hdev->adv_instance_timeout = timeout;
2012                 queue_delayed_work(hdev->req_workqueue,
2013                            &hdev->adv_instance_expire,
2014                            msecs_to_jiffies(timeout * 1000));
2015         }
2016
2017         /* If we're just re-scheduling the same instance again then do not
2018          * execute any HCI commands. This happens when a single instance is
2019          * being advertised.
2020          */
2021         if (!force && hdev->cur_adv_instance == instance &&
2022             hci_dev_test_flag(hdev, HCI_LE_ADV))
2023                 return 0;
2024
2025         hdev->cur_adv_instance = instance;
2026         if (ext_adv_capable(hdev)) {
2027                 __hci_req_start_ext_adv(req, instance);
2028         } else {
2029                 __hci_req_update_adv_data(req, instance);
2030                 __hci_req_update_scan_rsp_data(req, instance);
2031                 __hci_req_enable_advertising(req);
2032         }
2033
2034         return 0;
2035 }
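
/* Illustrative example of the timeout selection above: an instance with
 * duration 10 s, timeout 60 s and 30 s of remaining lifetime advertises
 * for timeout = 10 s (remaining_time drops to 20 s); with only 7 s of
 * remaining lifetime it advertises for timeout = 7 s (remaining_time
 * drops to 0).
 */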
2036
2037 static void cancel_adv_timeout(struct hci_dev *hdev)
2038 {
2039         if (hdev->adv_instance_timeout) {
2040                 hdev->adv_instance_timeout = 0;
2041                 cancel_delayed_work(&hdev->adv_instance_expire);
2042         }
2043 }
2044
2045 /* For a single instance:
2046  * - force == true: The instance will be removed even when its remaining
2047  *   lifetime is not zero.
2048  * - force == false: The instance will be deactivated but kept stored unless
2049  *   the remaining lifetime is zero.
2050  *
2051  * For instance == 0x00:
2052  * - force == true: All instances will be removed regardless of their timeout
2053  *   setting.
2054  * - force == false: Only instances that have a timeout will be removed.
2055  */
2056 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2057                                 struct hci_request *req, u8 instance,
2058                                 bool force)
2059 {
2060         struct adv_info *adv_instance, *n, *next_instance = NULL;
2061         int err;
2062         u8 rem_inst;
2063
2064         /* Cancel any timeout concerning the removed instance(s). */
2065         if (!instance || hdev->cur_adv_instance == instance)
2066                 cancel_adv_timeout(hdev);
2067
2068         /* Get the next instance to advertise BEFORE we remove
2069          * the current one. This can be the same instance again
2070          * if there is only one instance.
2071          */
2072         if (instance && hdev->cur_adv_instance == instance)
2073                 next_instance = hci_get_next_instance(hdev, instance);
2074
2075         if (instance == 0x00) {
2076                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2077                                          list) {
2078                         if (!(force || adv_instance->timeout))
2079                                 continue;
2080
2081                         rem_inst = adv_instance->instance;
2082                         err = hci_remove_adv_instance(hdev, rem_inst);
2083                         if (!err)
2084                                 mgmt_advertising_removed(sk, hdev, rem_inst);
2085                 }
2086         } else {
2087                 adv_instance = hci_find_adv_instance(hdev, instance);
2088
2089                 if (force || (adv_instance && adv_instance->timeout &&
2090                               !adv_instance->remaining_time)) {
2091                         /* Don't advertise a removed instance. */
2092                         if (next_instance &&
2093                             next_instance->instance == instance)
2094                                 next_instance = NULL;
2095
2096                         err = hci_remove_adv_instance(hdev, instance);
2097                         if (!err)
2098                                 mgmt_advertising_removed(sk, hdev, instance);
2099                 }
2100         }
2101
2102         if (!req || !hdev_is_powered(hdev) ||
2103             hci_dev_test_flag(hdev, HCI_ADVERTISING))
2104                 return;
2105
2106         if (next_instance)
2107                 __hci_req_schedule_adv_instance(req, next_instance->instance,
2108                                                 false);
2109 }
2110
2111 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2112 {
2113         struct hci_dev *hdev = req->hdev;
2114
2115         /* If we're advertising or initiating an LE connection we can't
2116          * go ahead and change the random address at this time. This is
2117          * because the eventual initiator address used for the
2118          * subsequently created connection will be undefined (some
2119          * controllers use the new address and others the one we had
2120          * when the operation started).
2121          *
2122          * In this kind of scenario skip the update and let the random
2123          * address be updated at the next cycle.
2124          */
2125         if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2126             hci_lookup_le_connect(hdev)) {
2127                 BT_DBG("Deferring random address update");
2128                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2129                 return;
2130         }
2131
2132         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2133 }
2134
2135 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2136                               bool use_rpa, u8 *own_addr_type)
2137 {
2138         struct hci_dev *hdev = req->hdev;
2139         int err;
2140
2141         /* If privacy is enabled use a resolvable private address. If
2142          * the current RPA has expired or something other than the
2143          * current RPA is in use, then generate a new one.
2144          */
2145         if (use_rpa) {
2146                 int to;
2147
2148                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2149
2150                 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2151                     !bacmp(&hdev->random_addr, &hdev->rpa))
2152                         return 0;
2153
2154                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2155                 if (err < 0) {
2156                         bt_dev_err(hdev, "failed to generate new RPA");
2157                         return err;
2158                 }
2159
2160                 set_random_addr(req, &hdev->rpa);
2161
2162                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2163                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2164
2165                 return 0;
2166         }
2167
2168         /* In case of required privacy without resolvable private address,
2169          * use a non-resolvable private address. This is useful for active
2170          * scanning and non-connectable advertising.
2171          */
2172         if (require_privacy) {
2173                 bdaddr_t nrpa;
2174
2175                 while (true) {
2176                         /* The non-resolvable private address is generated
2177                          * from random six bytes with the two most significant
2178                          * bits cleared.
2179                          */
2180                         get_random_bytes(&nrpa, 6);
2181                         nrpa.b[5] &= 0x3f;
2182
2183                         /* The non-resolvable private address shall not be
2184                          * equal to the public address.
2185                          */
2186                         if (bacmp(&hdev->bdaddr, &nrpa))
2187                                 break;
2188                 }
2189
2190                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2191                 set_random_addr(req, &nrpa);
2192                 return 0;
2193         }
2194
2195         /* If forcing a static address is in use or there is no public
2196          * address, use the static address as the random address (but
2197          * skip the HCI command if the current random address is
2198          * already the static one).
2199          *
2200          * In case BR/EDR has been disabled on a dual-mode controller
2201          * and a static address has been configured, then use that
2202          * address instead of the public BR/EDR address.
2203          */
2204         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2205             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2206             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2207              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2208                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2209                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2210                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2211                                     &hdev->static_addr);
2212                 return 0;
2213         }
2214
2215         /* Neither privacy nor static address is being used so use a
2216          * public address.
2217          */
2218         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2219
2220         return 0;
2221 }
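
/* Summary of the own-address selection above (informational):
 *
 *	use_rpa					random, RPA
 *	require_privacy				random, fresh NRPA
 *	forced or implied static address	random, hdev->static_addr
 *	otherwise				public, hdev->bdaddr
 */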
2222
2223 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2224 {
2225         struct bdaddr_list *b;
2226
2227         list_for_each_entry(b, &hdev->whitelist, list) {
2228                 struct hci_conn *conn;
2229
2230                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2231                 if (!conn)
2232                         return true;
2233
2234                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2235                         return true;
2236         }
2237
2238         return false;
2239 }
2240
2241 void __hci_req_update_scan(struct hci_request *req)
2242 {
2243         struct hci_dev *hdev = req->hdev;
2244         u8 scan;
2245
2246         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2247                 return;
2248
2249         if (!hdev_is_powered(hdev))
2250                 return;
2251
2252         if (mgmt_powering_down(hdev))
2253                 return;
2254
2255         if (hdev->scanning_paused)
2256                 return;
2257
2258         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2259             disconnected_whitelist_entries(hdev))
2260                 scan = SCAN_PAGE;
2261         else
2262                 scan = SCAN_DISABLED;
2263
2264         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2265                 scan |= SCAN_INQUIRY;
2266
2267         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2268             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2269                 return;
2270
2271         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2272 }
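
/* Illustrative example: a connectable and discoverable BR/EDR device
 * ends up with scan = SCAN_PAGE | SCAN_INQUIRY; the Write Scan Enable
 * command is skipped when HCI_PSCAN/HCI_ISCAN already match the wanted
 * state.
 */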
2273
2274 static int update_scan(struct hci_request *req, unsigned long opt)
2275 {
2276         hci_dev_lock(req->hdev);
2277         __hci_req_update_scan(req);
2278         hci_dev_unlock(req->hdev);
2279         return 0;
2280 }
2281
2282 static void scan_update_work(struct work_struct *work)
2283 {
2284         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2285
2286         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2287 }
2288
2289 static int connectable_update(struct hci_request *req, unsigned long opt)
2290 {
2291         struct hci_dev *hdev = req->hdev;
2292
2293         hci_dev_lock(hdev);
2294
2295         __hci_req_update_scan(req);
2296
2297         /* If BR/EDR is not enabled and we disable advertising as a
2298          * by-product of disabling connectable, we need to update the
2299          * advertising flags.
2300          */
2301         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2302                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2303
2304         /* Update the advertising parameters if necessary */
2305         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2306             !list_empty(&hdev->adv_instances)) {
2307                 if (ext_adv_capable(hdev))
2308                         __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2309                 else
2310                         __hci_req_enable_advertising(req);
2311         }
2312
2313         __hci_update_background_scan(req);
2314
2315         hci_dev_unlock(hdev);
2316
2317         return 0;
2318 }
2319
2320 static void connectable_update_work(struct work_struct *work)
2321 {
2322         struct hci_dev *hdev = container_of(work, struct hci_dev,
2323                                             connectable_update);
2324         u8 status;
2325
2326         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2327         mgmt_set_connectable_complete(hdev, status);
2328 }
2329
2330 static u8 get_service_classes(struct hci_dev *hdev)
2331 {
2332         struct bt_uuid *uuid;
2333         u8 val = 0;
2334
2335         list_for_each_entry(uuid, &hdev->uuids, list)
2336                 val |= uuid->svc_hint;
2337
2338         return val;
2339 }
2340
2341 void __hci_req_update_class(struct hci_request *req)
2342 {
2343         struct hci_dev *hdev = req->hdev;
2344         u8 cod[3];
2345
2346         BT_DBG("%s", hdev->name);
2347
2348         if (!hdev_is_powered(hdev))
2349                 return;
2350
2351         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2352                 return;
2353
2354         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2355                 return;
2356
2357         cod[0] = hdev->minor_class;
2358         cod[1] = hdev->major_class;
2359         cod[2] = get_service_classes(hdev);
2360
2361         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2362                 cod[1] |= 0x20;
2363
2364         if (memcmp(cod, hdev->dev_class, 3) == 0)
2365                 return;
2366
2367         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2368 }
2369
2370 static void write_iac(struct hci_request *req)
2371 {
2372         struct hci_dev *hdev = req->hdev;
2373         struct hci_cp_write_current_iac_lap cp;
2374
2375         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2376                 return;
2377
2378         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2379                 /* Limited discoverable mode */
2380                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2381                 cp.iac_lap[0] = 0x00;   /* LIAC */
2382                 cp.iac_lap[1] = 0x8b;
2383                 cp.iac_lap[2] = 0x9e;
2384                 cp.iac_lap[3] = 0x33;   /* GIAC */
2385                 cp.iac_lap[4] = 0x8b;
2386                 cp.iac_lap[5] = 0x9e;
2387         } else {
2388                 /* General discoverable mode */
2389                 cp.num_iac = 1;
2390                 cp.iac_lap[0] = 0x33;   /* GIAC */
2391                 cp.iac_lap[1] = 0x8b;
2392                 cp.iac_lap[2] = 0x9e;
2393         }
2394
2395         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2396                     (cp.num_iac * 3) + 1, &cp);
2397 }
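
/* The LAPs above are the little-endian encodings of the assigned
 * numbers GIAC 0x9e8b33 and LIAC 0x9e8b00, e.g. for the GIAC:
 *
 *	cp.iac_lap[0] = 0x33;	LSB
 *	cp.iac_lap[1] = 0x8b;
 *	cp.iac_lap[2] = 0x9e;	MSB
 */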
2398
2399 static int discoverable_update(struct hci_request *req, unsigned long opt)
2400 {
2401         struct hci_dev *hdev = req->hdev;
2402
2403         hci_dev_lock(hdev);
2404
2405         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2406                 write_iac(req);
2407                 __hci_req_update_scan(req);
2408                 __hci_req_update_class(req);
2409         }
2410
2411         /* Advertising instances don't use the global discoverable setting, so
2412          * only update AD if advertising was enabled using Set Advertising.
2413          */
2414         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2415                 __hci_req_update_adv_data(req, 0x00);
2416
2417                 /* Discoverable mode affects the local advertising
2418                  * address in limited privacy mode.
2419                  */
2420                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2421                         if (ext_adv_capable(hdev))
2422                                 __hci_req_start_ext_adv(req, 0x00);
2423                         else
2424                                 __hci_req_enable_advertising(req);
2425                 }
2426         }
2427
2428         hci_dev_unlock(hdev);
2429
2430         return 0;
2431 }
2432
2433 static void discoverable_update_work(struct work_struct *work)
2434 {
2435         struct hci_dev *hdev = container_of(work, struct hci_dev,
2436                                             discoverable_update);
2437         u8 status;
2438
2439         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2440         mgmt_set_discoverable_complete(hdev, status);
2441 }
2442
2443 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2444                       u8 reason)
2445 {
2446         switch (conn->state) {
2447         case BT_CONNECTED:
2448         case BT_CONFIG:
2449                 if (conn->type == AMP_LINK) {
2450                         struct hci_cp_disconn_phy_link cp;
2451
2452                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2453                         cp.reason = reason;
2454                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2455                                     &cp);
2456                 } else {
2457                         struct hci_cp_disconnect dc;
2458
2459                         dc.handle = cpu_to_le16(conn->handle);
2460                         dc.reason = reason;
2461                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2462                 }
2463
2464                 conn->state = BT_DISCONN;
2465
2466                 break;
2467         case BT_CONNECT:
2468                 if (conn->type == LE_LINK) {
2469                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2470                                 break;
2471                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2472                                     0, NULL);
2473                 } else if (conn->type == ACL_LINK) {
2474                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2475                                 break;
2476                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2477                                     6, &conn->dst);
2478                 }
2479                 break;
2480         case BT_CONNECT2:
2481                 if (conn->type == ACL_LINK) {
2482                         struct hci_cp_reject_conn_req rej;
2483
2484                         bacpy(&rej.bdaddr, &conn->dst);
2485                         rej.reason = reason;
2486
2487                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2488                                     sizeof(rej), &rej);
2489                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2490                         struct hci_cp_reject_sync_conn_req rej;
2491
2492                         bacpy(&rej.bdaddr, &conn->dst);
2493
2494                         /* SCO rejection has its own limited set of
2495                          * allowed error values (0x0D-0x0F) which isn't
2496                          * compatible with most values passed to this
2497                          * function. To be safe hard-code one of the
2498                          * values that's suitable for SCO.
2499                          */
2500                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2501
2502                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2503                                     sizeof(rej), &rej);
2504                 }
2505                 break;
2506         default:
2507                 conn->state = BT_CLOSED;
2508                 break;
2509         }
2510 }
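
/* Summary of the abort strategy above (informational):
 *
 *	BT_CONNECTED/BT_CONFIG	Disconnect (or Disconnect Physical Link
 *				for AMP)
 *	BT_CONNECT		LE Create Connection Cancel, or Create
 *				Connection Cancel for ACL (>= 1.2 only)
 *	BT_CONNECT2		Reject Connection Request (ACL/SCO/eSCO)
 *	otherwise		mark the connection BT_CLOSED locally
 */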
2511
2512 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2513 {
2514         if (status)
2515                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2516 }
2517
2518 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2519 {
2520         struct hci_request req;
2521         int err;
2522
2523         hci_req_init(&req, conn->hdev);
2524
2525         __hci_abort_conn(&req, conn, reason);
2526
2527         err = hci_req_run(&req, abort_conn_complete);
2528         if (err && err != -ENODATA) {
2529                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2530                 return err;
2531         }
2532
2533         return 0;
2534 }
2535
2536 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2537 {
2538         hci_dev_lock(req->hdev);
2539         __hci_update_background_scan(req);
2540         hci_dev_unlock(req->hdev);
2541         return 0;
2542 }
2543
2544 static void bg_scan_update(struct work_struct *work)
2545 {
2546         struct hci_dev *hdev = container_of(work, struct hci_dev,
2547                                             bg_scan_update);
2548         struct hci_conn *conn;
2549         u8 status;
2550         int err;
2551
2552         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2553         if (!err)
2554                 return;
2555
2556         hci_dev_lock(hdev);
2557
2558         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2559         if (conn)
2560                 hci_le_conn_failed(conn, status);
2561
2562         hci_dev_unlock(hdev);
2563 }
2564
2565 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2566 {
2567         hci_req_add_le_scan_disable(req);
2568         return 0;
2569 }
2570
2571 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2572 {
2573         u8 length = opt;
2574         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2575         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2576         struct hci_cp_inquiry cp;
2577
2578         BT_DBG("%s", req->hdev->name);
2579
2580         hci_dev_lock(req->hdev);
2581         hci_inquiry_cache_flush(req->hdev);
2582         hci_dev_unlock(req->hdev);
2583
2584         memset(&cp, 0, sizeof(cp));
2585
2586         if (req->hdev->discovery.limited)
2587                 memcpy(&cp.lap, liac, sizeof(cp.lap));
2588         else
2589                 memcpy(&cp.lap, giac, sizeof(cp.lap));
2590
2591         cp.length = length;
2592
2593         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2594
2595         return 0;
2596 }
2597
2598 static void le_scan_disable_work(struct work_struct *work)
2599 {
2600         struct hci_dev *hdev = container_of(work, struct hci_dev,
2601                                             le_scan_disable.work);
2602         u8 status;
2603
2604         BT_DBG("%s", hdev->name);
2605
2606         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2607                 return;
2608
2609         cancel_delayed_work(&hdev->le_scan_restart);
2610
2611         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2612         if (status) {
2613                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2614                            status);
2615                 return;
2616         }
2617
2618         hdev->discovery.scan_start = 0;
2619
2620         /* If we were running LE only scan, change discovery state. If
2621          * we were running both LE and BR/EDR inquiry simultaneously,
2622          * and BR/EDR inquiry is already finished, stop discovery,
2623          * otherwise BR/EDR inquiry will stop discovery when finished.
2624          * If we are resolving a remote device name, do not change
2625          * the discovery state.
2626          */
2627
2628         if (hdev->discovery.type == DISCOV_TYPE_LE)
2629                 goto discov_stopped;
2630
2631         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2632                 return;
2633
2634         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2635                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2636                     hdev->discovery.state != DISCOVERY_RESOLVING)
2637                         goto discov_stopped;
2638
2639                 return;
2640         }
2641
2642         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2643                      HCI_CMD_TIMEOUT, &status);
2644         if (status) {
2645                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2646                 goto discov_stopped;
2647         }
2648
2649         return;
2650
2651 discov_stopped:
2652         hci_dev_lock(hdev);
2653         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2654         hci_dev_unlock(hdev);
2655 }
2656
2657 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2658 {
2659         struct hci_dev *hdev = req->hdev;
2660
2661         /* If the controller is not scanning, we are done. */
2662         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2663                 return 0;
2664
2665         if (hdev->scanning_paused) {
2666                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2667                 return 0;
2668         }
2669
2670         hci_req_add_le_scan_disable(req);
2671
2672         if (use_ext_scan(hdev)) {
2673                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2674
2675                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2676                 ext_enable_cp.enable = LE_SCAN_ENABLE;
2677                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2678
2679                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2680                             sizeof(ext_enable_cp), &ext_enable_cp);
2681         } else {
2682                 struct hci_cp_le_set_scan_enable cp;
2683
2684                 memset(&cp, 0, sizeof(cp));
2685                 cp.enable = LE_SCAN_ENABLE;
2686                 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2687                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2688         }
2689
2690         return 0;
2691 }
2692
2693 static void le_scan_restart_work(struct work_struct *work)
2694 {
2695         struct hci_dev *hdev = container_of(work, struct hci_dev,
2696                                             le_scan_restart.work);
2697         unsigned long timeout, duration, scan_start, now;
2698         u8 status;
2699
2700         BT_DBG("%s", hdev->name);
2701
2702         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2703         if (status) {
2704                 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2705                            status);
2706                 return;
2707         }
2708
2709         hci_dev_lock(hdev);
2710
2711         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2712             !hdev->discovery.scan_start)
2713                 goto unlock;
2714
2715         /* When the scan was started, hdev->le_scan_disable was queued
2716          * to run 'duration' after scan_start. During the scan restart
2717          * this work was canceled, so queue it again with the proper
2718          * timeout to make sure the scan does not run indefinitely.
2719          */
2720         duration = hdev->discovery.scan_duration;
2721         scan_start = hdev->discovery.scan_start;
2722         now = jiffies;
2723         if (now - scan_start <= duration) {
2724                 int elapsed;
2725
2726                 if (now >= scan_start)
2727                         elapsed = now - scan_start;
2728                 else
2729                         elapsed = ULONG_MAX - scan_start + now;
2730
2731                 timeout = duration - elapsed;
2732         } else {
2733                 timeout = 0;
2734         }
2735
2736         queue_delayed_work(hdev->req_workqueue,
2737                            &hdev->le_scan_disable, timeout);
2738
2739 unlock:
2740         hci_dev_unlock(hdev);
2741 }
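
/* Note on the wraparound handling above (informational): if jiffies
 * wrapped between scan_start and now, then now < scan_start and the
 * elapsed time is computed across the wrap:
 *
 *	elapsed = ULONG_MAX - scan_start + now;
 *
 * which matches the true distance to within one jiffy, close enough
 * for rescheduling the scan-disable work.
 */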
2742
2743 static int active_scan(struct hci_request *req, unsigned long opt)
2744 {
2745         u16 interval = opt;
2746         struct hci_dev *hdev = req->hdev;
2747         u8 own_addr_type;
2748         /* White list is not used for discovery */
2749         u8 filter_policy = 0x00;
2750         int err;
2751
2752         BT_DBG("%s", hdev->name);
2753
2754         /* If controller is scanning, it means the background scanning is
2755          * running. Thus, we should temporarily stop it in order to set the
2756          * discovery scanning parameters.
2757          */
2758         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2759                 hci_req_add_le_scan_disable(req);
2760
2761         /* All active scans will be done with either a resolvable private
2762          * address (when privacy feature has been enabled) or non-resolvable
2763          * private address.
2764          */
2765         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2766                                         &own_addr_type);
2767         if (err < 0)
2768                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2769
2770         hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2771                            hdev->le_scan_window_discovery, own_addr_type,
2772                            filter_policy);
2773         return 0;
2774 }
2775
2776 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2777 {
2778         int err;
2779
2780         BT_DBG("%s", req->hdev->name);
2781
2782         err = active_scan(req, opt);
2783         if (err)
2784                 return err;
2785
2786         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2787 }
2788
2789 static void start_discovery(struct hci_dev *hdev, u8 *status)
2790 {
2791         unsigned long timeout;
2792
2793         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2794
2795         switch (hdev->discovery.type) {
2796         case DISCOV_TYPE_BREDR:
2797                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2798                         hci_req_sync(hdev, bredr_inquiry,
2799                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2800                                      status);
2801                 return;
2802         case DISCOV_TYPE_INTERLEAVED:
2803                 /* When running simultaneous discovery, the LE scanning time
2804                  * should occupy the whole discovery time since BR/EDR inquiry
2805                  * and LE scanning are scheduled by the controller.
2806                  *
2807                  * For interleaved discovery, in comparison, BR/EDR inquiry
2808                  * and LE scanning are done sequentially with separate
2809                  * timeouts.
2810                  */
2811                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2812                              &hdev->quirks)) {
2813                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2814                         /* During simultaneous discovery, we double the
2815                          * LE scan interval. We must leave some time for
2816                          * the controller to do BR/EDR inquiry.
2817                          */
2818                         hci_req_sync(hdev, interleaved_discov,
2819                                      hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
2820                                      status);
2821                         break;
2822                 }
2823
2824                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2825                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2826                              HCI_CMD_TIMEOUT, status);
2827                 break;
2828         case DISCOV_TYPE_LE:
2829                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2830                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2831                              HCI_CMD_TIMEOUT, status);
2832                 break;
2833         default:
2834                 *status = HCI_ERROR_UNSPECIFIED;
2835                 return;
2836         }
2837
2838         if (*status)
2839                 return;
2840
2841         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2842
2843         /* When service discovery is used and the controller has a
2844          * strict duplicate filter, it is important to remember the
2845          * start and duration of the scan. This is required for
2846          * restarting scanning during the discovery phase.
2847          */
2848         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2849                      hdev->discovery.result_filtering) {
2850                 hdev->discovery.scan_start = jiffies;
2851                 hdev->discovery.scan_duration = timeout;
2852         }
2853
2854         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2855                            timeout);
2856 }
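
/* Summary of the discovery dispatch above (informational):
 *
 *	DISCOV_TYPE_BREDR	inquiry only, no LE scan timeout
 *	DISCOV_TYPE_LE		active scan for DISCOV_LE_TIMEOUT
 *	DISCOV_TYPE_INTERLEAVED	simultaneous inquiry + scan (with the
 *				quirk) or sequential with separate
 *				timeouts
 */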
2857
2858 bool hci_req_stop_discovery(struct hci_request *req)
2859 {
2860         struct hci_dev *hdev = req->hdev;
2861         struct discovery_state *d = &hdev->discovery;
2862         struct hci_cp_remote_name_req_cancel cp;
2863         struct inquiry_entry *e;
2864         bool ret = false;
2865
2866         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2867
2868         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2869                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2870                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2871
2872                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2873                         cancel_delayed_work(&hdev->le_scan_disable);
2874                         hci_req_add_le_scan_disable(req);
2875                 }
2876
2877                 ret = true;
2878         } else {
2879                 /* Passive scanning */
2880                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2881                         hci_req_add_le_scan_disable(req);
2882                         ret = true;
2883                 }
2884         }
2885
2886         /* No further actions needed for LE-only discovery */
2887         if (d->type == DISCOV_TYPE_LE)
2888                 return ret;
2889
2890         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2891                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2892                                                      NAME_PENDING);
2893                 if (!e)
2894                         return ret;
2895
2896                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2897                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2898                             &cp);
2899                 ret = true;
2900         }
2901
2902         return ret;
2903 }
2904
2905 static int stop_discovery(struct hci_request *req, unsigned long opt)
2906 {
2907         hci_dev_lock(req->hdev);
2908         hci_req_stop_discovery(req);
2909         hci_dev_unlock(req->hdev);
2910
2911         return 0;
2912 }
2913
2914 static void discov_update(struct work_struct *work)
2915 {
2916         struct hci_dev *hdev = container_of(work, struct hci_dev,
2917                                             discov_update);
2918         u8 status = 0;
2919
2920         switch (hdev->discovery.state) {
2921         case DISCOVERY_STARTING:
2922                 start_discovery(hdev, &status);
2923                 mgmt_start_discovery_complete(hdev, status);
2924                 if (status)
2925                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2926                 else
2927                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2928                 break;
2929         case DISCOVERY_STOPPING:
2930                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2931                 mgmt_stop_discovery_complete(hdev, status);
2932                 if (!status)
2933                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2934                 break;
2935         case DISCOVERY_STOPPED:
2936         default:
2937                 return;
2938         }
2939 }
2940
2941 static void discov_off(struct work_struct *work)
2942 {
2943         struct hci_dev *hdev = container_of(work, struct hci_dev,
2944                                             discov_off.work);
2945
2946         BT_DBG("%s", hdev->name);
2947
2948         hci_dev_lock(hdev);
2949
2950         /* When the discoverable timeout triggers, make sure that
2951          * the limited discoverable flag is cleared. Even for a
2952          * timeout triggered from general discoverable, it is safe
2953          * to clear the flag unconditionally.
2954          */
2955         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2956         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2957         hdev->discov_timeout = 0;
2958
2959         hci_dev_unlock(hdev);
2960
2961         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2962         mgmt_new_settings(hdev);
2963 }
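
/* Sketch of the arming side of the timeout handled above (an
 * assumption: this mirrors how mgmt schedules discov_off, and
 * example_arm_discov_timeout is hypothetical). The timeout is kept in
 * seconds and converted to jiffies when the delayed work is queued.
 */
static void example_arm_discov_timeout(struct hci_dev *hdev, u16 seconds)
{
        hci_dev_lock(hdev);
        hdev->discov_timeout = seconds;
        hci_dev_unlock(hdev);

        queue_delayed_work(hdev->req_workqueue, &hdev->discov_off,
                           msecs_to_jiffies(seconds * 1000));
}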
2964
2965 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2966 {
2967         struct hci_dev *hdev = req->hdev;
2968         u8 link_sec;
2969
2970         hci_dev_lock(hdev);
2971
2972         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2973             !lmp_host_ssp_capable(hdev)) {
2974                 u8 mode = 0x01;
2975
2976                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2977
2978                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2979                         u8 support = 0x01;
2980
2981                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2982                                     sizeof(support), &support);
2983                 }
2984         }
2985
2986         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2987             lmp_bredr_capable(hdev)) {
2988                 struct hci_cp_write_le_host_supported cp;
2989
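                /* LE is enabled on the host; simultaneous LE and BR/EDR
                 * to the same device (simul) stays disabled, as later
                 * core specification versions deprecate that feature.
                 */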
2990                 cp.le = 0x01;
2991                 cp.simul = 0x00;
2992
2993                 /* Check first whether the host state is already
2994                  * correct (host features set).
2995                  */
2996                 if (cp.le != lmp_host_le_capable(hdev) ||
2997                     cp.simul != lmp_host_le_br_capable(hdev))
2998                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2999                                     sizeof(cp), &cp);
3000         }
3001
3002         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3003                 /* Make sure the controller has a good default for
3004                  * advertising data. This also applies to the case
3005                  * where BR/EDR was toggled during the AUTO_OFF phase.
3006                  */
3007                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3008                     list_empty(&hdev->adv_instances)) {
3009                         int err;
3010
3011                         if (ext_adv_capable(hdev)) {
3012                                 err = __hci_req_setup_ext_adv_instance(req,
3013                                                                        0x00);
3014                                 if (!err)
3015                                         __hci_req_update_scan_rsp_data(req,
3016                                                                        0x00);
3017                         } else {
3018                                 err = 0;
3019                                 __hci_req_update_adv_data(req, 0x00);
3020                                 __hci_req_update_scan_rsp_data(req, 0x00);
3021                         }
3022
3023                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3024                                 if (!ext_adv_capable(hdev))
3025                                         __hci_req_enable_advertising(req);
3026                                 else if (!err)
3027                                         __hci_req_enable_ext_advertising(req,
3028                                                                          0x00);
3029                         }
3030                 } else if (!list_empty(&hdev->adv_instances)) {
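                        /* The HCI_ADVERTISING flag is off but stored
                         * instances exist, so hand control to the
                         * first instance in the list.
                         */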
3031                         struct adv_info *adv_instance;
3032
3033                         adv_instance = list_first_entry(&hdev->adv_instances,
3034                                                         struct adv_info, list);
3035                         __hci_req_schedule_adv_instance(req,
3036                                                         adv_instance->instance,
3037                                                         true);
3038                 }
3039         }
3040
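        /* Sync the host's HCI_LINK_SECURITY setting into the controller,
         * but only when it differs from the current HCI_AUTH state, so
         * no redundant command is sent.
         */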
3041         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3042         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3043                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3044                             sizeof(link_sec), &link_sec);
3045
3046         if (lmp_bredr_capable(hdev)) {
3047                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3048                         __hci_req_write_fast_connectable(req, true);
3049                 else
3050                         __hci_req_write_fast_connectable(req, false);
3051                 __hci_req_update_scan(req);
3052                 __hci_req_update_class(req);
3053                 __hci_req_update_name(req);
3054                 __hci_req_update_eir(req);
3055         }
3056
3057         hci_dev_unlock(hdev);
3058         return 0;
3059 }
3060
3061 int __hci_req_hci_power_on(struct hci_dev *hdev)
3062 {
3063         /* Register the available SMP channels (BR/EDR and LE) only
3064          * when the controller has powered on successfully. This late
3065          * registration is required so that LE SMP can clearly decide
3066          * whether the public address or the static address is used.
3067          */
3068         smp_register(hdev);
3069
3070         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3071                               NULL);
3072 }
3073
3074 void hci_request_setup(struct hci_dev *hdev)
3075 {
3076         INIT_WORK(&hdev->discov_update, discov_update);
3077         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3078         INIT_WORK(&hdev->scan_update, scan_update_work);
3079         INIT_WORK(&hdev->connectable_update, connectable_update_work);
3080         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3081         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3082         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3083         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3084         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3085 }
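
/* hci_request_setup() pairs with hci_request_cancel_all() below: every
 * work item initialized here has a matching cancel on teardown, with
 * the delayed works canceled synchronously.
 */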
3086
3087 void hci_request_cancel_all(struct hci_dev *hdev)
3088 {
3089         hci_req_sync_cancel(hdev, ENODEV);
3090
3091         cancel_work_sync(&hdev->discov_update);
3092         cancel_work_sync(&hdev->bg_scan_update);
3093         cancel_work_sync(&hdev->scan_update);
3094         cancel_work_sync(&hdev->connectable_update);
3095         cancel_work_sync(&hdev->discoverable_update);
3096         cancel_delayed_work_sync(&hdev->discov_off);
3097         cancel_delayed_work_sync(&hdev->le_scan_disable);
3098         cancel_delayed_work_sync(&hdev->le_scan_restart);
3099
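        /* Only cancel the advertising expire work when an instance
         * timeout is actually pending, and clear the bookkeeping so a
         * later schedule starts clean.
         */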
3100         if (hdev->adv_instance_timeout) {
3101                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3102                 hdev->adv_instance_timeout = 0;
3103         }
3104 }