/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
        return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}
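
/* Illustrative usage sketch (editor's addition, not part of the original
 * file): a request is built on the stack, commands are queued, and req_run()
 * splices them onto hdev->cmd_q in one go. The callback, here a hypothetical
 * my_complete_cb(), fires once the final queued command completes.
 *
 *	static void my_complete_cb(struct hci_dev *hdev, u8 status, u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "status 0x%2.2x", status);
 *	}
 *
 *	static int example_read_name(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *		return hci_req_run(&req, my_complete_cb);
 *	}
 */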

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        bt_dev_dbg(hdev, "result 0x%2.2x", result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        bt_dev_dbg(hdev, "err 0x%2.2x", err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        bt_dev_dbg(hdev, "");

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        bt_dev_dbg(hdev, "end: err %d", err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
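
/* Illustrative usage sketch (editor's addition, not part of the original
 * file): a synchronous command blocks until the Command Complete event
 * arrives and returns its skb, which the caller owns and must free.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	(parse skb->data as struct hci_rp_read_bd_addr here)
 *	kfree_skb(skb);
 */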

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        bt_dev_dbg(hdev, "start");

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        bt_dev_dbg(hdev, "end: err %d", err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        /* Check the state after obtaining the lock to protect the HCI_UP
         * against any races from hci_dev_do_close when the controller
         * gets removed.
         */
        if (test_bit(HCI_UP, &hdev->flags))
                ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        else
                ret = -ENETDOWN;
        hci_req_sync_unlock(hdev);

        return ret;
}
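
/* Illustrative usage sketch (editor's addition, not part of the original
 * file): hci_req_sync() takes a builder callback that queues commands onto
 * the request; the call then blocks until the request completes or times
 * out. The builder below is hypothetical.
 *
 *	static int example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, example_req, SCAN_PAGE, HCI_CMD_TIMEOUT,
 *			   NULL);
 */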

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        bt_dev_dbg(hdev, "skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = hdev->def_page_scan_type;
                acp.interval = cpu_to_le16(hdev->def_page_scan_int);
        }

        acp.window = cpu_to_le16(hdev->def_page_scan_window);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
        hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
        return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
        bt_dev_dbg(hdev, "cancelling interleave scan");

        cancel_delayed_work_sync(&hdev->interleave_scan);

        hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}
/* Return true if an interleaved scan was started as a result of this call;
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
        /* Do interleaved scan only if all of the following are true:
         * - There is at least one ADV monitor
         * - At least one pending LE connection or one device to be scanned for
         * - Monitor offloading is not supported
         * If so, we should alternate between allowlist scan and one without
         * any filters to save power.
         */
        bool use_interleaving = hci_is_adv_monitoring(hdev) &&
                                !(list_empty(&hdev->pend_le_conns) &&
                                  list_empty(&hdev->pend_le_reports)) &&
                                hci_get_adv_monitor_offload_ext(hdev) ==
                                    HCI_ADV_MONITOR_EXT_NONE;
        bool is_interleaving = is_interleave_scanning(hdev);

        if (use_interleaving && !is_interleaving) {
                start_interleave_scan(hdev);
                bt_dev_dbg(hdev, "starting interleave scan");
                return true;
        }

        if (!use_interleaving && is_interleaving)
                cancel_interleave_scan(hdev);

        return false;
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure that proper values are set for the RSSI threshold and
         * UUID filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        bt_dev_dbg(hdev, "ADV monitoring is %s",
                   hci_is_adv_monitoring(hdev) ? "on" : "off");

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports) &&
            !hci_is_adv_monitoring(hdev)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for and no ADV monitors, we should stop
                 * the background scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req, false);

                bt_dev_dbg(hdev, "stopping background scanning");
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req, false);

                hci_req_add_le_passive_scan(req);
                bt_dev_dbg(hdev, "starting background scanning");
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
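
/* Illustrative example (editor's addition, not part of the original file):
 * the helpers above emit standard EIR "length, type, data" structures, where
 * the length octet counts the type octet plus the payload. For a device
 * named "BT" with two 16-bit UUIDs (0x110a, 0x110b), the buffer would start:
 *
 *	03 09 'B' 'T'        -- len 3, EIR_NAME_COMPLETE, name bytes
 *	05 03 0a 11 0b 11    -- len 5, EIR_UUID16_ALL, UUIDs little-endian
 */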

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (hdev->suspended)
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        /* Disable address resolution */
        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
                __u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
                                u8 bdaddr_type)
{
        struct hci_cp_le_del_from_white_list cp;

        cp.bdaddr_type = bdaddr_type;
        bacpy(&cp.bdaddr, bdaddr);

        bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(req->hdev) &&
            hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
                if (irk) {
                        struct hci_cp_le_del_from_resolv_list cp;

                        cp.bdaddr_type = bdaddr_type;
                        bacpy(&cp.bdaddr, bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
                             struct hci_conn_params *params, u8 *num_entries,
                             bool allow_rpa)
{
        struct hci_cp_le_add_to_white_list cp;
        struct hci_dev *hdev = req->hdev;

        /* Already in white list */
        if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
                                   params->addr_type))
                return 0;

        /* Select filter policy to accept all advertising */
        if (*num_entries >= hdev->le_white_list_size)
                return -1;

        /* White list can not be used with RPAs */
        if (!allow_rpa &&
            !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
                return -1;
        }

        /* During suspend, only wakeable devices can be in the whitelist */
        if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                                   params->current_flags))
                return 0;

        *num_entries += 1;
        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(hdev, &params->addr,
                                           params->addr_type);
                if (irk) {
                        struct hci_cp_le_add_to_resolv_list cp;

                        cp.bdaddr_type = params->addr_type;
                        bacpy(&cp.bdaddr, &params->addr);
                        memcpy(cp.peer_irk, irk->val, 16);

                        if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                                memcpy(cp.local_irk, hdev->irk, 16);
                        else
                                memset(cp.local_irk, 0, 16);

                        hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }

        return 0;
}

static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 num_entries = 0;
        bool pend_conn, pend_report;
        /* We allow whitelisting even with RPAs in suspend. In the worst case,
         * we won't be able to wake from devices that use the privacy 1.2
         * features. Additionally, once we support privacy 1.2 and IRK
         * offloading, we can update this to also check for those conditions.
         */
        bool allow_rpa = hdev->suspended;

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
                allow_rpa = true;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                                      &b->bdaddr,
                                                      b->bdaddr_type);
                pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                                        &b->bdaddr,
                                                        b->bdaddr_type);

                /* If the device is not likely to connect or report,
                 * remove it from the whitelist.
                 */
                if (!pend_conn && !pend_report) {
                        del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
                        continue;
                }

                /* White list can not be used with RPAs */
                if (!allow_rpa &&
                    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
                    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        return 0x00;
                }

                num_entries++;
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return filter policy value to not use the
         * white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (add_to_white_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space. Abort if space runs out.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (add_to_white_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* Use the allowlist unless the following conditions are all true:
         * - We are not currently suspending
         * - There is at least one ADV monitor registered and it's not
         *   offloaded
         * - Interleaved scanning is not currently using the allowlist
         */
        if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
            hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
            hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
                return 0x00;

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy,
                               bool addr_resolv)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            addr_resolv) {
                u8 enable = 0x01;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }

        /* Use extended scanning if the Set Extended Scan Parameters and Set
         * Extended Scan Enable commands are supported.
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}
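
/* Illustrative usage sketch (editor's addition, not part of the original
 * file): scan interval and window are expressed in units of 0.625 ms, so an
 * interval of 0x0060 (96) with a window of 0x0030 (48) means "scan for 30 ms
 * out of every 60 ms":
 *
 *	hci_req_start_scan(req, LE_SCAN_PASSIVE, 0x0060, 0x0030,
 *			   own_addr_type, filter_policy, true);
 */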

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *c;

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == LE_LINK && c->state == BT_CONNECT &&
                    test_bit(HCI_CONN_SCANNING, &c->flags)) {
                        rcu_read_unlock();
                        return true;
                }
        }

        rcu_read_unlock();

        return false;
}

/* Call hci_req_add_le_scan_disable() first to disable controller based
 * address resolution before the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;
        u16 window, interval;
        /* Background scanning should run with address resolution */
        bool addr_resolv = true;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        if (hdev->enable_advmon_interleave_scan &&
            __hci_update_interleaved_scan(hdev))
                return;

        bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses and
         * with that having LE privacy enabled, then controllers with
         * Extended Scanner Filter Policies support can now enable support
         * for handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        if (hdev->suspended) {
                window = hdev->le_scan_window_suspend;
                interval = hdev->le_scan_int_suspend;

                set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
        } else if (hci_is_le_conn_scanning(hdev)) {
                window = hdev->le_scan_window_connect;
                interval = hdev->le_scan_int_connect;
        } else if (hci_is_adv_monitoring(hdev)) {
                window = hdev->le_scan_window_adv_monitor;
                interval = hdev->le_scan_int_adv_monitor;
        } else {
                window = hdev->le_scan_window;
                interval = hdev->le_scan_interval;
        }

        bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
        hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
                           own_addr_type, filter_policy, addr_resolv);
}
1118
1119 static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1120 {
1121         struct adv_info *adv_instance;
1122
1123         /* Instance 0x00 always set local name */
1124         if (instance == 0x00)
1125                 return true;
1126
1127         adv_instance = hci_find_adv_instance(hdev, instance);
1128         if (!adv_instance)
1129                 return false;
1130
1131         if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1132             adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1133                 return true;
1134
1135         return adv_instance->scan_rsp_len ? true : false;
1136 }
1137
1138 static void hci_req_clear_event_filter(struct hci_request *req)
1139 {
1140         struct hci_cp_set_event_filter f;
1141
1142         if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
1143                 return;
1144
1145         if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
1146                 memset(&f, 0, sizeof(f));
1147                 f.flt_type = HCI_FLT_CLEAR_ALL;
1148                 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1149         }
1150 }
1151
1152 static void hci_req_set_event_filter(struct hci_request *req)
1153 {
1154         struct bdaddr_list_with_flags *b;
1155         struct hci_cp_set_event_filter f;
1156         struct hci_dev *hdev = req->hdev;
1157         u8 scan = SCAN_DISABLED;
1158         bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
1159
1160         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1161                 return;
1162
1163         /* Always clear event filter when starting */
1164         hci_req_clear_event_filter(req);
1165
1166         list_for_each_entry(b, &hdev->whitelist, list) {
1167                 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1168                                         b->current_flags))
1169                         continue;
1170
1171                 memset(&f, 0, sizeof(f));
1172                 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1173                 f.flt_type = HCI_FLT_CONN_SETUP;
1174                 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1175                 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1176
1177                 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1178                 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1179                 scan = SCAN_PAGE;
1180         }
1181
1182         if (scan && !scanning) {
1183                 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1184                 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1185         } else if (!scan && scanning) {
1186                 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1187                 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1188         }
1189 }
1190
1191 static void cancel_adv_timeout(struct hci_dev *hdev)
1192 {
1193         if (hdev->adv_instance_timeout) {
1194                 hdev->adv_instance_timeout = 0;
1195                 cancel_delayed_work(&hdev->adv_instance_expire);
1196         }
1197 }
1198
1199 /* This function requires the caller holds hdev->lock */
1200 void __hci_req_pause_adv_instances(struct hci_request *req)
1201 {
1202         bt_dev_dbg(req->hdev, "Pausing advertising instances");
1203
1204         /* Call to disable any advertisements active on the controller.
1205          * This will succeed even if no advertisements are configured.
1206          */
1207         __hci_req_disable_advertising(req);
1208
1209         /* If we are using software rotation, pause the loop */
1210         if (!ext_adv_capable(req->hdev))
1211                 cancel_adv_timeout(req->hdev);
1212 }
1213
1214 /* This function requires the caller holds hdev->lock */
1215 static void __hci_req_resume_adv_instances(struct hci_request *req)
1216 {
1217         struct adv_info *adv;
1218
1219         bt_dev_dbg(req->hdev, "Resuming advertising instances");
1220
1221         if (ext_adv_capable(req->hdev)) {
1222                 /* Call for each tracked instance to be re-enabled */
1223                 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1224                         __hci_req_enable_ext_advertising(req,
1225                                                          adv->instance);
1226                 }
1227
1228         } else {
1229                 /* Schedule for most recent instance to be restarted and begin
1230                  * the software rotation loop
1231                  */
1232                 __hci_req_schedule_adv_instance(req,
1233                                                 req->hdev->cur_adv_instance,
1234                                                 true);
1235         }
1236 }
1237
1238 /* This function requires the caller holds hdev->lock */
1239 int hci_req_resume_adv_instances(struct hci_dev *hdev)
1240 {
1241         struct hci_request req;
1242
1243         hci_req_init(&req, hdev);
1244         __hci_req_resume_adv_instances(&req);
1245
1246         return hci_req_run(&req, NULL);
1247 }
1248
1249 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1250 {
1251         bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1252                    status);
1253         if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1254             test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1255                 clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1256                 clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1257                 wake_up(&hdev->suspend_wait_q);
1258         }
1259
1260         if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
1261                 clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1262                 wake_up(&hdev->suspend_wait_q);
1263         }
1264 }
1265
1266 static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
1267                                               bool enable)
1268 {
1269         struct hci_dev *hdev = req->hdev;
1270
1271         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1272         case HCI_ADV_MONITOR_EXT_MSFT:
1273                 msft_req_add_set_filter_enable(req, enable);
1274                 break;
1275         default:
1276                 return;
1277         }
1278
1279         /* No need to block when enabling since it's on resume path */
1280         if (hdev->suspended && !enable)
1281                 set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1282 }
1283
1284 /* Call with hci_dev_lock */
1285 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1286 {
1287         int old_state;
1288         struct hci_conn *conn;
1289         struct hci_request req;
1290         u8 page_scan;
1291         int disconnect_counter;
1292
1293         if (next == hdev->suspend_state) {
1294                 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1295                 goto done;
1296         }
1297
1298         hdev->suspend_state = next;
1299         hci_req_init(&req, hdev);
1300
1301         if (next == BT_SUSPEND_DISCONNECT) {
1302                 /* Mark device as suspended */
1303                 hdev->suspended = true;
1304
1305                 /* Pause discovery if not already stopped */
1306                 old_state = hdev->discovery.state;
1307                 if (old_state != DISCOVERY_STOPPED) {
1308                         set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1309                         hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1310                         queue_work(hdev->req_workqueue, &hdev->discov_update);
1311                 }
1312
1313                 hdev->discovery_paused = true;
1314                 hdev->discovery_old_state = old_state;
1315
1316                 /* Stop directed advertising */
1317                 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1318                 if (old_state) {
1319                         set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1320                         cancel_delayed_work(&hdev->discov_off);
1321                         queue_delayed_work(hdev->req_workqueue,
1322                                            &hdev->discov_off, 0);
1323                 }
1324
1325                 /* Pause other advertisements */
1326                 if (hdev->adv_instance_cnt)
1327                         __hci_req_pause_adv_instances(&req);
1328
1329                 hdev->advertising_paused = true;
1330                 hdev->advertising_old_state = old_state;
1331
1332                 /* Disable page scan if enabled */
1333                 if (test_bit(HCI_PSCAN, &hdev->flags)) {
1334                         page_scan = SCAN_DISABLED;
1335                         hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
1336                                     &page_scan);
1337                         set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1338                 }
1339
1340                 /* Disable LE passive scan if enabled */
1341                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1342                         cancel_interleave_scan(hdev);
1343                         hci_req_add_le_scan_disable(&req, false);
1344                 }
1345
1346                 /* Disable advertisement filters */
1347                 hci_req_add_set_adv_filter_enable(&req, false);
1348
1349                 /* Prevent disconnects from causing scanning to be re-enabled */
1350                 hdev->scanning_paused = true;
1351
1352                 /* Run commands before disconnecting */
1353                 hci_req_run(&req, suspend_req_complete);
1354
1355                 disconnect_counter = 0;
1356                 /* Soft disconnect everything (power off) */
1357                 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1358                         hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1359                         disconnect_counter++;
1360                 }
1361
1362                 if (disconnect_counter > 0) {
1363                         bt_dev_dbg(hdev,
1364                                    "Had %d disconnects. Will wait on them",
1365                                    disconnect_counter);
1366                         set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1367                 }
1368         } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1369                 /* Unpause to take care of updating scanning params */
1370                 hdev->scanning_paused = false;
1371                 /* Enable event filter for paired devices */
1372                 hci_req_set_event_filter(&req);
1373                 /* Enable passive scan at lower duty cycle */
1374                 __hci_update_background_scan(&req);
1375                 /* Pause scan changes again. */
1376                 hdev->scanning_paused = true;
1377                 hci_req_run(&req, suspend_req_complete);
1378         } else {
1379                 hdev->suspended = false;
1380                 hdev->scanning_paused = false;
1381
1382                 /* Clear any event filters and restore scan state */
1383                 hci_req_clear_event_filter(&req);
1384                 __hci_req_update_scan(&req);
1385
1386                 /* Reset passive/background scanning to normal */
1387                 __hci_update_background_scan(&req);
1388                 /* Enable all of the advertisement filters */
1389                 hci_req_add_set_adv_filter_enable(&req, true);
1390
1391                 /* Unpause directed advertising */
1392                 hdev->advertising_paused = false;
1393                 if (hdev->advertising_old_state) {
1394                         set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1395                                 hdev->suspend_tasks);
1396                         hci_dev_set_flag(hdev, HCI_ADVERTISING);
1397                         queue_work(hdev->req_workqueue,
1398                                    &hdev->discoverable_update);
1399                         hdev->advertising_old_state = 0;
1400                 }
1401
1402                 /* Resume other advertisements */
1403                 if (hdev->adv_instance_cnt)
1404                         __hci_req_resume_adv_instances(&req);
1405
1406                 /* Unpause discovery */
1407                 hdev->discovery_paused = false;
1408                 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1409                     hdev->discovery_old_state != DISCOVERY_STOPPING) {
1410                         set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1411                         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1412                         queue_work(hdev->req_workqueue, &hdev->discov_update);
1413                 }
1414
1415                 hci_req_run(&req, suspend_req_complete);
1416         }
1417
1418         hdev->suspend_state = next;
1419
1420 done:
1421         clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1422         wake_up(&hdev->suspend_wait_q);
1423 }
1424
1425 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1426 {
1427         return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
1428 }
1429
1430 void __hci_req_disable_advertising(struct hci_request *req)
1431 {
1432         if (ext_adv_capable(req->hdev)) {
1433                 __hci_req_disable_ext_adv_instance(req, 0x00);
1434
1435         } else {
1436                 u8 enable = 0x00;
1437
1438                 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1439         }
1440 }
1441
1442 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1443 {
1444         u32 flags;
1445         struct adv_info *adv_instance;
1446
1447         if (instance == 0x00) {
1448                 /* Instance 0 always manages the "Tx Power" and "Flags"
1449                  * fields
1450                  */
1451                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1452
1453                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1454                  * corresponds to the "connectable" instance flag.
1455                  */
1456                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1457                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1458
1459                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1460                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1461                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1462                         flags |= MGMT_ADV_FLAG_DISCOV;
1463
1464                 return flags;
1465         }
1466
1467         adv_instance = hci_find_adv_instance(hdev, instance);
1468
1469         /* Return 0 when given an invalid instance identifier. */
1470         if (!adv_instance)
1471                 return 0;
1472
1473         return adv_instance->flags;
1474 }
1475
1476 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1477 {
1478         /* If privacy is not enabled don't use RPA */
1479         if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1480                 return false;
1481
1482         /* If basic privacy mode is enabled use RPA */
1483         if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1484                 return true;
1485
1486         /* If limited privacy mode is enabled, don't use RPA if we're
1487          * both discoverable and bondable.
1488          */
1489         if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1490             hci_dev_test_flag(hdev, HCI_BONDABLE))
1491                 return false;
1492
1493         /* We're neither bondable nor discoverable in the limited
1494          * privacy mode, therefore use RPA.
1495          */
1496         return true;
1497 }
1498
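     /* Check whether advertising of the requested kind is possible while
      * the existing LE connections are up. The bit numbers in the comments
      * below refer to positions in the supported LE states mask
      * (hdev->le_states) reported by the controller.
      */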
1499 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1500 {
1501         /* If there is no connection we are OK to advertise. */
1502         if (hci_conn_num(hdev, LE_LINK) == 0)
1503                 return true;
1504
1505         /* Check le_states if there is any connection in slave role. */
1506         if (hdev->conn_hash.le_num_slave > 0) {
1507                 /* Slave connection state and non-connectable mode bit 20. */
1508                 if (!connectable && !(hdev->le_states[2] & 0x10))
1509                         return false;
1510
1511                 /* Slave connection state and connectable mode bit 38
1512                  * and scannable bit 21.
1513                  */
1514                 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1515                                     !(hdev->le_states[2] & 0x20)))
1516                         return false;
1517         }
1518
1519         /* Check le_states if there is any connection in master role. */
1520         if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1521                 /* Master connection state and non-connectable mode bit 18. */
1522                 if (!connectable && !(hdev->le_states[2] & 0x02))
1523                         return false;
1524
1525                 /* Master connection state and connectable mode bit 35 and
1526                  * scannable bit 19.
1527                  */
1528                 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1529                                     !(hdev->le_states[2] & 0x08)))
1530                         return false;
1531         }
1532
1533         return true;
1534 }
1535
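     /* Queue the commands to configure and enable legacy advertising for
      * the current instance: update the random address if needed, set the
      * advertising parameters and finally set advertising enable.
      */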
1536 void __hci_req_enable_advertising(struct hci_request *req)
1537 {
1538         struct hci_dev *hdev = req->hdev;
1539         struct adv_info *adv_instance;
1540         struct hci_cp_le_set_adv_param cp;
1541         u8 own_addr_type, enable = 0x01;
1542         bool connectable;
1543         u16 adv_min_interval, adv_max_interval;
1544         u32 flags;
1545
1546         flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1547         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1548
1549         /* If the "connectable" instance flag was not set, then choose between
1550          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1551          */
1552         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1553                       mgmt_get_connectable(hdev);
1554
1555         if (!is_advertising_allowed(hdev, connectable))
1556                 return;
1557
1558         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1559                 __hci_req_disable_advertising(req);
1560
1561         /* Clear the HCI_LE_ADV bit temporarily so that the
1562          * hci_update_random_address knows that it's safe to go ahead
1563          * and write a new random address. The flag will be set back on
1564          * as soon as the SET_ADV_ENABLE HCI command completes.
1565          */
1566         hci_dev_clear_flag(hdev, HCI_LE_ADV);
1567
1568         /* Set require_privacy to true only when non-connectable
1569          * advertising is used. In that case it is fine to use a
1570          * non-resolvable private address.
1571          */
1572         if (hci_update_random_address(req, !connectable,
1573                                       adv_use_rpa(hdev, flags),
1574                                       &own_addr_type) < 0)
1575                 return;
1576
1577         memset(&cp, 0, sizeof(cp));
1578
1579         if (adv_instance) {
1580                 adv_min_interval = adv_instance->min_interval;
1581                 adv_max_interval = adv_instance->max_interval;
1582         } else {
1583                 adv_min_interval = hdev->le_adv_min_interval;
1584                 adv_max_interval = hdev->le_adv_max_interval;
1585         }
1586
1587         if (connectable) {
1588                 cp.type = LE_ADV_IND;
1589         } else {
1590                 if (adv_cur_instance_is_scannable(hdev))
1591                         cp.type = LE_ADV_SCAN_IND;
1592                 else
1593                         cp.type = LE_ADV_NONCONN_IND;
1594
1595                 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1596                     hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1597                         adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1598                         adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1599                 }
1600         }
1601
1602         cp.min_interval = cpu_to_le16(adv_min_interval);
1603         cp.max_interval = cpu_to_le16(adv_max_interval);
1604         cp.own_address_type = own_addr_type;
1605         cp.channel_map = hdev->le_adv_channel_map;
1606
1607         hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1608
1609         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1610 }
1611
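     /* Append the device name to the advertising data at @ptr, preferring
      * the complete name when it fits, then the configured short name, and
      * finally a truncated copy of the complete name. Returns the new data
      * length, or @ad_len unchanged if there is no room or no name.
      */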
1612 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1613 {
1614         size_t short_len;
1615         size_t complete_len;
1616
1617         /* no space left for name (+ NULL + type + len) */
1618         if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1619                 return ad_len;
1620
1621         /* use complete name if present and fits */
1622         complete_len = strlen(hdev->dev_name);
1623         if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1624                 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1625                                        hdev->dev_name, complete_len + 1);
1626
1627         /* use short name if present */
1628         short_len = strlen(hdev->short_name);
1629         if (short_len)
1630                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1631                                        hdev->short_name, short_len + 1);
1632
1633         /* use shortened full name if present; we already know that the
1634          * name is longer than HCI_MAX_SHORT_NAME_LENGTH
1635          */
1636         if (complete_len) {
1637                 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1638
1639                 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1640                 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1641
1642                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1643                                        sizeof(name));
1644         }
1645
1646         return ad_len;
1647 }
1648
1649 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1650 {
1651         return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1652 }
1653
1654 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1655 {
1656         u8 scan_rsp_len = 0;
1657
1658         if (hdev->appearance)
1659                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1660
1661         return append_local_name(hdev, ptr, scan_rsp_len);
1662 }
1663
1664 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1665                                         u8 *ptr)
1666 {
1667         struct adv_info *adv_instance;
1668         u32 instance_flags;
1669         u8 scan_rsp_len = 0;
1670
1671         adv_instance = hci_find_adv_instance(hdev, instance);
1672         if (!adv_instance)
1673                 return 0;
1674
1675         instance_flags = adv_instance->flags;
1676
1677         if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
1678                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1679
1680         memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1681                adv_instance->scan_rsp_len);
1682
1683         scan_rsp_len += adv_instance->scan_rsp_len;
1684
1685         if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1686                 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1687
1688         return scan_rsp_len;
1689 }
1690
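     /* Update the controller's scan response data for @instance, but only
      * when it differs from the cached copy. Uses the extended or legacy
      * command depending on the controller's capabilities.
      */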
1691 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1692 {
1693         struct hci_dev *hdev = req->hdev;
1694         u8 len;
1695
1696         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1697                 return;
1698
1699         if (ext_adv_capable(hdev)) {
1700                 struct hci_cp_le_set_ext_scan_rsp_data cp;
1701
1702                 memset(&cp, 0, sizeof(cp));
1703
1704                 if (instance)
1705                         len = create_instance_scan_rsp_data(hdev, instance,
1706                                                             cp.data);
1707                 else
1708                         len = create_default_scan_rsp_data(hdev, cp.data);
1709
1710                 if (hdev->scan_rsp_data_len == len &&
1711                     !memcmp(cp.data, hdev->scan_rsp_data, len))
1712                         return;
1713
1714                 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1715                 hdev->scan_rsp_data_len = len;
1716
1717                 cp.handle = instance;
1718                 cp.length = len;
1719                 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1720                 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1721
1722                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1723                             &cp);
1724         } else {
1725                 struct hci_cp_le_set_scan_rsp_data cp;
1726
1727                 memset(&cp, 0, sizeof(cp));
1728
1729                 if (instance)
1730                         len = create_instance_scan_rsp_data(hdev, instance,
1731                                                             cp.data);
1732                 else
1733                         len = create_default_scan_rsp_data(hdev, cp.data);
1734
1735                 if (hdev->scan_rsp_data_len == len &&
1736                     !memcmp(cp.data, hdev->scan_rsp_data, len))
1737                         return;
1738
1739                 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1740                 hdev->scan_rsp_data_len = len;
1741
1742                 cp.length = len;
1743
1744                 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1745         }
1746 }
1747
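     /* Build the advertising data for @instance into @ptr and return its
      * length. A Flags field is synthesized from the instance and global
      * settings unless the instance data already contains one, and a Tx
      * Power field is appended when requested and a valid value is
      * available.
      */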
1748 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1749 {
1750         struct adv_info *adv_instance = NULL;
1751         u8 ad_len = 0, flags = 0;
1752         u32 instance_flags;
1753
1754         /* Return 0 when the current instance identifier is invalid. */
1755         if (instance) {
1756                 adv_instance = hci_find_adv_instance(hdev, instance);
1757                 if (!adv_instance)
1758                         return 0;
1759         }
1760
1761         instance_flags = get_adv_instance_flags(hdev, instance);
1762
1763         /* If the instance already has the flags set, skip adding them
1764          * again.
1765          */
1766         if (adv_instance && eir_get_data(adv_instance->adv_data,
1767                                          adv_instance->adv_data_len, EIR_FLAGS,
1768                                          NULL))
1769                 goto skip_flags;
1770
1771         /* The Add Advertising command allows userspace to set both the general
1772          * and limited discoverable flags.
1773          */
1774         if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1775                 flags |= LE_AD_GENERAL;
1776
1777         if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1778                 flags |= LE_AD_LIMITED;
1779
1780         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1781                 flags |= LE_AD_NO_BREDR;
1782
1783         if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1784                 /* If a discovery flag wasn't provided, simply use the global
1785                  * settings.
1786                  */
1787                 if (!flags)
1788                         flags |= mgmt_get_adv_discov_flags(hdev);
1789
1790                 /* If flags would still be empty, then there is no need to
1791                  * include the "Flags" AD field.
1792                  */
1793                 if (flags) {
1794                         ptr[0] = 0x02;
1795                         ptr[1] = EIR_FLAGS;
1796                         ptr[2] = flags;
1797
1798                         ad_len += 3;
1799                         ptr += 3;
1800                 }
1801         }
1802
1803 skip_flags:
1804         if (adv_instance) {
1805                 memcpy(ptr, adv_instance->adv_data,
1806                        adv_instance->adv_data_len);
1807                 ad_len += adv_instance->adv_data_len;
1808                 ptr += adv_instance->adv_data_len;
1809         }
1810
1811         if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1812                 s8 adv_tx_power;
1813
1814                 if (ext_adv_capable(hdev)) {
1815                         if (adv_instance)
1816                                 adv_tx_power = adv_instance->tx_power;
1817                         else
1818                                 adv_tx_power = hdev->adv_tx_power;
1819                 } else {
1820                         adv_tx_power = hdev->adv_tx_power;
1821                 }
1822
1823                 /* Include the Tx Power field only if we have a valid value for it */
1824                 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1825                         ptr[0] = 0x02;
1826                         ptr[1] = EIR_TX_POWER;
1827                         ptr[2] = (u8)adv_tx_power;
1828
1829                         ad_len += 3;
1830                         ptr += 3;
1831                 }
1832         }
1833
1834         return ad_len;
1835 }
1836
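     /* Update the controller's advertising data for @instance, but only
      * when it differs from the cached copy. Uses the extended or legacy
      * command depending on the controller's capabilities.
      */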
1837 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1838 {
1839         struct hci_dev *hdev = req->hdev;
1840         u8 len;
1841
1842         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1843                 return;
1844
1845         if (ext_adv_capable(hdev)) {
1846                 struct hci_cp_le_set_ext_adv_data cp;
1847
1848                 memset(&cp, 0, sizeof(cp));
1849
1850                 len = create_instance_adv_data(hdev, instance, cp.data);
1851
1852                 /* There's nothing to do if the data hasn't changed */
1853                 if (hdev->adv_data_len == len &&
1854                     memcmp(cp.data, hdev->adv_data, len) == 0)
1855                         return;
1856
1857                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1858                 hdev->adv_data_len = len;
1859
1860                 cp.length = len;
1861                 cp.handle = instance;
1862                 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1863                 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1864
1865                 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1866         } else {
1867                 struct hci_cp_le_set_adv_data cp;
1868
1869                 memset(&cp, 0, sizeof(cp));
1870
1871                 len = create_instance_adv_data(hdev, instance, cp.data);
1872
1873                 /* There's nothing to do if the data hasn't changed */
1874                 if (hdev->adv_data_len == len &&
1875                     memcmp(cp.data, hdev->adv_data, len) == 0)
1876                         return;
1877
1878                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1879                 hdev->adv_data_len = len;
1880
1881                 cp.length = len;
1882
1883                 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1884         }
1885 }
1886
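     /* Convenience wrapper that builds and runs a one-shot request
      * updating the advertising data for @instance. Illustrative caller
      * pattern (a sketch, not a verbatim call site):
      *
      *	err = hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
      *	if (err)
      *		bt_dev_err(hdev, "failed to update adv data: %d", err);
      */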
1887 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1888 {
1889         struct hci_request req;
1890
1891         hci_req_init(&req, hdev);
1892         __hci_req_update_adv_data(&req, instance);
1893
1894         return hci_req_run(&req, NULL);
1895 }
1896
1897 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1898                                             u16 opcode)
1899 {
1900         bt_dev_dbg(hdev, "status %u", status);
1901 }
1902
1903 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1904 {
1905         struct hci_request req;
1906         __u8 enable = 0x00;
1907
1908         if (!use_ll_privacy(hdev) &&
1909             !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1910                 return;
1911
1912         hci_req_init(&req, hdev);
1913
1914         hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1915
1916         hci_req_run(&req, enable_addr_resolution_complete);
1917 }
1918
1919 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1920 {
1921         bt_dev_dbg(hdev, "status %u", status);
1922 }
1923
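     /* Re-enable advertising: schedule the current instance when one is
      * set, otherwise restart the default instance. Does nothing when
      * advertising is disabled and no instances are registered.
      */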
1924 void hci_req_reenable_advertising(struct hci_dev *hdev)
1925 {
1926         struct hci_request req;
1927
1928         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1929             list_empty(&hdev->adv_instances))
1930                 return;
1931
1932         hci_req_init(&req, hdev);
1933
1934         if (hdev->cur_adv_instance) {
1935                 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1936                                                 true);
1937         } else {
1938                 if (ext_adv_capable(hdev)) {
1939                         __hci_req_start_ext_adv(&req, 0x00);
1940                 } else {
1941                         __hci_req_update_adv_data(&req, 0x00);
1942                         __hci_req_update_scan_rsp_data(&req, 0x00);
1943                         __hci_req_enable_advertising(&req);
1944                 }
1945         }
1946
1947         hci_req_run(&req, adv_enable_complete);
1948 }
1949
1950 static void adv_timeout_expire(struct work_struct *work)
1951 {
1952         struct hci_dev *hdev = container_of(work, struct hci_dev,
1953                                             adv_instance_expire.work);
1954
1955         struct hci_request req;
1956         u8 instance;
1957
1958         bt_dev_dbg(hdev, "");
1959
1960         hci_dev_lock(hdev);
1961
1962         hdev->adv_instance_timeout = 0;
1963
1964         instance = hdev->cur_adv_instance;
1965         if (instance == 0x00)
1966                 goto unlock;
1967
1968         hci_req_init(&req, hdev);
1969
1970         hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1971
1972         if (list_empty(&hdev->adv_instances))
1973                 __hci_req_disable_advertising(&req);
1974
1975         hci_req_run(&req, NULL);
1976
1977 unlock:
1978         hci_dev_unlock(hdev);
1979 }
1980
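     /* Queue one round of interleaved scanning: restart passive scanning
      * and toggle the interleave state between the allowlist and no-filter
      * rounds.
      */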
1981 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1982                                            unsigned long opt)
1983 {
1984         struct hci_dev *hdev = req->hdev;
1985         int ret = 0;
1986
1987         hci_dev_lock(hdev);
1988
1989         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1990                 hci_req_add_le_scan_disable(req, false);
1991         hci_req_add_le_passive_scan(req);
1992
1993         switch (hdev->interleave_scan_state) {
1994         case INTERLEAVE_SCAN_ALLOWLIST:
1995                 bt_dev_dbg(hdev, "next state: allowlist");
1996                 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1997                 break;
1998         case INTERLEAVE_SCAN_NO_FILTER:
1999                 bt_dev_dbg(hdev, "next state: no filter");
2000                 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
2001                 break;
2002         case INTERLEAVE_SCAN_NONE:
2003                 bt_dev_err(hdev, "unexpected error");
2004                 ret = -1;
2005         }
2006
2007         hci_dev_unlock(hdev);
2008
2009         return ret;
2010 }
2011
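     /* Run one interleave scan round synchronously and, unless
      * interleaving has been cancelled, re-arm the delayed work with the
      * duration that matches the round just started.
      */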
2012 static void interleave_scan_work(struct work_struct *work)
2013 {
2014         struct hci_dev *hdev = container_of(work, struct hci_dev,
2015                                             interleave_scan.work);
2016         u8 status;
2017         unsigned long timeout;
2018
2019         if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2020                 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2021         } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2022                 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2023         } else {
2024                 bt_dev_err(hdev, "unexpected error");
2025                 return;
2026         }
2027
2028         hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2029                      HCI_CMD_TIMEOUT, &status);
2030
2031         /* Don't continue interleaving if it was canceled */
2032         if (is_interleave_scanning(hdev))
2033                 queue_delayed_work(hdev->req_workqueue,
2034                                    &hdev->interleave_scan, timeout);
2035 }
2036
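     /* Determine the own address type and, when a random address is
      * needed, the address itself for an advertising instance: an RPA when
      * privacy is enabled (regenerating it if it expired), a freshly
      * generated non-resolvable private address when privacy is required
      * but an RPA is not used, and the public address otherwise.
      */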
2037 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2038                            bool use_rpa, struct adv_info *adv_instance,
2039                            u8 *own_addr_type, bdaddr_t *rand_addr)
2040 {
2041         int err;
2042
2043         bacpy(rand_addr, BDADDR_ANY);
2044
2045         /* If privacy is enabled use a resolvable private address. If
2046          * current RPA has expired then generate a new one.
2047          */
2048         if (use_rpa) {
2049                 int to;
2050
2051                 /* If the controller supports LL Privacy, use own address
2052                  * type 0x03 (controller-generated RPA).
2053                  */
2054                 if (use_ll_privacy(hdev) &&
2055                     hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2056                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2057                 else
2058                         *own_addr_type = ADDR_LE_DEV_RANDOM;
2059
2060                 if (adv_instance) {
2061                         if (!adv_instance->rpa_expired &&
2062                             !bacmp(&adv_instance->random_addr, &hdev->rpa))
2063                                 return 0;
2064
2065                         adv_instance->rpa_expired = false;
2066                 } else {
2067                         if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2068                             !bacmp(&hdev->random_addr, &hdev->rpa))
2069                                 return 0;
2070                 }
2071
2072                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2073                 if (err < 0) {
2074                         bt_dev_err(hdev, "failed to generate new RPA");
2075                         return err;
2076                 }
2077
2078                 bacpy(rand_addr, &hdev->rpa);
2079
2080                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2081                 if (adv_instance)
2082                         queue_delayed_work(hdev->workqueue,
2083                                            &adv_instance->rpa_expired_cb, to);
2084                 else
2085                         queue_delayed_work(hdev->workqueue,
2086                                            &hdev->rpa_expired, to);
2087
2088                 return 0;
2089         }
2090
2091         /* In case of required privacy without resolvable private address,
2092          * use a non-resolvable private address. This is useful for
2093          * non-connectable advertising.
2094          */
2095         if (require_privacy) {
2096                 bdaddr_t nrpa;
2097
2098                 while (true) {
2099                         /* The non-resolvable private address is generated
2100                          * from six random bytes with the two most significant
2101                          * bits cleared.
2102                          */
2103                         get_random_bytes(&nrpa, 6);
2104                         nrpa.b[5] &= 0x3f;
2105
2106                         /* The non-resolvable private address shall not be
2107                          * equal to the public address.
2108                          */
2109                         if (bacmp(&hdev->bdaddr, &nrpa))
2110                                 break;
2111                 }
2112
2113                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2114                 bacpy(rand_addr, &nrpa);
2115
2116                 return 0;
2117         }
2118
2119         /* No privacy so use a public address. */
2120         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2121
2122         return 0;
2123 }
2124
2125 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2126 {
2127         hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2128 }
2129
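     /* Queue an LE Set Extended Advertising Parameters command for
      * @instance and, when an own random address is used and has changed,
      * the matching LE Set Advertising Set Random Address command.
      */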
2130 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2131 {
2132         struct hci_cp_le_set_ext_adv_params cp;
2133         struct hci_dev *hdev = req->hdev;
2134         bool connectable;
2135         u32 flags;
2136         bdaddr_t random_addr;
2137         u8 own_addr_type;
2138         int err;
2139         struct adv_info *adv_instance;
2140         bool secondary_adv;
2141
2142         if (instance > 0) {
2143                 adv_instance = hci_find_adv_instance(hdev, instance);
2144                 if (!adv_instance)
2145                         return -EINVAL;
2146         } else {
2147                 adv_instance = NULL;
2148         }
2149
2150         flags = get_adv_instance_flags(hdev, instance);
2151
2152         /* If the "connectable" instance flag was not set, then choose between
2153          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2154          */
2155         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2156                       mgmt_get_connectable(hdev);
2157
2158         if (!is_advertising_allowed(hdev, connectable))
2159                 return -EPERM;
2160
2161         /* Set require_privacy to true only when non-connectable
2162          * advertising is used. In that case it is fine to use a
2163          * non-resolvable private address.
2164          */
2165         err = hci_get_random_address(hdev, !connectable,
2166                                      adv_use_rpa(hdev, flags), adv_instance,
2167                                      &own_addr_type, &random_addr);
2168         if (err < 0)
2169                 return err;
2170
2171         memset(&cp, 0, sizeof(cp));
2172
2173         if (adv_instance) {
2174                 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2175                 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2176                 cp.tx_power = adv_instance->tx_power;
2177         } else {
2178                 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2179                 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2180                 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2181         }
2182
2183         secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2184
2185         if (connectable) {
2186                 if (secondary_adv)
2187                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2188                 else
2189                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2190         } else if (adv_instance_is_scannable(hdev, instance) ||
2191                    (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
2192                 if (secondary_adv)
2193                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2194                 else
2195                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2196         } else {
2197                 if (secondary_adv)
2198                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2199                 else
2200                         cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2201         }
2202
2203         cp.own_addr_type = own_addr_type;
2204         cp.channel_map = hdev->le_adv_channel_map;
2205         cp.handle = instance;
2206
2207         if (flags & MGMT_ADV_FLAG_SEC_2M) {
2208                 cp.primary_phy = HCI_ADV_PHY_1M;
2209                 cp.secondary_phy = HCI_ADV_PHY_2M;
2210         } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2211                 cp.primary_phy = HCI_ADV_PHY_CODED;
2212                 cp.secondary_phy = HCI_ADV_PHY_CODED;
2213         } else {
2214                 /* In all other cases use 1M */
2215                 cp.primary_phy = HCI_ADV_PHY_1M;
2216                 cp.secondary_phy = HCI_ADV_PHY_1M;
2217         }
2218
2219         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2220
2221         if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2222             bacmp(&random_addr, BDADDR_ANY)) {
2223                 struct hci_cp_le_set_adv_set_rand_addr cp;
2224
2225                 /* Check if the random address needs to be updated */
2226                 if (adv_instance) {
2227                         if (!bacmp(&random_addr, &adv_instance->random_addr))
2228                                 return 0;
2229                 } else {
2230                         if (!bacmp(&random_addr, &hdev->random_addr))
2231                                 return 0;
2232                 }
2233
2234                 memset(&cp, 0, sizeof(cp));
2235
2236                 cp.handle = instance;
2237                 bacpy(&cp.bdaddr, &random_addr);
2238
2239                 hci_req_add(req,
2240                             HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2241                             sizeof(cp), &cp);
2242         }
2243
2244         return 0;
2245 }
2246
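     /* Queue an LE Set Extended Advertising Enable command for the single
      * advertising set @instance, passing any per-instance duration so the
      * controller can schedule it.
      */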
2247 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2248 {
2249         struct hci_dev *hdev = req->hdev;
2250         struct hci_cp_le_set_ext_adv_enable *cp;
2251         struct hci_cp_ext_adv_set *adv_set;
2252         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2253         struct adv_info *adv_instance;
2254
2255         if (instance > 0) {
2256                 adv_instance = hci_find_adv_instance(hdev, instance);
2257                 if (!adv_instance)
2258                         return -EINVAL;
2259         } else {
2260                 adv_instance = NULL;
2261         }
2262
2263         cp = (void *) data;
2264         adv_set = (void *) cp->data;
2265
2266         memset(cp, 0, sizeof(*cp));
2267
2268         cp->enable = 0x01;
2269         cp->num_of_sets = 0x01;
2270
2271         memset(adv_set, 0, sizeof(*adv_set));
2272
2273         adv_set->handle = instance;
2274
2275         /* Set duration per instance since controller is responsible for
2276          * scheduling it.
2277          */
2278         if (adv_instance && adv_instance->duration) {
2279                 u16 duration = adv_instance->duration * MSEC_PER_SEC;
2280
2281                 /* Time = N * 10 ms */
2282                 adv_set->duration = cpu_to_le16(duration / 10);
2283         }
2284
2285         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2286                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2287                     data);
2288
2289         return 0;
2290 }
2291
2292 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2293 {
2294         struct hci_dev *hdev = req->hdev;
2295         struct hci_cp_le_set_ext_adv_enable *cp;
2296         struct hci_cp_ext_adv_set *adv_set;
2297         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2298         u8 req_size;
2299
2300         /* If request specifies an instance that doesn't exist, fail */
2301         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2302                 return -EINVAL;
2303
2304         memset(data, 0, sizeof(data));
2305
2306         cp = (void *)data;
2307         adv_set = (void *)cp->data;
2308
2309         /* Instance 0x00 indicates all advertising instances will be disabled */
2310         cp->num_of_sets = !!instance;
2311         cp->enable = 0x00;
2312
2313         adv_set->handle = instance;
2314
2315         req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2316         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2317
2318         return 0;
2319 }
2320
2321 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2322 {
2323         struct hci_dev *hdev = req->hdev;
2324
2325         /* If request specifies an instance that doesn't exist, fail */
2326         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2327                 return -EINVAL;
2328
2329         hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2330
2331         return 0;
2332 }
2333
2334 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2335 {
2336         struct hci_dev *hdev = req->hdev;
2337         struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2338         int err;
2339
2340         /* If the instance isn't pending, the chip knows about it, and it's
2341          * safe to disable it.
2342          */
2343         if (adv_instance && !adv_instance->pending)
2344                 __hci_req_disable_ext_adv_instance(req, instance);
2345
2346         err = __hci_req_setup_ext_adv_instance(req, instance);
2347         if (err < 0)
2348                 return err;
2349
2350         __hci_req_update_scan_rsp_data(req, instance);
2351         __hci_req_enable_ext_advertising(req, instance);
2352
2353         return 0;
2354 }
2355
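     /* Schedule @instance for advertising, deriving its timeout from the
      * configured duration and remaining lifetime. Fails with -EPERM while
      * advertising is enabled globally or no instances exist, -EBUSY while
      * another instance timeout is pending and -ENOENT when the instance
      * is unknown. Illustrative use within a request build (a sketch
      * mirroring what hci_req_reenable_advertising() does):
      *
      *	struct hci_request req;
      *
      *	hci_req_init(&req, hdev);
      *	__hci_req_schedule_adv_instance(&req, instance, true);
      *	hci_req_run(&req, NULL);
      */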
2356 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2357                                     bool force)
2358 {
2359         struct hci_dev *hdev = req->hdev;
2360         struct adv_info *adv_instance = NULL;
2361         u16 timeout;
2362
2363         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2364             list_empty(&hdev->adv_instances))
2365                 return -EPERM;
2366
2367         if (hdev->adv_instance_timeout)
2368                 return -EBUSY;
2369
2370         adv_instance = hci_find_adv_instance(hdev, instance);
2371         if (!adv_instance)
2372                 return -ENOENT;
2373
2374         /* A zero timeout means unlimited advertising. As long as there is
2375          * only one instance, duration should be ignored. We still set a timeout
2376          * in case further instances are being added later on.
2377          *
2378          * If the remaining lifetime of the instance is more than the duration
2379          * then the timeout corresponds to the duration, otherwise it will be
2380          * reduced to the remaining instance lifetime.
2381          */
2382         if (adv_instance->timeout == 0 ||
2383             adv_instance->duration <= adv_instance->remaining_time)
2384                 timeout = adv_instance->duration;
2385         else
2386                 timeout = adv_instance->remaining_time;
2387
2388         /* The remaining time is being reduced unless the instance is being
2389          * advertised without time limit.
2390          */
2391         if (adv_instance->timeout)
2392                 adv_instance->remaining_time =
2393                                 adv_instance->remaining_time - timeout;
2394
2395         /* Only use work for scheduling instances with legacy advertising */
2396         if (!ext_adv_capable(hdev)) {
2397                 hdev->adv_instance_timeout = timeout;
2398                 queue_delayed_work(hdev->req_workqueue,
2399                            &hdev->adv_instance_expire,
2400                            msecs_to_jiffies(timeout * 1000));
2401         }
2402
2403         /* If we're just re-scheduling the same instance, do not execute
2404          * any HCI commands. This happens when a single instance is
2405          * being advertised.
2406          */
2407         if (!force && hdev->cur_adv_instance == instance &&
2408             hci_dev_test_flag(hdev, HCI_LE_ADV))
2409                 return 0;
2410
2411         hdev->cur_adv_instance = instance;
2412         if (ext_adv_capable(hdev)) {
2413                 __hci_req_start_ext_adv(req, instance);
2414         } else {
2415                 __hci_req_update_adv_data(req, instance);
2416                 __hci_req_update_scan_rsp_data(req, instance);
2417                 __hci_req_enable_advertising(req);
2418         }
2419
2420         return 0;
2421 }
2422
2423 /* For a single instance:
2424  * - force == true: The instance will be removed even when its remaining
2425  *   lifetime is not zero.
2426  * - force == false: the instance will be deactivated but kept stored unless
2427  *   the remaining lifetime is zero.
2428  *
2429  * For instance == 0x00:
2430  * - force == true: All instances will be removed regardless of their timeout
2431  *   setting.
2432  * - force == false: Only instances that have a timeout will be removed.
2433  */
2434 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2435                                 struct hci_request *req, u8 instance,
2436                                 bool force)
2437 {
2438         struct adv_info *adv_instance, *n, *next_instance = NULL;
2439         int err;
2440         u8 rem_inst;
2441
2442         /* Cancel any timeout concerning the removed instance(s). */
2443         if (!instance || hdev->cur_adv_instance == instance)
2444                 cancel_adv_timeout(hdev);
2445
2446         /* Get the next instance to advertise BEFORE we remove
2447          * the current one. This can be the same instance again
2448          * if there is only one instance.
2449          */
2450         if (instance && hdev->cur_adv_instance == instance)
2451                 next_instance = hci_get_next_instance(hdev, instance);
2452
2453         if (instance == 0x00) {
2454                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2455                                          list) {
2456                         if (!(force || adv_instance->timeout))
2457                                 continue;
2458
2459                         rem_inst = adv_instance->instance;
2460                         err = hci_remove_adv_instance(hdev, rem_inst);
2461                         if (!err)
2462                                 mgmt_advertising_removed(sk, hdev, rem_inst);
2463                 }
2464         } else {
2465                 adv_instance = hci_find_adv_instance(hdev, instance);
2466
2467                 if (force || (adv_instance && adv_instance->timeout &&
2468                               !adv_instance->remaining_time)) {
2469                         /* Don't advertise a removed instance. */
2470                         if (next_instance &&
2471                             next_instance->instance == instance)
2472                                 next_instance = NULL;
2473
2474                         err = hci_remove_adv_instance(hdev, instance);
2475                         if (!err)
2476                                 mgmt_advertising_removed(sk, hdev, instance);
2477                 }
2478         }
2479
2480         if (!req || !hdev_is_powered(hdev) ||
2481             hci_dev_test_flag(hdev, HCI_ADVERTISING))
2482                 return;
2483
2484         if (next_instance && !ext_adv_capable(hdev))
2485                 __hci_req_schedule_adv_instance(req, next_instance->instance,
2486                                                 false);
2487 }
2488
2489 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2490 {
2491         struct hci_dev *hdev = req->hdev;
2492
2493         /* If we're advertising or initiating an LE connection we can't
2494          * go ahead and change the random address at this time. This is
2495          * because the eventual initiator address used for the
2496          * subsequently created connection will be undefined (some
2497          * controllers use the new address and others the one we had
2498          * when the operation started).
2499          *
2500          * In this kind of scenario skip the update and let the random
2501          * address be updated at the next cycle.
2502          */
2503         if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2504             hci_lookup_le_connect(hdev)) {
2505                 bt_dev_dbg(hdev, "Deferring random address update");
2506                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2507                 return;
2508         }
2509
2510         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2511 }
2512
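     /* Set @own_addr_type and queue any command needed to update the
      * random address, in order of preference: an RPA when privacy is
      * enabled, a non-resolvable private address when privacy is required
      * without an RPA, the static address when it is forced or no public
      * address is available, and the public address otherwise.
      */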
2513 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2514                               bool use_rpa, u8 *own_addr_type)
2515 {
2516         struct hci_dev *hdev = req->hdev;
2517         int err;
2518
2519         /* If privacy is enabled use a resolvable private address. If
2520          * current RPA has expired or there is something else than
2521          * the current RPA in use, then generate a new one.
2522          */
2523         if (use_rpa) {
2524                 int to;
2525
2526                 /* If the controller supports LL Privacy, use own address
2527                  * type 0x03 (controller-generated RPA).
2528                  */
2529                 if (use_ll_privacy(hdev) &&
2530                     hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2531                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2532                 else
2533                         *own_addr_type = ADDR_LE_DEV_RANDOM;
2534
2535                 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2536                     !bacmp(&hdev->random_addr, &hdev->rpa))
2537                         return 0;
2538
2539                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2540                 if (err < 0) {
2541                         bt_dev_err(hdev, "failed to generate new RPA");
2542                         return err;
2543                 }
2544
2545                 set_random_addr(req, &hdev->rpa);
2546
2547                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2548                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2549
2550                 return 0;
2551         }
2552
2553         /* In case of required privacy without resolvable private address,
2554          * use a non-resolvable private address. This is useful for active
2555          * scanning and non-connectable advertising.
2556          */
2557         if (require_privacy) {
2558                 bdaddr_t nrpa;
2559
2560                 while (true) {
2561                         /* The non-resolvable private address is generated
2562                          * from six random bytes with the two most significant
2563                          * bits cleared.
2564                          */
2565                         get_random_bytes(&nrpa, 6);
2566                         nrpa.b[5] &= 0x3f;
2567
2568                         /* The non-resolvable private address shall not be
2569                          * equal to the public address.
2570                          */
2571                         if (bacmp(&hdev->bdaddr, &nrpa))
2572                                 break;
2573                 }
2574
2575                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2576                 set_random_addr(req, &nrpa);
2577                 return 0;
2578         }
2579
2580         /* If forcing the static address is in use or there is no public
2581          * address, use the static address as the random address (but skip
2582          * the HCI command if the current random address is already the
2583          * static one).
2584          *
2585          * In case BR/EDR has been disabled on a dual-mode controller
2586          * and a static address has been configured, then use that
2587          * address instead of the public BR/EDR address.
2588          */
2589         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2590             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2591             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2592              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2593                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2594                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2595                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2596                                     &hdev->static_addr);
2597                 return 0;
2598         }
2599
2600         /* Neither privacy nor static address is being used so use a
2601          * public address.
2602          */
2603         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2604
2605         return 0;
2606 }
2607
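     /* Return true if any device on the whitelist lacks a fully
      * established ACL connection, in which case page scanning needs to
      * stay enabled so it can connect.
      */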
2608 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2609 {
2610         struct bdaddr_list *b;
2611
2612         list_for_each_entry(b, &hdev->whitelist, list) {
2613                 struct hci_conn *conn;
2614
2615                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2616                 if (!conn)
2617                         return true;
2618
2619                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2620                         return true;
2621         }
2622
2623         return false;
2624 }
2625
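     /* Recompute the BR/EDR scan enable setting (page and/or inquiry scan)
      * from the connectable and discoverable state and queue a write only
      * when the setting actually changes.
      */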
2626 void __hci_req_update_scan(struct hci_request *req)
2627 {
2628         struct hci_dev *hdev = req->hdev;
2629         u8 scan;
2630
2631         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2632                 return;
2633
2634         if (!hdev_is_powered(hdev))
2635                 return;
2636
2637         if (mgmt_powering_down(hdev))
2638                 return;
2639
2640         if (hdev->scanning_paused)
2641                 return;
2642
2643         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2644             disconnected_whitelist_entries(hdev))
2645                 scan = SCAN_PAGE;
2646         else
2647                 scan = SCAN_DISABLED;
2648
2649         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2650                 scan |= SCAN_INQUIRY;
2651
2652         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2653             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2654                 return;
2655
2656         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2657 }
2658
2659 static int update_scan(struct hci_request *req, unsigned long opt)
2660 {
2661         hci_dev_lock(req->hdev);
2662         __hci_req_update_scan(req);
2663         hci_dev_unlock(req->hdev);
2664         return 0;
2665 }
2666
2667 static void scan_update_work(struct work_struct *work)
2668 {
2669         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2670
2671         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2672 }
2673
2674 static int connectable_update(struct hci_request *req, unsigned long opt)
2675 {
2676         struct hci_dev *hdev = req->hdev;
2677
2678         hci_dev_lock(hdev);
2679
2680         __hci_req_update_scan(req);
2681
2682         /* If BR/EDR is not enabled and we disable advertising as a
2683          * by-product of disabling connectable, we need to update the
2684          * advertising flags.
2685          */
2686         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2687                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2688
2689         /* Update the advertising parameters if necessary */
2690         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2691             !list_empty(&hdev->adv_instances)) {
2692                 if (ext_adv_capable(hdev))
2693                         __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2694                 else
2695                         __hci_req_enable_advertising(req);
2696         }
2697
2698         __hci_update_background_scan(req);
2699
2700         hci_dev_unlock(hdev);
2701
2702         return 0;
2703 }
2704
2705 static void connectable_update_work(struct work_struct *work)
2706 {
2707         struct hci_dev *hdev = container_of(work, struct hci_dev,
2708                                             connectable_update);
2709         u8 status;
2710
2711         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2712         mgmt_set_connectable_complete(hdev, status);
2713 }
2714
2715 static u8 get_service_classes(struct hci_dev *hdev)
2716 {
2717         struct bt_uuid *uuid;
2718         u8 val = 0;
2719
2720         list_for_each_entry(uuid, &hdev->uuids, list)
2721                 val |= uuid->svc_hint;
2722
2723         return val;
2724 }
2725
2726 void __hci_req_update_class(struct hci_request *req)
2727 {
2728         struct hci_dev *hdev = req->hdev;
2729         u8 cod[3];
2730
2731         bt_dev_dbg(hdev, "");
2732
2733         if (!hdev_is_powered(hdev))
2734                 return;
2735
2736         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2737                 return;
2738
2739         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2740                 return;
2741
2742         cod[0] = hdev->minor_class;
2743         cod[1] = hdev->major_class;
2744         cod[2] = get_service_classes(hdev);
2745
2746         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2747                 cod[1] |= 0x20;
2748
2749         if (memcmp(cod, hdev->dev_class, 3) == 0)
2750                 return;
2751
2752         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2753 }
2754
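     /* Write the current Inquiry Access Codes: in limited discoverable
      * mode both the LIAC (0x9e8b00) and the GIAC (0x9e8b33) are used,
      * while general discoverable mode uses the GIAC alone.
      */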
2755 static void write_iac(struct hci_request *req)
2756 {
2757         struct hci_dev *hdev = req->hdev;
2758         struct hci_cp_write_current_iac_lap cp;
2759
2760         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2761                 return;
2762
2763         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2764                 /* Limited discoverable mode */
2765                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2766                 cp.iac_lap[0] = 0x00;   /* LIAC */
2767                 cp.iac_lap[1] = 0x8b;
2768                 cp.iac_lap[2] = 0x9e;
2769                 cp.iac_lap[3] = 0x33;   /* GIAC */
2770                 cp.iac_lap[4] = 0x8b;
2771                 cp.iac_lap[5] = 0x9e;
2772         } else {
2773                 /* General discoverable mode */
2774                 cp.num_iac = 1;
2775                 cp.iac_lap[0] = 0x33;   /* GIAC */
2776                 cp.iac_lap[1] = 0x8b;
2777                 cp.iac_lap[2] = 0x9e;
2778         }
2779
2780         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2781                     (cp.num_iac * 3) + 1, &cp);
2782 }
2783
2784 static int discoverable_update(struct hci_request *req, unsigned long opt)
2785 {
2786         struct hci_dev *hdev = req->hdev;
2787
2788         hci_dev_lock(hdev);
2789
2790         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2791                 write_iac(req);
2792                 __hci_req_update_scan(req);
2793                 __hci_req_update_class(req);
2794         }
2795
2796         /* Advertising instances don't use the global discoverable setting, so
2797          * only update AD if advertising was enabled using Set Advertising.
2798          */
2799         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2800                 __hci_req_update_adv_data(req, 0x00);
2801
2802                 /* Discoverable mode affects the local advertising
2803                  * address in limited privacy mode.
2804                  */
2805                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2806                         if (ext_adv_capable(hdev))
2807                                 __hci_req_start_ext_adv(req, 0x00);
2808                         else
2809                                 __hci_req_enable_advertising(req);
2810                 }
2811         }
2812
2813         hci_dev_unlock(hdev);
2814
2815         return 0;
2816 }
2817
2818 static void discoverable_update_work(struct work_struct *work)
2819 {
2820         struct hci_dev *hdev = container_of(work, struct hci_dev,
2821                                             discoverable_update);
2822         u8 status;
2823
2824         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2825         mgmt_set_discoverable_complete(hdev, status);
2826 }
2827
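     /* Queue the command that tears down @conn based on its state:
      * disconnect established links, cancel outgoing connection attempts
      * and reject incoming connection requests.
      */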
2828 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2829                       u8 reason)
2830 {
2831         switch (conn->state) {
2832         case BT_CONNECTED:
2833         case BT_CONFIG:
2834                 if (conn->type == AMP_LINK) {
2835                         struct hci_cp_disconn_phy_link cp;
2836
2837                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2838                         cp.reason = reason;
2839                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2840                                     &cp);
2841                 } else {
2842                         struct hci_cp_disconnect dc;
2843
2844                         dc.handle = cpu_to_le16(conn->handle);
2845                         dc.reason = reason;
2846                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2847                 }
2848
2849                 conn->state = BT_DISCONN;
2850
2851                 break;
2852         case BT_CONNECT:
2853                 if (conn->type == LE_LINK) {
2854                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2855                                 break;
2856                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2857                                     0, NULL);
2858                 } else if (conn->type == ACL_LINK) {
2859                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2860                                 break;
2861                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2862                                     6, &conn->dst);
2863                 }
2864                 break;
2865         case BT_CONNECT2:
2866                 if (conn->type == ACL_LINK) {
2867                         struct hci_cp_reject_conn_req rej;
2868
2869                         bacpy(&rej.bdaddr, &conn->dst);
2870                         rej.reason = reason;
2871
2872                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2873                                     sizeof(rej), &rej);
2874                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2875                         struct hci_cp_reject_sync_conn_req rej;
2876
2877                         bacpy(&rej.bdaddr, &conn->dst);
2878
2879                         /* SCO rejection has its own limited set of
2880                          * allowed error values (0x0D-0x0F) which isn't
2881                          * compatible with most values passed to this
2882                          * function. To be safe hard-code one of the
2883                          * values that's suitable for SCO.
2884                          */
2885                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2886
2887                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2888                                     sizeof(rej), &rej);
2889                 }
2890                 break;
2891         default:
2892                 conn->state = BT_CLOSED;
2893                 break;
2894         }
2895 }
2896
2897 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2898 {
2899         if (status)
2900                 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2901 }
2902
2903 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2904 {
2905         struct hci_request req;
2906         int err;
2907
2908         hci_req_init(&req, conn->hdev);
2909
2910         __hci_abort_conn(&req, conn, reason);
2911
2912         err = hci_req_run(&req, abort_conn_complete);
2913         if (err && err != -ENODATA) {
2914                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2915                 return err;
2916         }
2917
2918         return 0;
2919 }
2920
2921 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2922 {
2923         hci_dev_lock(req->hdev);
2924         __hci_update_background_scan(req);
2925         hci_dev_unlock(req->hdev);
2926         return 0;
2927 }
2928
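/* Worker that synchronously updates the background (passive) scan
 * state. If the request fails, any LE connection attempt depending on
 * the background scan is marked as failed.
 */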
2929 static void bg_scan_update(struct work_struct *work)
2930 {
2931         struct hci_dev *hdev = container_of(work, struct hci_dev,
2932                                             bg_scan_update);
2933         struct hci_conn *conn;
2934         u8 status;
2935         int err;
2936
2937         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2938         if (!err)
2939                 return;
2940
2941         hci_dev_lock(hdev);
2942
2943         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2944         if (conn)
2945                 hci_le_conn_failed(conn, status);
2946
2947         hci_dev_unlock(hdev);
2948 }
2949
2950 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2951 {
2952         hci_req_add_le_scan_disable(req, false);
2953         return 0;
2954 }
2955
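/* Queue a BR/EDR inquiry. The LAP below selects the General Inquiry
 * Access Code (GIAC, 0x9e8b33) or, for limited discovery, the Limited
 * Inquiry Access Code (LIAC, 0x9e8b00); both byte arrays are in
 * little-endian order.
 */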
2956 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2957 {
2958         u8 length = opt;
2959         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2960         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2961         struct hci_cp_inquiry cp;
2962
2963         if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2964                 return 0;
2965
2966         bt_dev_dbg(req->hdev, "");
2967
2968         hci_dev_lock(req->hdev);
2969         hci_inquiry_cache_flush(req->hdev);
2970         hci_dev_unlock(req->hdev);
2971
2972         memset(&cp, 0, sizeof(cp));
2973
2974         if (req->hdev->discovery.limited)
2975                 memcpy(&cp.lap, liac, sizeof(cp.lap));
2976         else
2977                 memcpy(&cp.lap, giac, sizeof(cp.lap));
2978
2979         cp.length = length;
2980
2981         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2982
2983         return 0;
2984 }
2985
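/* Worker that runs when the LE scan duration expires: disable the LE
 * scan and, depending on the discovery type, either stop discovery or
 * let BR/EDR inquiry take over for the interleaved case.
 */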
2986 static void le_scan_disable_work(struct work_struct *work)
2987 {
2988         struct hci_dev *hdev = container_of(work, struct hci_dev,
2989                                             le_scan_disable.work);
2990         u8 status;
2991
2992         bt_dev_dbg(hdev, "");
2993
2994         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2995                 return;
2996
2997         cancel_delayed_work(&hdev->le_scan_restart);
2998
2999         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
3000         if (status) {
3001                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
3002                            status);
3003                 return;
3004         }
3005
3006         hdev->discovery.scan_start = 0;
3007
3008         /* If we were running an LE-only scan, change the discovery
3009          * state. If we were running both LE and BR/EDR inquiry
3010          * simultaneously and BR/EDR inquiry has already finished, stop
3011          * discovery; otherwise BR/EDR inquiry will stop discovery when
3012          * it finishes. If a remote device name is still being resolved,
3013          * do not change the discovery state.
3014          */
3015
3016         if (hdev->discovery.type == DISCOV_TYPE_LE)
3017                 goto discov_stopped;
3018
3019         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3020                 return;
3021
3022         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3023                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3024                     hdev->discovery.state != DISCOVERY_RESOLVING)
3025                         goto discov_stopped;
3026
3027                 return;
3028         }
3029
3030         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3031                      HCI_CMD_TIMEOUT, &status);
3032         if (status) {
3033                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3034                 goto discov_stopped;
3035         }
3036
3037         return;
3038
3039 discov_stopped:
3040         hci_dev_lock(hdev);
3041         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3042         hci_dev_unlock(hdev);
3043 }
3044
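/* Queue the commands to restart LE scanning by disabling and
 * immediately re-enabling it. This is used on controllers with a
 * strict duplicate filter so that devices which were already reported
 * show up again.
 */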
3045 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3046 {
3047         struct hci_dev *hdev = req->hdev;
3048
3049         /* If the controller is not scanning, we are done. */
3050         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3051                 return 0;
3052
3053         if (hdev->scanning_paused) {
3054                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3055                 return 0;
3056         }
3057
3058         hci_req_add_le_scan_disable(req, false);
3059
3060         if (use_ext_scan(hdev)) {
3061                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3062
3063                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3064                 ext_enable_cp.enable = LE_SCAN_ENABLE;
3065                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3066
3067                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3068                             sizeof(ext_enable_cp), &ext_enable_cp);
3069         } else {
3070                 struct hci_cp_le_set_scan_enable cp;
3071
3072                 memset(&cp, 0, sizeof(cp));
3073                 cp.enable = LE_SCAN_ENABLE;
3074                 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3075                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3076         }
3077
3078         return 0;
3079 }
3080
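/* Worker that restarts the LE scan and then re-queues the scan
 * disable work with the time remaining from the original scan
 * duration, taking a possible jiffies wraparound into account.
 */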
3081 static void le_scan_restart_work(struct work_struct *work)
3082 {
3083         struct hci_dev *hdev = container_of(work, struct hci_dev,
3084                                             le_scan_restart.work);
3085         unsigned long timeout, duration, scan_start, now;
3086         u8 status;
3087
3088         bt_dev_dbg(hdev, "");
3089
3090         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3091         if (status) {
3092                 bt_dev_err(hdev, "failed to restart LE scan: status 0x%02x",
3093                            status);
3094                 return;
3095         }
3096
3097         hci_dev_lock(hdev);
3098
3099         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3100             !hdev->discovery.scan_start)
3101                 goto unlock;
3102
3103         /* When the scan was started, hdev->le_scan_disable was queued to
3104          * run 'duration' after scan_start. Restarting the scan canceled
3105          * that work, so queue it again with the remaining timeout to
3106          * make sure that the scan does not run indefinitely.
3107          */
3108         duration = hdev->discovery.scan_duration;
3109         scan_start = hdev->discovery.scan_start;
3110         now = jiffies;
3111         if (now - scan_start <= duration) {
3112                 int elapsed;
3113
3114                 if (now >= scan_start)
3115                         elapsed = now - scan_start;
3116                 else
3117                         elapsed = ULONG_MAX - scan_start + now;
3118
3119                 timeout = duration - elapsed;
3120         } else {
3121                 timeout = 0;
3122         }
3123
3124         queue_delayed_work(hdev->req_workqueue,
3125                            &hdev->le_scan_disable, timeout);
3126
3127 unlock:
3128         hci_dev_unlock(hdev);
3129 }
3130
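/* Queue the commands for an LE active scan as used by discovery:
 * stop any running background scan first, pick a suitable own
 * address type and start scanning with the interval passed in @opt.
 */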
3131 static int active_scan(struct hci_request *req, unsigned long opt)
3132 {
3133         u16 interval = opt;
3134         struct hci_dev *hdev = req->hdev;
3135         u8 own_addr_type;
3136         /* White list is not used for discovery */
3137         u8 filter_policy = 0x00;
3138         /* Discovery doesn't require controller address resolution */
3139         bool addr_resolv = false;
3140         int err;
3141
3142         bt_dev_dbg(hdev, "");
3143
3144         /* If the controller is scanning, it means that background
3145          * scanning is running. Thus, we should temporarily stop it in
3146          * order to set the discovery scanning parameters.
3147          */
3148         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3149                 hci_req_add_le_scan_disable(req, false);
3150                 cancel_interleave_scan(hdev);
3151         }
3152
3153         /* All active scans will be done with either a resolvable private
3154          * address (when the privacy feature has been enabled) or a
3155          * non-resolvable private address.
3156          */
3157         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3158                                         &own_addr_type);
3159         if (err < 0)
3160                 own_addr_type = ADDR_LE_DEV_PUBLIC;
3161
3162         hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3163                            hdev->le_scan_window_discovery, own_addr_type,
3164                            filter_policy, addr_resolv);
3165         return 0;
3166 }
3167
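/* For controllers supporting simultaneous discovery, queue both the
 * LE active scan and the BR/EDR inquiry in a single request.
 */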
3168 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3169 {
3170         int err;
3171
3172         bt_dev_dbg(req->hdev, "");
3173
3174         err = active_scan(req, opt);
3175         if (err)
3176                 return err;
3177
3178         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3179 }
3180
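/* Kick off discovery according to hdev->discovery.type and, for the
 * LE based types, schedule the LE scan disable work after the chosen
 * timeout.
 */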
3181 static void start_discovery(struct hci_dev *hdev, u8 *status)
3182 {
3183         unsigned long timeout;
3184
3185         bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3186
3187         switch (hdev->discovery.type) {
3188         case DISCOV_TYPE_BREDR:
3189                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3190                         hci_req_sync(hdev, bredr_inquiry,
3191                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3192                                      status);
3193                 return;
3194         case DISCOV_TYPE_INTERLEAVED:
3195                 /* When running simultaneous discovery, the LE scanning time
3196                  * should occupy the whole discovery time since BR/EDR inquiry
3197                  * and LE scanning are scheduled by the controller.
3198                  *
3199                  * Interleaved discovery, in comparison, runs BR/EDR inquiry
3200                  * and LE scanning sequentially, each with its own separate
3201                  * timeout.
3202                  */
3203                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3204                              &hdev->quirks)) {
3205                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3206                         /* During simultaneous discovery, we double the LE
3207                          * scan interval. We must leave some time for the
3208                          * controller to do BR/EDR inquiry.
3209                          */
3210                         hci_req_sync(hdev, interleaved_discov,
3211                                      hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3212                                      status);
3213                         break;
3214                 }
3215
3216                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3217                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3218                              HCI_CMD_TIMEOUT, status);
3219                 break;
3220         case DISCOV_TYPE_LE:
3221                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3222                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3223                              HCI_CMD_TIMEOUT, status);
3224                 break;
3225         default:
3226                 *status = HCI_ERROR_UNSPECIFIED;
3227                 return;
3228         }
3229
3230         if (*status)
3231                 return;
3232
3233         bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3234
3235         /* When service discovery is used and the controller has a
3236          * strict duplicate filter, it is important to remember the
3237          * start and duration of the scan. This is required for
3238          * restarting scanning during the discovery phase.
3239          */
3240         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3241                      hdev->discovery.result_filtering) {
3242                 hdev->discovery.scan_start = jiffies;
3243                 hdev->discovery.scan_duration = timeout;
3244         }
3245
3246         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3247                            timeout);
3248 }
3249
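/* Queue the commands needed to stop ongoing discovery, including a
 * pending remote name resolution. Returns true if there was any
 * discovery activity to stop.
 */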
3250 bool hci_req_stop_discovery(struct hci_request *req)
3251 {
3252         struct hci_dev *hdev = req->hdev;
3253         struct discovery_state *d = &hdev->discovery;
3254         struct hci_cp_remote_name_req_cancel cp;
3255         struct inquiry_entry *e;
3256         bool ret = false;
3257
3258         bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3259
3260         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3261                 if (test_bit(HCI_INQUIRY, &hdev->flags))
3262                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3263
3264                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3265                         cancel_delayed_work(&hdev->le_scan_disable);
3266                         cancel_delayed_work(&hdev->le_scan_restart);
3267                         hci_req_add_le_scan_disable(req, false);
3268                 }
3269
3270                 ret = true;
3271         } else {
3272                 /* Passive scanning */
3273                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3274                         hci_req_add_le_scan_disable(req, false);
3275                         ret = true;
3276                 }
3277         }
3278
3279         /* No further actions needed for LE-only discovery */
3280         if (d->type == DISCOV_TYPE_LE)
3281                 return ret;
3282
3283         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3284                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3285                                                      NAME_PENDING);
3286                 if (!e)
3287                         return ret;
3288
3289                 bacpy(&cp.bdaddr, &e->data.bdaddr);
3290                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3291                             &cp);
3292                 ret = true;
3293         }
3294
3295         return ret;
3296 }
3297
3298 static int stop_discovery(struct hci_request *req, unsigned long opt)
3299 {
3300         hci_dev_lock(req->hdev);
3301         hci_req_stop_discovery(req);
3302         hci_dev_unlock(req->hdev);
3303
3304         return 0;
3305 }
3306
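/* Worker driving the discovery state machine: start discovery when in
 * DISCOVERY_STARTING, stop it when in DISCOVERY_STOPPING, and report
 * the result back to the management interface.
 */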
3307 static void discov_update(struct work_struct *work)
3308 {
3309         struct hci_dev *hdev = container_of(work, struct hci_dev,
3310                                             discov_update);
3311         u8 status = 0;
3312
3313         switch (hdev->discovery.state) {
3314         case DISCOVERY_STARTING:
3315                 start_discovery(hdev, &status);
3316                 mgmt_start_discovery_complete(hdev, status);
3317                 if (status)
3318                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3319                 else
3320                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3321                 break;
3322         case DISCOVERY_STOPPING:
3323                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3324                 mgmt_stop_discovery_complete(hdev, status);
3325                 if (!status)
3326                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3327                 break;
3328         case DISCOVERY_STOPPED:
3329         default:
3330                 return;
3331         }
3332 }
3333
3334 static void discov_off(struct work_struct *work)
3335 {
3336         struct hci_dev *hdev = container_of(work, struct hci_dev,
3337                                             discov_off.work);
3338
3339         bt_dev_dbg(hdev, "");
3340
3341         hci_dev_lock(hdev);
3342
3343         /* When the discoverable timeout triggers, make sure
3344          * the limited discoverable flag is cleared. Even in the case
3345          * of a timeout triggered from general discoverable, it is
3346          * safe to unconditionally clear the flag.
3347          */
3348         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3349         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3350         hdev->discov_timeout = 0;
3351
3352         hci_dev_unlock(hdev);
3353
3354         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3355         mgmt_new_settings(hdev);
3356 }
3357
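/* Bring the controller settings in line with the host configuration
 * after power on: SSP/SC support, LE host support, the advertising
 * state, link security, and the BR/EDR scan, class, name and EIR
 * data.
 */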
3358 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3359 {
3360         struct hci_dev *hdev = req->hdev;
3361         u8 link_sec;
3362
3363         hci_dev_lock(hdev);
3364
3365         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3366             !lmp_host_ssp_capable(hdev)) {
3367                 u8 mode = 0x01;
3368
3369                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3370
3371                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3372                         u8 support = 0x01;
3373
3374                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3375                                     sizeof(support), &support);
3376                 }
3377         }
3378
3379         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3380             lmp_bredr_capable(hdev)) {
3381                 struct hci_cp_write_le_host_supported cp;
3382
3383                 cp.le = 0x01;
3384                 cp.simul = 0x00;
3385
3386                 /* Check first if we already have the right
3387                  * host state (host features set)
3388                  */
3389                 if (cp.le != lmp_host_le_capable(hdev) ||
3390                     cp.simul != lmp_host_le_br_capable(hdev))
3391                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3392                                     sizeof(cp), &cp);
3393         }
3394
3395         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3396                 /* Make sure the controller has a good default for
3397                  * advertising data. This also applies to the case
3398                  * where BR/EDR was toggled during the AUTO_OFF phase.
3399                  */
3400                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3401                     list_empty(&hdev->adv_instances)) {
3402                         int err;
3403
3404                         if (ext_adv_capable(hdev)) {
3405                                 err = __hci_req_setup_ext_adv_instance(req,
3406                                                                        0x00);
3407                                 if (!err)
3408                                         __hci_req_update_scan_rsp_data(req,
3409                                                                        0x00);
3410                         } else {
3411                                 err = 0;
3412                                 __hci_req_update_adv_data(req, 0x00);
3413                                 __hci_req_update_scan_rsp_data(req, 0x00);
3414                         }
3415
3416                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3417                                 if (!ext_adv_capable(hdev))
3418                                         __hci_req_enable_advertising(req);
3419                                 else if (!err)
3420                                         __hci_req_enable_ext_advertising(req,
3421                                                                          0x00);
3422                         }
3423                 } else if (!list_empty(&hdev->adv_instances)) {
3424                         struct adv_info *adv_instance;
3425
3426                         adv_instance = list_first_entry(&hdev->adv_instances,
3427                                                         struct adv_info, list);
3428                         __hci_req_schedule_adv_instance(req,
3429                                                         adv_instance->instance,
3430                                                         true);
3431                 }
3432         }
3433
3434         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3435         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3436                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3437                             sizeof(link_sec), &link_sec);
3438
3439         if (lmp_bredr_capable(hdev)) {
3440                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3441                         __hci_req_write_fast_connectable(req, true);
3442                 else
3443                         __hci_req_write_fast_connectable(req, false);
3444                 __hci_req_update_scan(req);
3445                 __hci_req_update_class(req);
3446                 __hci_req_update_name(req);
3447                 __hci_req_update_eir(req);
3448         }
3449
3450         hci_dev_unlock(hdev);
3451         return 0;
3452 }
3453
3454 int __hci_req_hci_power_on(struct hci_dev *hdev)
3455 {
3456         /* Register the available SMP channels (BR/EDR and LE) only when
3457          * successfully powering on the controller. This late
3458          * registration is required so that LE SMP can clearly decide if
3459          * the public address or static address is used.
3460          */
3461         smp_register(hdev);
3462
3463         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3464                               NULL);
3465 }
3466
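/* Initialize the deferred work used by the request framework; paired
 * with hci_request_cancel_all(), which stops it all again.
 */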
3467 void hci_request_setup(struct hci_dev *hdev)
3468 {
3469         INIT_WORK(&hdev->discov_update, discov_update);
3470         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3471         INIT_WORK(&hdev->scan_update, scan_update_work);
3472         INIT_WORK(&hdev->connectable_update, connectable_update_work);
3473         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3474         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3475         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3476         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3477         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3478         INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3479 }
3480
3481 void hci_request_cancel_all(struct hci_dev *hdev)
3482 {
3483         hci_req_sync_cancel(hdev, ENODEV);
3484
3485         cancel_work_sync(&hdev->discov_update);
3486         cancel_work_sync(&hdev->bg_scan_update);
3487         cancel_work_sync(&hdev->scan_update);
3488         cancel_work_sync(&hdev->connectable_update);
3489         cancel_work_sync(&hdev->discoverable_update);
3490         cancel_delayed_work_sync(&hdev->discov_off);
3491         cancel_delayed_work_sync(&hdev->le_scan_disable);
3492         cancel_delayed_work_sync(&hdev->le_scan_restart);
3493
3494         if (hdev->adv_instance_timeout) {
3495                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3496                 hdev->adv_instance_timeout = 0;
3497         }
3498
3499         cancel_interleave_scan(hdev);
3500 }