/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
23
24 #include <linux/sched/signal.h>
25
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
29
30 #include "smp.h"
31 #include "hci_request.h"
32
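/* Possible values of hdev->req_status while a synchronous request runs */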
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

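/* Initialize a request tracker: an empty HCI command queue bound to @hdev.
 * Typical usage, sketched from the helpers below (complete_cb stands for
 * any hci_req_complete_t callback of the caller's choosing):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	hci_req_run(&req, complete_cb);
 */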
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

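/* Hand the queued commands over to the controller: tag the last skb with the
 * completion callback, splice the request queue onto hdev->cmd_q and kick the
 * command worker. Returns -ENODATA when the request is empty.
 */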
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

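/* Completion handler shared by the synchronous request helpers: record the
 * result (and the response skb, if any) on hdev and wake up the waiter.
 */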
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

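/* Send a single HCI command and wait up to @timeout for its completion,
 * optionally matching a specific @event. Returns the response skb, or an
 * ERR_PTR() on error or timeout (-ENODATA if no response skb arrived).
 */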
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "");

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

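/* Allocate an skb carrying a single HCI command packet: the command header
 * (opcode and parameter length) followed by @plen bytes of parameters.
 */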
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Returns true if this call started an interleaved scan, false otherwise */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* If there is at least one ADV monitor and one pending LE connection
	 * or one device to be scanned for, we should alternate between
	 * allowlist scan and one without any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports));
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	bt_dev_dbg(hdev, "ADV monitoring is %s",
		   hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices to
		 * be scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		bt_dev_dbg(hdev, "stopping background scanning");
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);
		bt_dev_dbg(hdev, "starting background scanning");
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

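/* Append a 16-bit service UUID list to the EIR buffer at @data, skipping
 * non-SIG UUIDs (below 0x1100) and the PnP Information service. The list
 * type is downgraded to "incomplete" when space runs out. Returns the new
 * write position.
 */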
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

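/* Build the extended inquiry response payload in @data: local name
 * (shortened above 48 bytes), inquiry TX power, Device ID and the three
 * service UUID lists, in that order.
 */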
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (hdev->suspended)
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev) &&
	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds a connection to the white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list cannot be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in the whitelist */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

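/* Rebuild the controller white list from the pending connection and report
 * lists. Returns the scan filter policy: 0x01 when the white list covers
 * all devices of interest, 0x00 when scanning must accept all
 * advertisements instead.
 */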
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the privacy 1.2
	 * features. Additionally, once we support privacy 1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list cannot be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return the filter policy value to not use
	 * the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered
	 * - Interleaved scanning is not currently using the allowlist
	 *
	 * Once controller offloading of advertisement monitors is in place,
	 * the above condition should also take MSFT extension support into
	 * account.
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use white list */
	return 0x01;
}


static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

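/* Queue the commands that configure and enable LE scanning, using the
 * extended scan interface (with per-PHY parameters) when the controller
 * supports it and the legacy interface otherwise.
 */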
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if the Set Extended Scan Parameters and Set
	 * Extended Scan Enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution before the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, addr_resolv);
}

static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return true;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return false;

	if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv_instance->scan_rsp_len ? true : false;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	memset(&f, 0, sizeof(f));
	f.flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

	/* Update page scan state (since we may have modified it when setting
	 * the event filter).
	 */
	__hci_req_update_scan(req);
}

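/* Program a connection-setup event filter for every whitelisted device that
 * is flagged for remote wakeup, and enable page scan only if at least one
 * such filter was installed.
 */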
static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->whitelist, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	if (scan)
		set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
	else
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
	/* Before changing params disable scan if enabled */
	if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);

	/* Configure params and enable scanning */
	hci_req_add_le_passive_scan(req);

	/* Block suspend notifier on response */
	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}

	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_resume_adv_instances(&req);

	return hci_req_run(&req, NULL);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}
}

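/* Walk the device into (or out of) the requested suspend state:
 * BT_SUSPEND_DISCONNECT pauses discovery and advertising, disables all
 * scanning and disconnects every link; BT_SUSPEND_CONFIGURE_WAKE arms event
 * filters and a low duty cycle passive scan; any other state restores
 * normal operation.
 */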
/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop directed advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		/* Pause other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_pause_adv_instances(&req);

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;
		/* Disable page scan */
		page_scan = SCAN_DISABLED;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_interleave_scan(hdev);
			hci_req_add_le_scan_disable(&req, false);
		}

		/* Mark task needing completion */
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		hci_req_config_le_suspend_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		hci_req_clear_event_filter(&req);
		/* Reset passive/background scanning to normal */
		hci_req_config_le_suspend_scan(&req);

		/* Unpause directed advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Resume other advertisements */
		if (hdev->adv_instance_cnt)
			__hci_req_resume_adv_instances(&req);

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
	return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);

	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

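/* Return the advertising flags for @instance: instance 0 derives them from
 * the global HCI_ADVERTISING_* device flags, while other instances carry
 * their own flags (0 if the instance does not exist).
 */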
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

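/* Consult the controller's supported LE states to decide whether
 * advertising (connectable or not) may coexist with the current slave and
 * master role connections.
 */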
1459 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1460 {
1461         /* If there is no connection we are OK to advertise. */
1462         if (hci_conn_num(hdev, LE_LINK) == 0)
1463                 return true;
1464
1465         /* Check le_states if there is any connection in slave role. */
1466         if (hdev->conn_hash.le_num_slave > 0) {
1467                 /* Slave connection state and non connectable mode bit 20. */
1468                 if (!connectable && !(hdev->le_states[2] & 0x10))
1469                         return false;
1470
1471                 /* Slave connection state and connectable mode bit 38
1472                  * and scannable bit 21.
1473                  */
1474                 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1475                                     !(hdev->le_states[2] & 0x20)))
1476                         return false;
1477         }
1478
1479         /* Check le_states if there is any connection in master role. */
1480         if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1481                 /* Master connection state and non-connectable mode, bit 18. */
1482                 if (!connectable && !(hdev->le_states[2] & 0x02))
1483                         return false;
1484
1485                 /* Master connection state and connectable mode bit 35 and
1486                  * scannable 19.
1487                  */
1488                 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1489                                     !(hdev->le_states[2] & 0x08)))
1490                         return false;
1491         }
1492
1493         return true;
1494 }
1495
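/* Program and enable legacy advertising for the current instance: stop
 * any running advertising first, refresh the random address if needed,
 * then queue LE Set Advertising Parameters followed by Enable.
 */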
1496 void __hci_req_enable_advertising(struct hci_request *req)
1497 {
1498         struct hci_dev *hdev = req->hdev;
1499         struct adv_info *adv_instance;
1500         struct hci_cp_le_set_adv_param cp;
1501         u8 own_addr_type, enable = 0x01;
1502         bool connectable;
1503         u16 adv_min_interval, adv_max_interval;
1504         u32 flags;
1505
1506         flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1507         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1508
1509         /* If the "connectable" instance flag was not set, then choose between
1510          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1511          */
1512         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1513                       mgmt_get_connectable(hdev);
1514
1515         if (!is_advertising_allowed(hdev, connectable))
1516                 return;
1517
1518         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1519                 __hci_req_disable_advertising(req);
1520
1521         /* Clear the HCI_LE_ADV bit temporarily so that the
1522          * hci_update_random_address knows that it's safe to go ahead
1523          * and write a new random address. The flag will be set back on
1524          * as soon as the SET_ADV_ENABLE HCI command completes.
1525          */
1526         hci_dev_clear_flag(hdev, HCI_LE_ADV);
1527
1528         /* Set require_privacy to true only when non-connectable
1529          * advertising is used. In that case it is fine to use a
1530          * non-resolvable private address.
1531          */
1532         if (hci_update_random_address(req, !connectable,
1533                                       adv_use_rpa(hdev, flags),
1534                                       &own_addr_type) < 0)
1535                 return;
1536
1537         memset(&cp, 0, sizeof(cp));
1538
1539         if (adv_instance) {
1540                 adv_min_interval = adv_instance->min_interval;
1541                 adv_max_interval = adv_instance->max_interval;
1542         } else {
1543                 adv_min_interval = hdev->le_adv_min_interval;
1544                 adv_max_interval = hdev->le_adv_max_interval;
1545         }
1546
1547         if (connectable) {
1548                 cp.type = LE_ADV_IND;
1549         } else {
1550                 if (adv_cur_instance_is_scannable(hdev))
1551                         cp.type = LE_ADV_SCAN_IND;
1552                 else
1553                         cp.type = LE_ADV_NONCONN_IND;
1554
1555                 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1556                     hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1557                         adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1558                         adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1559                 }
1560         }
1561
1562         cp.min_interval = cpu_to_le16(adv_min_interval);
1563         cp.max_interval = cpu_to_le16(adv_max_interval);
1564         cp.own_address_type = own_addr_type;
1565         cp.channel_map = hdev->le_adv_channel_map;
1566
1567         hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1568
1569         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1570 }
1571
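/* Every AD/EIR structure is encoded as one length octet (covering the
 * type and data), one type octet and then the data itself, which is
 * where the fixed per-field overhead accounted for below comes from.
 */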
1572 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1573 {
1574         size_t short_len;
1575         size_t complete_len;
1576
1577         /* no space left for name (+ NULL + type + len) */
1578         if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1579                 return ad_len;
1580
1581         /* use complete name if present and fits */
1582         complete_len = strlen(hdev->dev_name);
1583         if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1584                 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1585                                        hdev->dev_name, complete_len + 1);
1586
1587         /* use short name if present */
1588         short_len = strlen(hdev->short_name);
1589         if (short_len)
1590                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1591                                        hdev->short_name, short_len + 1);
1592
1593         /* use shortened full name if present; we already know that the name
1594          * is longer than HCI_MAX_SHORT_NAME_LENGTH
1595          */
1596         if (complete_len) {
1597                 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1598
1599                 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1600                 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1601
1602                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1603                                        sizeof(name));
1604         }
1605
1606         return ad_len;
1607 }
1608
1609 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1610 {
1611         return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1612 }
1613
1614 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1615 {
1616         u8 scan_rsp_len = 0;
1617
1618         if (hdev->appearance)
1619                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1621
1622         return append_local_name(hdev, ptr, scan_rsp_len);
1623 }
1624
1625 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1626                                         u8 *ptr)
1627 {
1628         struct adv_info *adv_instance;
1629         u32 instance_flags;
1630         u8 scan_rsp_len = 0;
1631
1632         adv_instance = hci_find_adv_instance(hdev, instance);
1633         if (!adv_instance)
1634                 return 0;
1635
1636         instance_flags = adv_instance->flags;
1637
1638         if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
1639                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1641
1642         memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1643                adv_instance->scan_rsp_len);
1644
1645         scan_rsp_len += adv_instance->scan_rsp_len;
1646
1647         if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1648                 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1649
1650         return scan_rsp_len;
1651 }
1652
1653 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1654 {
1655         struct hci_dev *hdev = req->hdev;
1656         u8 len;
1657
1658         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1659                 return;
1660
1661         if (ext_adv_capable(hdev)) {
1662                 struct hci_cp_le_set_ext_scan_rsp_data cp;
1663
1664                 memset(&cp, 0, sizeof(cp));
1665
1666                 if (instance)
1667                         len = create_instance_scan_rsp_data(hdev, instance,
1668                                                             cp.data);
1669                 else
1670                         len = create_default_scan_rsp_data(hdev, cp.data);
1671
1672                 if (hdev->scan_rsp_data_len == len &&
1673                     !memcmp(cp.data, hdev->scan_rsp_data, len))
1674                         return;
1675
1676                 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1677                 hdev->scan_rsp_data_len = len;
1678
1679                 cp.handle = instance;
1680                 cp.length = len;
1681                 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1682                 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1683
1684                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1685                             &cp);
1686         } else {
1687                 struct hci_cp_le_set_scan_rsp_data cp;
1688
1689                 memset(&cp, 0, sizeof(cp));
1690
1691                 if (instance)
1692                         len = create_instance_scan_rsp_data(hdev, instance,
1693                                                             cp.data);
1694                 else
1695                         len = create_default_scan_rsp_data(hdev, cp.data);
1696
1697                 if (hdev->scan_rsp_data_len == len &&
1698                     !memcmp(cp.data, hdev->scan_rsp_data, len))
1699                         return;
1700
1701                 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1702                 hdev->scan_rsp_data_len = len;
1703
1704                 cp.length = len;
1705
1706                 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1707         }
1708 }
1709
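/* Assemble the advertising data payload for an instance: an optional
 * "Flags" field, the instance's own advertising data and an optional
 * "Tx Power" field. Returns the total number of bytes written to ptr.
 */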
1710 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1711 {
1712         struct adv_info *adv_instance = NULL;
1713         u8 ad_len = 0, flags = 0;
1714         u32 instance_flags;
1715
1716         /* Return 0 when the current instance identifier is invalid. */
1717         if (instance) {
1718                 adv_instance = hci_find_adv_instance(hdev, instance);
1719                 if (!adv_instance)
1720                         return 0;
1721         }
1722
1723         instance_flags = get_adv_instance_flags(hdev, instance);
1724
1725         /* If the instance already has the flags set, skip adding them
1726          * again.
1727          */
1728         if (adv_instance && eir_get_data(adv_instance->adv_data,
1729                                          adv_instance->adv_data_len, EIR_FLAGS,
1730                                          NULL))
1731                 goto skip_flags;
1732
1733         /* The Add Advertising command allows userspace to set both the general
1734          * and limited discoverable flags.
1735          */
1736         if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1737                 flags |= LE_AD_GENERAL;
1738
1739         if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1740                 flags |= LE_AD_LIMITED;
1741
1742         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1743                 flags |= LE_AD_NO_BREDR;
1744
1745         if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1746                 /* If a discovery flag wasn't provided, simply use the global
1747                  * settings.
1748                  */
1749                 if (!flags)
1750                         flags |= mgmt_get_adv_discov_flags(hdev);
1751
1752                 /* If flags would still be empty, then there is no need to
1753                  * include the "Flags" AD field.
1754                  */
1755                 if (flags) {
1756                         ptr[0] = 0x02;
1757                         ptr[1] = EIR_FLAGS;
1758                         ptr[2] = flags;
1759
1760                         ad_len += 3;
1761                         ptr += 3;
1762                 }
1763         }
1764
1765 skip_flags:
1766         if (adv_instance) {
1767                 memcpy(ptr, adv_instance->adv_data,
1768                        adv_instance->adv_data_len);
1769                 ad_len += adv_instance->adv_data_len;
1770                 ptr += adv_instance->adv_data_len;
1771         }
1772
1773         if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1774                 s8 adv_tx_power;
1775
1776                 if (ext_adv_capable(hdev)) {
1777                         if (adv_instance)
1778                                 adv_tx_power = adv_instance->tx_power;
1779                         else
1780                                 adv_tx_power = hdev->adv_tx_power;
1781                 } else {
1782                         adv_tx_power = hdev->adv_tx_power;
1783                 }
1784
1785                 /* Include the Tx Power field only if a valid value is available */
1786                 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1787                         ptr[0] = 0x02;
1788                         ptr[1] = EIR_TX_POWER;
1789                         ptr[2] = (u8)adv_tx_power;
1790
1791                         ad_len += 3;
1792                         ptr += 3;
1793                 }
1794         }
1795
1796         return ad_len;
1797 }
1798
1799 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1800 {
1801         struct hci_dev *hdev = req->hdev;
1802         u8 len;
1803
1804         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1805                 return;
1806
1807         if (ext_adv_capable(hdev)) {
1808                 struct hci_cp_le_set_ext_adv_data cp;
1809
1810                 memset(&cp, 0, sizeof(cp));
1811
1812                 len = create_instance_adv_data(hdev, instance, cp.data);
1813
1814                 /* There's nothing to do if the data hasn't changed */
1815                 if (hdev->adv_data_len == len &&
1816                     memcmp(cp.data, hdev->adv_data, len) == 0)
1817                         return;
1818
1819                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1820                 hdev->adv_data_len = len;
1821
1822                 cp.length = len;
1823                 cp.handle = instance;
1824                 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1825                 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1826
1827                 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1828         } else {
1829                 struct hci_cp_le_set_adv_data cp;
1830
1831                 memset(&cp, 0, sizeof(cp));
1832
1833                 len = create_instance_adv_data(hdev, instance, cp.data);
1834
1835                 /* There's nothing to do if the data hasn't changed */
1836                 if (hdev->adv_data_len == len &&
1837                     memcmp(cp.data, hdev->adv_data, len) == 0)
1838                         return;
1839
1840                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1841                 hdev->adv_data_len = len;
1842
1843                 cp.length = len;
1844
1845                 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1846         }
1847 }
1848
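/* Convenience wrapper that builds and submits a one-shot HCI request
 * around __hci_req_update_adv_data(). Callers that are already
 * assembling a request should use the __ variant directly.
 */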
1849 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1850 {
1851         struct hci_request req;
1852
1853         hci_req_init(&req, hdev);
1854         __hci_req_update_adv_data(&req, instance);
1855
1856         return hci_req_run(&req, NULL);
1857 }
1858
1859 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1860                                             u16 opcode)
1861 {
1862         bt_dev_dbg(hdev, "status %u", status);
1863 }
1864
1865 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1866 {
1867         struct hci_request req;
1868         __u8 enable = 0x00;
1869
1870         if (!use_ll_privacy(hdev) &&
1871             !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1872                 return;
1873
1874         hci_req_init(&req, hdev);
1875
1876         hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1877
1878         hci_req_run(&req, enable_addr_resolution_complete);
1879 }
1880
1881 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1882 {
1883         bt_dev_dbg(hdev, "status %u", status);
1884 }
1885
1886 void hci_req_reenable_advertising(struct hci_dev *hdev)
1887 {
1888         struct hci_request req;
1889
1890         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1891             list_empty(&hdev->adv_instances))
1892                 return;
1893
1894         hci_req_init(&req, hdev);
1895
1896         if (hdev->cur_adv_instance) {
1897                 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1898                                                 true);
1899         } else {
1900                 if (ext_adv_capable(hdev)) {
1901                         __hci_req_start_ext_adv(&req, 0x00);
1902                 } else {
1903                         __hci_req_update_adv_data(&req, 0x00);
1904                         __hci_req_update_scan_rsp_data(&req, 0x00);
1905                         __hci_req_enable_advertising(&req);
1906                 }
1907         }
1908
1909         hci_req_run(&req, adv_enable_complete);
1910 }
1911
1912 static void adv_timeout_expire(struct work_struct *work)
1913 {
1914         struct hci_dev *hdev = container_of(work, struct hci_dev,
1915                                             adv_instance_expire.work);
1916
1917         struct hci_request req;
1918         u8 instance;
1919
1920         bt_dev_dbg(hdev, "");
1921
1922         hci_dev_lock(hdev);
1923
1924         hdev->adv_instance_timeout = 0;
1925
1926         instance = hdev->cur_adv_instance;
1927         if (instance == 0x00)
1928                 goto unlock;
1929
1930         hci_req_init(&req, hdev);
1931
1932         hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1933
1934         if (list_empty(&hdev->adv_instances))
1935                 __hci_req_disable_advertising(&req);
1936
1937         hci_req_run(&req, NULL);
1938
1939 unlock:
1940         hci_dev_unlock(hdev);
1941 }
1942
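/* Interleaved scanning alternates passive scanning between
 * allowlist-filtered and unfiltered rounds so that advertisement
 * monitors can still observe devices that are not on the allowlist.
 * Each invocation performs one round and flips the state for the next.
 */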
1943 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1944                                            unsigned long opt)
1945 {
1946         struct hci_dev *hdev = req->hdev;
1947         int ret = 0;
1948
1949         hci_dev_lock(hdev);
1950
1951         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1952                 hci_req_add_le_scan_disable(req, false);
1953         hci_req_add_le_passive_scan(req);
1954
1955         switch (hdev->interleave_scan_state) {
1956         case INTERLEAVE_SCAN_ALLOWLIST:
1957                 bt_dev_dbg(hdev, "next state: no filter");
1958                 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1959                 break;
1960         case INTERLEAVE_SCAN_NO_FILTER:
1961                 bt_dev_dbg(hdev, "next state: allowlist");
1962                 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1963                 break;
1964         case INTERLEAVE_SCAN_NONE:
1965                 bt_dev_err(hdev, "unexpected error");
1966                 ret = -1;
1967         }
1968
1969         hci_dev_unlock(hdev);
1970
1971         return ret;
1972 }
1973
1974 static void interleave_scan_work(struct work_struct *work)
1975 {
1976         struct hci_dev *hdev = container_of(work, struct hci_dev,
1977                                             interleave_scan.work);
1978         u8 status;
1979         unsigned long timeout;
1980
1981         if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1982                 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1983         } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1984                 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1985         } else {
1986                 bt_dev_err(hdev, "unexpected error");
1987                 return;
1988         }
1989
1990         hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1991                      HCI_CMD_TIMEOUT, &status);
1992
1993         /* Don't continue interleaving if it was canceled */
1994         if (is_interleave_scanning(hdev))
1995                 queue_delayed_work(hdev->req_workqueue,
1996                                    &hdev->interleave_scan, timeout);
1997 }
1998
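/* Pick the own address type and, if needed, the random address to program
 * for an advertising set: an RPA when use_rpa is set, a fresh
 * non-resolvable private address when only require_privacy is set, and
 * the public address otherwise. rand_addr is left as BDADDR_ANY when no
 * random address needs to be (re)programmed.
 */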
1999 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2000                            bool use_rpa, struct adv_info *adv_instance,
2001                            u8 *own_addr_type, bdaddr_t *rand_addr)
2002 {
2003         int err;
2004
2005         bacpy(rand_addr, BDADDR_ANY);
2006
2007         /* If privacy is enabled, use a resolvable private address. If the
2008          * current RPA has expired, generate a new one.
2009          */
2010         if (use_rpa) {
2011                 int to;
2012
2013                 /* If the controller supports LL Privacy, use own address type
2014                  * 0x03 (controller generates the RPA).
2015                  */
2016                 if (use_ll_privacy(hdev))
2017                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2018                 else
2019                         *own_addr_type = ADDR_LE_DEV_RANDOM;
2020
2021                 if (adv_instance) {
2022                         if (!adv_instance->rpa_expired &&
2023                             !bacmp(&adv_instance->random_addr, &hdev->rpa))
2024                                 return 0;
2025
2026                         adv_instance->rpa_expired = false;
2027                 } else {
2028                         if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2029                             !bacmp(&hdev->random_addr, &hdev->rpa))
2030                                 return 0;
2031                 }
2032
2033                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2034                 if (err < 0) {
2035                         bt_dev_err(hdev, "failed to generate new RPA");
2036                         return err;
2037                 }
2038
2039                 bacpy(rand_addr, &hdev->rpa);
2040
2041                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2042                 if (adv_instance)
2043                         queue_delayed_work(hdev->workqueue,
2044                                            &adv_instance->rpa_expired_cb, to);
2045                 else
2046                         queue_delayed_work(hdev->workqueue,
2047                                            &hdev->rpa_expired, to);
2048
2049                 return 0;
2050         }
2051
2052         /* If privacy is required but a resolvable private address cannot
2053          * be used, fall back to a non-resolvable private address. This is
2054          * useful for non-connectable advertising.
2055          */
2056         if (require_privacy) {
2057                 bdaddr_t nrpa;
2058
2059                 while (true) {
2060                         /* The non-resolvable private address is generated
2061                          * from six random bytes with the two most significant
2062                          * bits cleared.
2063                          */
2064                         get_random_bytes(&nrpa, 6);
2065                         nrpa.b[5] &= 0x3f;
2066
2067                         /* The non-resolvable private address shall not be
2068                          * equal to the public address.
2069                          */
2070                         if (bacmp(&hdev->bdaddr, &nrpa))
2071                                 break;
2072                 }
2073
2074                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2075                 bacpy(rand_addr, &nrpa);
2076
2077                 return 0;
2078         }
2079
2080         /* No privacy so use a public address. */
2081         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2082
2083         return 0;
2084 }
2085
2086 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2087 {
2088         hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2089 }
2090
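/* Configure the extended advertising parameters for an instance. The
 * event properties are chosen from the connectable/scannable state and
 * from whether the instance uses legacy or extended PDUs, and a
 * per-set random address update is queued when required.
 */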
2091 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2092 {
2093         struct hci_cp_le_set_ext_adv_params cp;
2094         struct hci_dev *hdev = req->hdev;
2095         bool connectable;
2096         u32 flags;
2097         bdaddr_t random_addr;
2098         u8 own_addr_type;
2099         int err;
2100         struct adv_info *adv_instance;
2101         bool secondary_adv;
2102
2103         if (instance > 0) {
2104                 adv_instance = hci_find_adv_instance(hdev, instance);
2105                 if (!adv_instance)
2106                         return -EINVAL;
2107         } else {
2108                 adv_instance = NULL;
2109         }
2110
2111         flags = get_adv_instance_flags(hdev, instance);
2112
2113         /* If the "connectable" instance flag was not set, then choose between
2114          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2115          */
2116         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2117                       mgmt_get_connectable(hdev);
2118
2119         if (!is_advertising_allowed(hdev, connectable))
2120                 return -EPERM;
2121
2122         /* Set require_privacy to true only when non-connectable
2123          * advertising is used. In that case it is fine to use a
2124          * non-resolvable private address.
2125          */
2126         err = hci_get_random_address(hdev, !connectable,
2127                                      adv_use_rpa(hdev, flags), adv_instance,
2128                                      &own_addr_type, &random_addr);
2129         if (err < 0)
2130                 return err;
2131
2132         memset(&cp, 0, sizeof(cp));
2133
2134         if (adv_instance) {
2135                 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2136                 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2137                 cp.tx_power = adv_instance->tx_power;
2138         } else {
2139                 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2140                 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2141                 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2142         }
2143
2144         secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2145
2146         if (connectable) {
2147                 if (secondary_adv)
2148                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2149                 else
2150                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2151         } else if (adv_instance_is_scannable(hdev, instance)) {
2152                 if (secondary_adv)
2153                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2154                 else
2155                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2156         } else {
2157                 if (secondary_adv)
2158                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2159                 else
2160                         cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2161         }
2162
2163         cp.own_addr_type = own_addr_type;
2164         cp.channel_map = hdev->le_adv_channel_map;
2165         cp.handle = instance;
2166
2167         if (flags & MGMT_ADV_FLAG_SEC_2M) {
2168                 cp.primary_phy = HCI_ADV_PHY_1M;
2169                 cp.secondary_phy = HCI_ADV_PHY_2M;
2170         } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2171                 cp.primary_phy = HCI_ADV_PHY_CODED;
2172                 cp.secondary_phy = HCI_ADV_PHY_CODED;
2173         } else {
2174                 /* In all other cases use 1M */
2175                 cp.primary_phy = HCI_ADV_PHY_1M;
2176                 cp.secondary_phy = HCI_ADV_PHY_1M;
2177         }
2178
2179         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2180
2181         if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2182             bacmp(&random_addr, BDADDR_ANY)) {
2183                 struct hci_cp_le_set_adv_set_rand_addr cp;
2184
2185                 /* Check if the random address needs to be updated */
2186                 if (adv_instance) {
2187                         if (!bacmp(&random_addr, &adv_instance->random_addr))
2188                                 return 0;
2189                 } else {
2190                         if (!bacmp(&random_addr, &hdev->random_addr))
2191                                 return 0;
2192                 }
2193
2194                 memset(&cp, 0, sizeof(cp));
2195
2196                 cp.handle = instance;
2197                 bacpy(&cp.bdaddr, &random_addr);
2198
2199                 hci_req_add(req,
2200                             HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2201                             sizeof(cp), &cp);
2202         }
2203
2204         return 0;
2205 }
2206
2207 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2208 {
2209         struct hci_dev *hdev = req->hdev;
2210         struct hci_cp_le_set_ext_adv_enable *cp;
2211         struct hci_cp_ext_adv_set *adv_set;
2212         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2213         struct adv_info *adv_instance;
2214
2215         if (instance > 0) {
2216                 adv_instance = hci_find_adv_instance(hdev, instance);
2217                 if (!adv_instance)
2218                         return -EINVAL;
2219         } else {
2220                 adv_instance = NULL;
2221         }
2222
2223         cp = (void *) data;
2224         adv_set = (void *) cp->data;
2225
2226         memset(cp, 0, sizeof(*cp));
2227
2228         cp->enable = 0x01;
2229         cp->num_of_sets = 0x01;
2230
2231         memset(adv_set, 0, sizeof(*adv_set));
2232
2233         adv_set->handle = instance;
2234
2235         /* Set duration per instance since controller is responsible for
2236          * scheduling it.
2237          */
2238         if (adv_instance && adv_instance->duration) {
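                /* Note: the MGMT interface specifies the instance duration in
                 * seconds, hence the conversion below to the controller's
                 * 10 ms units.
                 */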
2239                 u16 duration = adv_instance->duration * MSEC_PER_SEC;
2240
2241                 /* Time = N * 10 ms */
2242                 adv_set->duration = cpu_to_le16(duration / 10);
2243         }
2244
2245         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2246                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2247                     data);
2248
2249         return 0;
2250 }
2251
2252 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2253 {
2254         struct hci_dev *hdev = req->hdev;
2255         struct hci_cp_le_set_ext_adv_enable *cp;
2256         struct hci_cp_ext_adv_set *adv_set;
2257         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2258         u8 req_size;
2259
2260         /* If request specifies an instance that doesn't exist, fail */
2261         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2262                 return -EINVAL;
2263
2264         memset(data, 0, sizeof(data));
2265
2266         cp = (void *)data;
2267         adv_set = (void *)cp->data;
2268
2269         /* Instance 0x00 indicates all advertising instances will be disabled */
2270         cp->num_of_sets = !!instance;
2271         cp->enable = 0x00;
2272
2273         adv_set->handle = instance;
2274
2275         req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2276         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2277
2278         return 0;
2279 }
2280
2281 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2282 {
2283         struct hci_dev *hdev = req->hdev;
2284
2285         /* If request specifies an instance that doesn't exist, fail */
2286         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2287                 return -EINVAL;
2288
2289         hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2290
2291         return 0;
2292 }
2293
2294 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2295 {
2296         struct hci_dev *hdev = req->hdev;
2297         struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2298         int err;
2299
2300         /* If the instance isn't pending, the controller already knows about
2301          * it, and it's safe to disable it first.
2302          */
2303         if (adv_instance && !adv_instance->pending)
2304                 __hci_req_disable_ext_adv_instance(req, instance);
2305
2306         err = __hci_req_setup_ext_adv_instance(req, instance);
2307         if (err < 0)
2308                 return err;
2309
2310         __hci_req_update_scan_rsp_data(req, instance);
2311         __hci_req_enable_ext_advertising(req, instance);
2312
2313         return 0;
2314 }
2315
2316 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2317                                     bool force)
2318 {
2319         struct hci_dev *hdev = req->hdev;
2320         struct adv_info *adv_instance = NULL;
2321         u16 timeout;
2322
2323         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2324             list_empty(&hdev->adv_instances))
2325                 return -EPERM;
2326
2327         if (hdev->adv_instance_timeout)
2328                 return -EBUSY;
2329
2330         adv_instance = hci_find_adv_instance(hdev, instance);
2331         if (!adv_instance)
2332                 return -ENOENT;
2333
2334         /* A zero timeout means unlimited advertising. As long as there is
2335          * only one instance, duration should be ignored. We still set a timeout
2336          * in case further instances are being added later on.
2337          *
2338          * If the remaining lifetime of the instance is more than the duration
2339          * then the timeout corresponds to the duration, otherwise it will be
2340          * reduced to the remaining instance lifetime.
2341          */
2342         if (adv_instance->timeout == 0 ||
2343             adv_instance->duration <= adv_instance->remaining_time)
2344                 timeout = adv_instance->duration;
2345         else
2346                 timeout = adv_instance->remaining_time;
2347
2348         /* The remaining time is being reduced unless the instance is being
2349          * advertised without time limit.
2350          */
2351         if (adv_instance->timeout)
2352                 adv_instance->remaining_time =
2353                                 adv_instance->remaining_time - timeout;
2354
2355         /* Only use work for scheduling instances with legacy advertising */
2356         if (!ext_adv_capable(hdev)) {
2357                 hdev->adv_instance_timeout = timeout;
2358                 queue_delayed_work(hdev->req_workqueue,
2359                            &hdev->adv_instance_expire,
2360                            msecs_to_jiffies(timeout * 1000));
2361         }
2362
2363         /* If we're just re-scheduling the same instance again then do not
2364          * execute any HCI commands. This happens when a single instance is
2365          * being advertised.
2366          */
2367         if (!force && hdev->cur_adv_instance == instance &&
2368             hci_dev_test_flag(hdev, HCI_LE_ADV))
2369                 return 0;
2370
2371         hdev->cur_adv_instance = instance;
2372         if (ext_adv_capable(hdev)) {
2373                 __hci_req_start_ext_adv(req, instance);
2374         } else {
2375                 __hci_req_update_adv_data(req, instance);
2376                 __hci_req_update_scan_rsp_data(req, instance);
2377                 __hci_req_enable_advertising(req);
2378         }
2379
2380         return 0;
2381 }
2382
2383 /* For a single instance:
2384  * - force == true: The instance will be removed even when its remaining
2385  *   lifetime is not zero.
2386  * - force == false: the instance will be deactivated but kept stored unless
2387  *   the remaining lifetime is zero.
2388  *
2389  * For instance == 0x00:
2390  * - force == true: All instances will be removed regardless of their timeout
2391  *   setting.
2392  * - force == false: Only instances that have a timeout will be removed.
2393  */
2394 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2395                                 struct hci_request *req, u8 instance,
2396                                 bool force)
2397 {
2398         struct adv_info *adv_instance, *n, *next_instance = NULL;
2399         int err;
2400         u8 rem_inst;
2401
2402         /* Cancel any timeout concerning the removed instance(s). */
2403         if (!instance || hdev->cur_adv_instance == instance)
2404                 cancel_adv_timeout(hdev);
2405
2406         /* Get the next instance to advertise BEFORE we remove
2407          * the current one. This can be the same instance again
2408          * if there is only one instance.
2409          */
2410         if (instance && hdev->cur_adv_instance == instance)
2411                 next_instance = hci_get_next_instance(hdev, instance);
2412
2413         if (instance == 0x00) {
2414                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2415                                          list) {
2416                         if (!(force || adv_instance->timeout))
2417                                 continue;
2418
2419                         rem_inst = adv_instance->instance;
2420                         err = hci_remove_adv_instance(hdev, rem_inst);
2421                         if (!err)
2422                                 mgmt_advertising_removed(sk, hdev, rem_inst);
2423                 }
2424         } else {
2425                 adv_instance = hci_find_adv_instance(hdev, instance);
2426
2427                 if (force || (adv_instance && adv_instance->timeout &&
2428                               !adv_instance->remaining_time)) {
2429                         /* Don't advertise a removed instance. */
2430                         if (next_instance &&
2431                             next_instance->instance == instance)
2432                                 next_instance = NULL;
2433
2434                         err = hci_remove_adv_instance(hdev, instance);
2435                         if (!err)
2436                                 mgmt_advertising_removed(sk, hdev, instance);
2437                 }
2438         }
2439
2440         if (!req || !hdev_is_powered(hdev) ||
2441             hci_dev_test_flag(hdev, HCI_ADVERTISING))
2442                 return;
2443
2444         if (next_instance && !ext_adv_capable(hdev))
2445                 __hci_req_schedule_adv_instance(req, next_instance->instance,
2446                                                 false);
2447 }
2448
2449 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2450 {
2451         struct hci_dev *hdev = req->hdev;
2452
2453         /* If we're advertising or initiating an LE connection we can't
2454          * go ahead and change the random address at this time. This is
2455          * because the eventual initiator address used for the
2456          * subsequently created connection will be undefined (some
2457          * controllers use the new address and others the one we had
2458          * when the operation started).
2459          *
2460          * In this kind of scenario skip the update and let the random
2461          * address be updated at the next cycle.
2462          */
2463         if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2464             hci_lookup_le_connect(hdev)) {
2465                 bt_dev_dbg(hdev, "Deferring random address update");
2466                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2467                 return;
2468         }
2469
2470         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2471 }
2472
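/* Decide which own address type to use for the next air operation and,
 * when necessary, queue an HCI command to update the random address:
 * an RPA when use_rpa is set, a non-resolvable private address when
 * only require_privacy is set, the static random address when one is
 * configured (or forced), and the public address otherwise.
 */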
2473 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2474                               bool use_rpa, u8 *own_addr_type)
2475 {
2476         struct hci_dev *hdev = req->hdev;
2477         int err;
2478
2479         /* If privacy is enabled, use a resolvable private address. If the
2480          * current RPA has expired or something other than the current RPA
2481          * is in use, then generate a new one.
2482          */
2483         if (use_rpa) {
2484                 int to;
2485
2486                 /* If the controller supports LL Privacy, use own address type
2487                  * 0x03 (controller generates the RPA).
2488                  */
2489                 if (use_ll_privacy(hdev))
2490                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2491                 else
2492                         *own_addr_type = ADDR_LE_DEV_RANDOM;
2493
2494                 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2495                     !bacmp(&hdev->random_addr, &hdev->rpa))
2496                         return 0;
2497
2498                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2499                 if (err < 0) {
2500                         bt_dev_err(hdev, "failed to generate new RPA");
2501                         return err;
2502                 }
2503
2504                 set_random_addr(req, &hdev->rpa);
2505
2506                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2507                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2508
2509                 return 0;
2510         }
2511
2512         /* If privacy is required but a resolvable private address cannot
2513          * be used, fall back to a non-resolvable private address. This is
2514          * useful for active scanning and non-connectable advertising.
2515          */
2516         if (require_privacy) {
2517                 bdaddr_t nrpa;
2518
2519                 while (true) {
2520                         /* The non-resolvable private address is generated
2521                          * from six random bytes with the two most significant
2522                          * bits cleared.
2523                          */
2524                         get_random_bytes(&nrpa, 6);
2525                         nrpa.b[5] &= 0x3f;
2526
2527                         /* The non-resolvable private address shall not be
2528                          * equal to the public address.
2529                          */
2530                         if (bacmp(&hdev->bdaddr, &nrpa))
2531                                 break;
2532                 }
2533
2534                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2535                 set_random_addr(req, &nrpa);
2536                 return 0;
2537         }
2538
2539         /* If forcing static address is in use or there is no public
2540          * address use the static address as random address (but skip
2541          * the HCI command if the current random address is already the
2542          * static one).
2543          *
2544          * In case BR/EDR has been disabled on a dual-mode controller
2545          * and a static address has been configured, then use that
2546          * address instead of the public BR/EDR address.
2547          */
2548         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2549             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2550             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2551              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2552                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2553                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2554                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2555                                     &hdev->static_addr);
2556                 return 0;
2557         }
2558
2559         /* Neither privacy nor static address is being used so use a
2560          * public address.
2561          */
2562         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2563
2564         return 0;
2565 }
2566
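/* Returns true if any device on the whitelist lacks an active ACL
 * connection, in which case page scan must remain enabled so that such
 * devices are able to reconnect.
 */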
2567 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2568 {
2569         struct bdaddr_list *b;
2570
2571         list_for_each_entry(b, &hdev->whitelist, list) {
2572                 struct hci_conn *conn;
2573
2574                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2575                 if (!conn)
2576                         return true;
2577
2578                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2579                         return true;
2580         }
2581
2582         return false;
2583 }
2584
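/* Recompute the BR/EDR page/inquiry scan setting and queue a Write Scan
 * Enable command only when it differs from the currently active
 * HCI_PSCAN/HCI_ISCAN state.
 */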
2585 void __hci_req_update_scan(struct hci_request *req)
2586 {
2587         struct hci_dev *hdev = req->hdev;
2588         u8 scan;
2589
2590         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2591                 return;
2592
2593         if (!hdev_is_powered(hdev))
2594                 return;
2595
2596         if (mgmt_powering_down(hdev))
2597                 return;
2598
2599         if (hdev->scanning_paused)
2600                 return;
2601
2602         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2603             disconnected_whitelist_entries(hdev))
2604                 scan = SCAN_PAGE;
2605         else
2606                 scan = SCAN_DISABLED;
2607
2608         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2609                 scan |= SCAN_INQUIRY;
2610
2611         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2612             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2613                 return;
2614
2615         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2616 }
2617
2618 static int update_scan(struct hci_request *req, unsigned long opt)
2619 {
2620         hci_dev_lock(req->hdev);
2621         __hci_req_update_scan(req);
2622         hci_dev_unlock(req->hdev);
2623         return 0;
2624 }
2625
2626 static void scan_update_work(struct work_struct *work)
2627 {
2628         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2629
2630         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2631 }
2632
2633 static int connectable_update(struct hci_request *req, unsigned long opt)
2634 {
2635         struct hci_dev *hdev = req->hdev;
2636
2637         hci_dev_lock(hdev);
2638
2639         __hci_req_update_scan(req);
2640
2641         /* If BR/EDR is not enabled and we disable advertising as a
2642          * by-product of disabling connectable, we need to update the
2643          * advertising flags.
2644          */
2645         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2646                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2647
2648         /* Update the advertising parameters if necessary */
2649         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2650             !list_empty(&hdev->adv_instances)) {
2651                 if (ext_adv_capable(hdev))
2652                         __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2653                 else
2654                         __hci_req_enable_advertising(req);
2655         }
2656
2657         __hci_update_background_scan(req);
2658
2659         hci_dev_unlock(hdev);
2660
2661         return 0;
2662 }
2663
2664 static void connectable_update_work(struct work_struct *work)
2665 {
2666         struct hci_dev *hdev = container_of(work, struct hci_dev,
2667                                             connectable_update);
2668         u8 status;
2669
2670         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2671         mgmt_set_connectable_complete(hdev, status);
2672 }
2673
2674 static u8 get_service_classes(struct hci_dev *hdev)
2675 {
2676         struct bt_uuid *uuid;
2677         u8 val = 0;
2678
2679         list_for_each_entry(uuid, &hdev->uuids, list)
2680                 val |= uuid->svc_hint;
2681
2682         return val;
2683 }
2684
2685 void __hci_req_update_class(struct hci_request *req)
2686 {
2687         struct hci_dev *hdev = req->hdev;
2688         u8 cod[3];
2689
2690         bt_dev_dbg(hdev, "");
2691
2692         if (!hdev_is_powered(hdev))
2693                 return;
2694
2695         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2696                 return;
2697
2698         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2699                 return;
2700
2701         cod[0] = hdev->minor_class;
2702         cod[1] = hdev->major_class;
2703         cod[2] = get_service_classes(hdev);
2704
2705         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2706                 cod[1] |= 0x20;
2707
2708         if (memcmp(cod, hdev->dev_class, 3) == 0)
2709                 return;
2710
2711         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2712 }
2713
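/* The IAC LAPs are written in little-endian byte order: GIAC is
 * 0x9E8B33 and LIAC is 0x9E8B00. In limited discoverable mode both
 * LIAC and GIAC are used, with the LIAC listed first.
 */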
2714 static void write_iac(struct hci_request *req)
2715 {
2716         struct hci_dev *hdev = req->hdev;
2717         struct hci_cp_write_current_iac_lap cp;
2718
2719         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2720                 return;
2721
2722         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2723                 /* Limited discoverable mode */
2724                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2725                 cp.iac_lap[0] = 0x00;   /* LIAC */
2726                 cp.iac_lap[1] = 0x8b;
2727                 cp.iac_lap[2] = 0x9e;
2728                 cp.iac_lap[3] = 0x33;   /* GIAC */
2729                 cp.iac_lap[4] = 0x8b;
2730                 cp.iac_lap[5] = 0x9e;
2731         } else {
2732                 /* General discoverable mode */
2733                 cp.num_iac = 1;
2734                 cp.iac_lap[0] = 0x33;   /* GIAC */
2735                 cp.iac_lap[1] = 0x8b;
2736                 cp.iac_lap[2] = 0x9e;
2737         }
2738
2739         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2740                     (cp.num_iac * 3) + 1, &cp);
2741 }
2742
2743 static int discoverable_update(struct hci_request *req, unsigned long opt)
2744 {
2745         struct hci_dev *hdev = req->hdev;
2746
2747         hci_dev_lock(hdev);
2748
2749         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2750                 write_iac(req);
2751                 __hci_req_update_scan(req);
2752                 __hci_req_update_class(req);
2753         }
2754
2755         /* Advertising instances don't use the global discoverable setting, so
2756          * only update AD if advertising was enabled using Set Advertising.
2757          */
2758         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2759                 __hci_req_update_adv_data(req, 0x00);
2760
2761                 /* Discoverable mode affects the local advertising
2762                  * address in limited privacy mode.
2763                  */
2764                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2765                         if (ext_adv_capable(hdev))
2766                                 __hci_req_start_ext_adv(req, 0x00);
2767                         else
2768                                 __hci_req_enable_advertising(req);
2769                 }
2770         }
2771
2772         hci_dev_unlock(hdev);
2773
2774         return 0;
2775 }
2776
2777 static void discoverable_update_work(struct work_struct *work)
2778 {
2779         struct hci_dev *hdev = container_of(work, struct hci_dev,
2780                                             discoverable_update);
2781         u8 status;
2782
2783         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2784         mgmt_set_discoverable_complete(hdev, status);
2785 }
2786
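/* Queue the HCI command that matches the connection's current state:
 * Disconnect (or Disconnect Physical Link for AMP) when established,
 * Create Connection Cancel while still connecting, and Reject
 * Connection Request for not-yet-accepted incoming connections.
 */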
2787 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2788                       u8 reason)
2789 {
2790         switch (conn->state) {
2791         case BT_CONNECTED:
2792         case BT_CONFIG:
2793                 if (conn->type == AMP_LINK) {
2794                         struct hci_cp_disconn_phy_link cp;
2795
2796                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2797                         cp.reason = reason;
2798                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2799                                     &cp);
2800                 } else {
2801                         struct hci_cp_disconnect dc;
2802
2803                         dc.handle = cpu_to_le16(conn->handle);
2804                         dc.reason = reason;
2805                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2806                 }
2807
2808                 conn->state = BT_DISCONN;
2809
2810                 break;
2811         case BT_CONNECT:
2812                 if (conn->type == LE_LINK) {
2813                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2814                                 break;
2815                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2816                                     0, NULL);
2817                 } else if (conn->type == ACL_LINK) {
2818                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2819                                 break;
2820                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2821                                     6, &conn->dst);
2822                 }
2823                 break;
2824         case BT_CONNECT2:
2825                 if (conn->type == ACL_LINK) {
2826                         struct hci_cp_reject_conn_req rej;
2827
2828                         bacpy(&rej.bdaddr, &conn->dst);
2829                         rej.reason = reason;
2830
2831                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2832                                     sizeof(rej), &rej);
2833                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2834                         struct hci_cp_reject_sync_conn_req rej;
2835
2836                         bacpy(&rej.bdaddr, &conn->dst);
2837
2838                         /* SCO rejection has its own limited set of
2839                          * allowed error values (0x0D-0x0F) which isn't
2840                          * compatible with most values passed to this
2841                          * function. To be safe hard-code one of the
2842                          * values that's suitable for SCO.
2843                          */
2844                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2845
2846                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2847                                     sizeof(rej), &rej);
2848                 }
2849                 break;
2850         default:
2851                 conn->state = BT_CLOSED;
2852                 break;
2853         }
2854 }
2855
2856 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2857 {
2858         if (status)
2859                 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2860 }
2861
2862 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2863 {
2864         struct hci_request req;
2865         int err;
2866
2867         hci_req_init(&req, conn->hdev);
2868
2869         __hci_abort_conn(&req, conn, reason);
2870
2871         err = hci_req_run(&req, abort_conn_complete);
2872         if (err && err != -ENODATA) {
2873                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2874                 return err;
2875         }
2876
2877         return 0;
2878 }
2879
2880 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2881 {
2882         hci_dev_lock(req->hdev);
2883         __hci_update_background_scan(req);
2884         hci_dev_unlock(req->hdev);
2885         return 0;
2886 }
2887
2888 static void bg_scan_update(struct work_struct *work)
2889 {
2890         struct hci_dev *hdev = container_of(work, struct hci_dev,
2891                                             bg_scan_update);
2892         struct hci_conn *conn;
2893         u8 status;
2894         int err;
2895
2896         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2897         if (!err)
2898                 return;
2899
2900         hci_dev_lock(hdev);
2901
2902         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2903         if (conn)
2904                 hci_le_conn_failed(conn, status);
2905
2906         hci_dev_unlock(hdev);
2907 }
2908
2909 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2910 {
2911         hci_req_add_le_scan_disable(req, false);
2912         return 0;
2913 }
2914
2915 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2916 {
2917         u8 length = opt;
2918         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2919         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2920         struct hci_cp_inquiry cp;
2921
2922         bt_dev_dbg(req->hdev, "");
2923
2924         hci_dev_lock(req->hdev);
2925         hci_inquiry_cache_flush(req->hdev);
2926         hci_dev_unlock(req->hdev);
2927
2928         memset(&cp, 0, sizeof(cp));
2929
2930         if (req->hdev->discovery.limited)
2931                 memcpy(&cp.lap, liac, sizeof(cp.lap));
2932         else
2933                 memcpy(&cp.lap, giac, sizeof(cp.lap));
2934
2935         cp.length = length;
2936
2937         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2938
2939         return 0;
2940 }
2941
2942 static void le_scan_disable_work(struct work_struct *work)
2943 {
2944         struct hci_dev *hdev = container_of(work, struct hci_dev,
2945                                             le_scan_disable.work);
2946         u8 status;
2947
2948         bt_dev_dbg(hdev, "");
2949
2950         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2951                 return;
2952
2953         cancel_delayed_work(&hdev->le_scan_restart);
2954
2955         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2956         if (status) {
2957                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2958                            status);
2959                 return;
2960         }
2961
2962         hdev->discovery.scan_start = 0;
2963
2964         /* If we were running an LE-only scan, change the discovery state.
2965          * If we were running both LE scanning and BR/EDR inquiry
2966          * simultaneously and the BR/EDR inquiry has already finished, stop
2967          * discovery; otherwise the BR/EDR inquiry will stop discovery when
2968          * it finishes. If we are still resolving a remote device name, do
2969          * not change the discovery state.
2970          */
2971
2972         if (hdev->discovery.type == DISCOV_TYPE_LE)
2973                 goto discov_stopped;
2974
2975         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2976                 return;
2977
2978         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2979                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2980                     hdev->discovery.state != DISCOVERY_RESOLVING)
2981                         goto discov_stopped;
2982
2983                 return;
2984         }
2985
2986         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2987                      HCI_CMD_TIMEOUT, &status);
2988         if (status) {
2989                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2990                 goto discov_stopped;
2991         }
2992
2993         return;
2994
2995 discov_stopped:
2996         hci_dev_lock(hdev);
2997         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2998         hci_dev_unlock(hdev);
2999 }
3000
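/* Request builder that restarts an already-running LE scan by turning
 * scanning off and back on again. This matters on controllers with a
 * strict duplicate filter (see le_scan_restart_work() below): toggling
 * the scan clears the controller's duplicate cache so that devices
 * which were already reported once show up again.
 */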
3001 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3002 {
3003         struct hci_dev *hdev = req->hdev;
3004
3005         /* If controller is not scanning we are done. */
3006         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3007                 return 0;
3008
3009         if (hdev->scanning_paused) {
3010                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3011                 return 0;
3012         }
3013
3014         hci_req_add_le_scan_disable(req, false);
3015
3016         if (use_ext_scan(hdev)) {
3017                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3018
3019                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3020                 ext_enable_cp.enable = LE_SCAN_ENABLE;
3021                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3022
3023                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3024                             sizeof(ext_enable_cp), &ext_enable_cp);
3025         } else {
3026                 struct hci_cp_le_set_scan_enable cp;
3027
3028                 memset(&cp, 0, sizeof(cp));
3029                 cp.enable = LE_SCAN_ENABLE;
3030                 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3031                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3032         }
3033
3034         return 0;
3035 }
3036
3037 static void le_scan_restart_work(struct work_struct *work)
3038 {
3039         struct hci_dev *hdev = container_of(work, struct hci_dev,
3040                                             le_scan_restart.work);
3041         unsigned long timeout, duration, scan_start, now;
3042         u8 status;
3043
3044         bt_dev_dbg(hdev, "");
3045
3046         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3047         if (status) {
3048                 bt_dev_err(hdev, "failed to restart LE scan: status 0x%02x",
3049                            status);
3050                 return;
3051         }
3052
3053         hci_dev_lock(hdev);
3054
3055         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3056             !hdev->discovery.scan_start)
3057                 goto unlock;
3058
3059         /* When the scan was started, hdev->le_scan_disable was queued to
3060          * run 'duration' jiffies after scan_start. That work was canceled
3061          * during the scan restart, so queue it again with the remaining
3062          * timeout to make sure the scan does not run indefinitely.
3063          */
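        /* Worked example for the wraparound branch below, assuming
         * scan_start = ULONG_MAX - 9 and now = 20 after jiffies wrapped:
         * elapsed = ULONG_MAX - scan_start + now = 9 + 20 = 29 ticks, one
         * tick short of the exact unsigned difference now - scan_start
         * (30); the discrepancy is harmless when rescheduling a timeout.
         */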
3064         duration = hdev->discovery.scan_duration;
3065         scan_start = hdev->discovery.scan_start;
3066         now = jiffies;
3067         if (now - scan_start <= duration) {
3068                 int elapsed;
3069
3070                 if (now >= scan_start)
3071                         elapsed = now - scan_start;
3072                 else
3073                         elapsed = ULONG_MAX - scan_start + now;
3074
3075                 timeout = duration - elapsed;
3076         } else {
3077                 timeout = 0;
3078         }
3079
3080         queue_delayed_work(hdev->req_workqueue,
3081                            &hdev->le_scan_disable, timeout);
3082
3083 unlock:
3084         hci_dev_unlock(hdev);
3085 }
3086
3087 static int active_scan(struct hci_request *req, unsigned long opt)
3088 {
3089         u16 interval = opt;
3090         struct hci_dev *hdev = req->hdev;
3091         u8 own_addr_type;
3092         /* White list is not used for discovery */
3093         u8 filter_policy = 0x00;
3094         /* Discovery doesn't require controller address resolution */
3095         bool addr_resolv = false;
3096         int err;
3097
3098         bt_dev_dbg(hdev, "");
3099
3100         /* If the controller is already scanning, background scanning is
3101          * running. Temporarily stop it so that the discovery scan
3102          * parameters can be set.
3103          */
3104         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3105                 hci_req_add_le_scan_disable(req, false);
3106                 cancel_interleave_scan(hdev);
3107         }
3108
3109         /* All active scans will be done with either a resolvable private
3110          * address (when privacy feature has been enabled) or non-resolvable
3111          * private address.
3112          */
3113         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3114                                         &own_addr_type);
3115         if (err < 0)
3116                 own_addr_type = ADDR_LE_DEV_PUBLIC;
3117
3118         hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3119                            hdev->le_scan_window_discovery, own_addr_type,
3120                            filter_policy, addr_resolv);
3121         return 0;
3122 }
3123
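/* Helper used for DISCOV_TYPE_INTERLEAVED on controllers that can run LE
 * scanning and BR/EDR inquiry simultaneously (see
 * HCI_QUIRK_SIMULTANEOUS_DISCOVERY): the active LE scan is started first
 * and the inquiry is queued in the same request.
 */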
3124 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3125 {
3126         int err;
3127
3128         bt_dev_dbg(req->hdev, "");
3129
3130         err = active_scan(req, opt);
3131         if (err)
3132                 return err;
3133
3134         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3135 }
3136
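/* Drive a single discovery round according to hdev->discovery.type:
 * BR/EDR-only inquiry, LE-only scan or interleaved. For the LE cases the
 * le_scan_disable delayed work is armed afterwards so that scanning is
 * stopped again once the discovery timeout expires.
 */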
3137 static void start_discovery(struct hci_dev *hdev, u8 *status)
3138 {
3139         unsigned long timeout;
3140
3141         bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3142
3143         switch (hdev->discovery.type) {
3144         case DISCOV_TYPE_BREDR:
3145                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3146                         hci_req_sync(hdev, bredr_inquiry,
3147                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3148                                      status);
3149                 return;
3150         case DISCOV_TYPE_INTERLEAVED:
3151                 /* When running simultaneous discovery, the LE scanning time
3152                  * should occupy the whole discovery time since BR/EDR inquiry
3153                  * and LE scanning are scheduled by the controller.
3154                  *
3155                  * For interleaved discovery, in comparison, BR/EDR inquiry
3156                  * and LE scanning are done sequentially with separate
3157                  * timeouts.
3158                  */
3159                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3160                              &hdev->quirks)) {
3161                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3162                         /* During simultaneous discovery, we double LE scan
3163                          * interval. We must leave some time for the controller
3164                          * to do BR/EDR inquiry.
3165                          */
3166                         hci_req_sync(hdev, interleaved_discov,
3167                                      hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3168                                      status);
3169                         break;
3170                 }
3171
3172                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3173                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3174                              HCI_CMD_TIMEOUT, status);
3175                 break;
3176         case DISCOV_TYPE_LE:
3177                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3178                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3179                              HCI_CMD_TIMEOUT, status);
3180                 break;
3181         default:
3182                 *status = HCI_ERROR_UNSPECIFIED;
3183                 return;
3184         }
3185
3186         if (*status)
3187                 return;
3188
3189         bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3190
3191         /* When service discovery is used and the controller has a
3192          * strict duplicate filter, it is important to remember the
3193          * start and duration of the scan. This is required for
3194          * restarting scanning during the discovery phase.
3195          */
3196         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3197                      hdev->discovery.result_filtering) {
3198                 hdev->discovery.scan_start = jiffies;
3199                 hdev->discovery.scan_duration = timeout;
3200         }
3201
3202         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3203                            timeout);
3204 }
3205
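/* Append whatever is needed to stop the current discovery (inquiry
 * cancel, LE scan disable and/or remote name request cancel) to @req.
 * Returns true if at least one command was queued, i.e. the caller
 * should expect a command completion before discovery is fully stopped.
 */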
3206 bool hci_req_stop_discovery(struct hci_request *req)
3207 {
3208         struct hci_dev *hdev = req->hdev;
3209         struct discovery_state *d = &hdev->discovery;
3210         struct hci_cp_remote_name_req_cancel cp;
3211         struct inquiry_entry *e;
3212         bool ret = false;
3213
3214         bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3215
3216         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3217                 if (test_bit(HCI_INQUIRY, &hdev->flags))
3218                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3219
3220                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3221                         cancel_delayed_work(&hdev->le_scan_disable);
3222                         hci_req_add_le_scan_disable(req, false);
3223                 }
3224
3225                 ret = true;
3226         } else {
3227                 /* Passive scanning */
3228                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3229                         hci_req_add_le_scan_disable(req, false);
3230                         ret = true;
3231                 }
3232         }
3233
3234         /* No further actions needed for LE-only discovery */
3235         if (d->type == DISCOV_TYPE_LE)
3236                 return ret;
3237
3238         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3239                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3240                                                      NAME_PENDING);
3241                 if (!e)
3242                         return ret;
3243
3244                 bacpy(&cp.bdaddr, &e->data.bdaddr);
3245                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3246                             &cp);
3247                 ret = true;
3248         }
3249
3250         return ret;
3251 }
3252
3253 static int stop_discovery(struct hci_request *req, unsigned long opt)
3254 {
3255         hci_dev_lock(req->hdev);
3256         hci_req_stop_discovery(req);
3257         hci_dev_unlock(req->hdev);
3258
3259         return 0;
3260 }
3261
3262 static void discov_update(struct work_struct *work)
3263 {
3264         struct hci_dev *hdev = container_of(work, struct hci_dev,
3265                                             discov_update);
3266         u8 status = 0;
3267
3268         switch (hdev->discovery.state) {
3269         case DISCOVERY_STARTING:
3270                 start_discovery(hdev, &status);
3271                 mgmt_start_discovery_complete(hdev, status);
3272                 if (status)
3273                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3274                 else
3275                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3276                 break;
3277         case DISCOVERY_STOPPING:
3278                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3279                 mgmt_stop_discovery_complete(hdev, status);
3280                 if (!status)
3281                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3282                 break;
3283         case DISCOVERY_STOPPED:
3284         default:
3285                 return;
3286         }
3287 }
3288
3289 static void discov_off(struct work_struct *work)
3290 {
3291         struct hci_dev *hdev = container_of(work, struct hci_dev,
3292                                             discov_off.work);
3293
3294         bt_dev_dbg(hdev, "");
3295
3296         hci_dev_lock(hdev);
3297
3298         /* When the discoverable timeout triggers, just make sure
3299          * the limited discoverable flag is cleared. Even in the case
3300          * of a timeout triggered from general discoverable, it is
3301          * safe to unconditionally clear the flag.
3302          */
3303         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3304         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3305         hdev->discov_timeout = 0;
3306
3307         hci_dev_unlock(hdev);
3308
3309         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3310         mgmt_new_settings(hdev);
3311 }
3312
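/* Bring the controller in line with the stack's settings right after
 * power-on: SSP and Secure Connections support, LE host support, default
 * advertising and scan response data, link security (authentication) and
 * the BR/EDR scan mode, class, name and EIR data.
 */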
3313 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3314 {
3315         struct hci_dev *hdev = req->hdev;
3316         u8 link_sec;
3317
3318         hci_dev_lock(hdev);
3319
3320         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3321             !lmp_host_ssp_capable(hdev)) {
3322                 u8 mode = 0x01;
3323
3324                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3325
3326                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3327                         u8 support = 0x01;
3328
3329                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3330                                     sizeof(support), &support);
3331                 }
3332         }
3333
3334         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3335             lmp_bredr_capable(hdev)) {
3336                 struct hci_cp_write_le_host_supported cp;
3337
3338                 cp.le = 0x01;
3339                 cp.simul = 0x00;
3340
3341                 /* Check first if we already have the right
3342                  * host state (host features set)
3343                  */
3344                 if (cp.le != lmp_host_le_capable(hdev) ||
3345                     cp.simul != lmp_host_le_br_capable(hdev))
3346                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3347                                     sizeof(cp), &cp);
3348         }
3349
3350         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3351                 /* Make sure the controller has a good default for
3352                  * advertising data. This also applies to the case
3353                  * where BR/EDR was toggled during the AUTO_OFF phase.
3354                  */
3355                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3356                     list_empty(&hdev->adv_instances)) {
3357                         int err;
3358
3359                         if (ext_adv_capable(hdev)) {
3360                                 err = __hci_req_setup_ext_adv_instance(req,
3361                                                                        0x00);
3362                                 if (!err)
3363                                         __hci_req_update_scan_rsp_data(req,
3364                                                                        0x00);
3365                         } else {
3366                                 err = 0;
3367                                 __hci_req_update_adv_data(req, 0x00);
3368                                 __hci_req_update_scan_rsp_data(req, 0x00);
3369                         }
3370
3371                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3372                                 if (!ext_adv_capable(hdev))
3373                                         __hci_req_enable_advertising(req);
3374                                 else if (!err)
3375                                         __hci_req_enable_ext_advertising(req,
3376                                                                          0x00);
3377                         }
3378                 } else if (!list_empty(&hdev->adv_instances)) {
3379                         struct adv_info *adv_instance;
3380
3381                         adv_instance = list_first_entry(&hdev->adv_instances,
3382                                                         struct adv_info, list);
3383                         __hci_req_schedule_adv_instance(req,
3384                                                         adv_instance->instance,
3385                                                         true);
3386                 }
3387         }
3388
3389         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3390         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3391                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3392                             sizeof(link_sec), &link_sec);
3393
3394         if (lmp_bredr_capable(hdev)) {
3395                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3396                         __hci_req_write_fast_connectable(req, true);
3397                 else
3398                         __hci_req_write_fast_connectable(req, false);
3399                 __hci_req_update_scan(req);
3400                 __hci_req_update_class(req);
3401                 __hci_req_update_name(req);
3402                 __hci_req_update_eir(req);
3403         }
3404
3405         hci_dev_unlock(hdev);
3406         return 0;
3407 }
3408
3409 int __hci_req_hci_power_on(struct hci_dev *hdev)
3410 {
3411         /* Register the available SMP channels (BR/EDR and LE) only when
3412          * successfully powering on the controller. This late
3413          * registration is required so that LE SMP can clearly decide if
3414          * the public address or static address is used.
3415          */
3416         smp_register(hdev);
3417
3418         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3419                               NULL);
3420 }
3421
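/* Initialize the work items used by this file; called once during
 * controller setup, before any of the request work can be queued.
 */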
3422 void hci_request_setup(struct hci_dev *hdev)
3423 {
3424         INIT_WORK(&hdev->discov_update, discov_update);
3425         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3426         INIT_WORK(&hdev->scan_update, scan_update_work);
3427         INIT_WORK(&hdev->connectable_update, connectable_update_work);
3428         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3429         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3430         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3431         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3432         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3433         INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3434 }
3435
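/* Cancel the pending synchronous request (waking up any waiter with
 * -ENODEV) and flush all work items owned by this file; used when the
 * controller is being torn down.
 */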
3436 void hci_request_cancel_all(struct hci_dev *hdev)
3437 {
3438         hci_req_sync_cancel(hdev, ENODEV);
3439
3440         cancel_work_sync(&hdev->discov_update);
3441         cancel_work_sync(&hdev->bg_scan_update);
3442         cancel_work_sync(&hdev->scan_update);
3443         cancel_work_sync(&hdev->connectable_update);
3444         cancel_work_sync(&hdev->discoverable_update);
3445         cancel_delayed_work_sync(&hdev->discov_off);
3446         cancel_delayed_work_sync(&hdev->le_scan_disable);
3447         cancel_delayed_work_sync(&hdev->le_scan_restart);
3448
3449         if (hdev->adv_instance_timeout) {
3450                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3451                 hdev->adv_instance_timeout = 0;
3452         }
3453
3454         cancel_interleave_scan(hdev);
3455 }