/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"

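/* States of a synchronous request, tracked in hdev->req_status: a request
 * is PEND while its commands are in flight, DONE once the final command
 * completes, and CANCELED when it is aborted (see hci_req_sync_cancel()).
 */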
#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
        return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

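/* Typical use of the request machinery above (a minimal sketch): build a
 * request, queue one or more commands on it and hand the whole batch to
 * the command work queue in one go. The completion callback runs once the
 * last queued command completes.
 *
 *        struct hci_request req;
 *
 *        hci_req_init(&req, hdev);
 *        hci_req_add(&req, HCI_OP_..., plen, param);
 *        hci_req_run(&req, complete_cb);
 */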
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        bt_dev_dbg(hdev, "result 0x%2.2x", result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        bt_dev_dbg(hdev, "err 0x%2.2x", err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        bt_dev_dbg(hdev, "");

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        bt_dev_dbg(hdev, "end: err %d", err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
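
/* Example of the synchronous command API above (a sketch): read the local
 * version information and free the returned event skb. HCI_INIT_TIMEOUT is
 * the timeout normally used for setup-style commands.
 *
 *        struct sk_buff *skb;
 *
 *        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                             HCI_INIT_TIMEOUT);
 *        if (IS_ERR(skb))
 *                return PTR_ERR(skb);
 *        kfree_skb(skb);
 */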

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        bt_dev_dbg(hdev, "start");

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        bt_dev_dbg(hdev, "end: err %d", err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        /* Check the state after obtaining the lock to protect HCI_UP
         * against any races from hci_dev_do_close() when the controller
         * gets removed.
         */
        if (test_bit(HCI_UP, &hdev->flags))
                ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        else
                ret = -ENETDOWN;
        hci_req_sync_unlock(hdev);

        return ret;
}
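
/* Example of hci_req_sync() usage (a sketch): run a small builder function
 * under the request sync lock. The builder only queues commands; they are
 * sent and waited for by __hci_req_sync(). scan_disable_req is a made-up
 * name for illustration.
 *
 *        static int scan_disable_req(struct hci_request *req,
 *                                    unsigned long opt)
 *        {
 *                hci_req_add_le_scan_disable(req, false);
 *                return 0;
 *        }
 *        ...
 *        err = hci_req_sync(hdev, scan_disable_req, 0, HCI_CMD_TIMEOUT,
 *                           &status);
 */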

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        bt_dev_dbg(hdev, "skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}
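
/* The skb built above carries a standard HCI command packet: a 3 byte
 * header (HCI_COMMAND_HDR_SIZE) holding the little-endian opcode and the
 * parameter length, followed by plen parameter bytes. The packet type
 * (HCI_COMMAND_PKT) travels out of band in the skb control buffer.
 */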

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = hdev->def_page_scan_type;
                acp.interval = cpu_to_le16(hdev->def_page_scan_int);
        }

        acp.window = cpu_to_le16(hdev->def_page_scan_window);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
        hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
        return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
        bt_dev_dbg(hdev, "cancelling interleave scan");

        cancel_delayed_work_sync(&hdev->interleave_scan);

        hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if an interleave scan was started as a result of this call;
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
        /* Do interleaved scan only if all of the following are true:
         * - There is at least one ADV monitor
         * - At least one pending LE connection or one device to be scanned for
         * - Monitor offloading is not supported
         * If so, we should alternate between allowlist scan and one without
         * any filters to save power.
         */
        bool use_interleaving = hci_is_adv_monitoring(hdev) &&
                                !(list_empty(&hdev->pend_le_conns) &&
                                  list_empty(&hdev->pend_le_reports)) &&
                                hci_get_adv_monitor_offload_ext(hdev) ==
                                    HCI_ADV_MONITOR_EXT_NONE;
        bool is_interleaving = is_interleave_scanning(hdev);

        if (use_interleaving && !is_interleaving) {
                start_interleave_scan(hdev);
                bt_dev_dbg(hdev, "starting interleave scan");
                return true;
        }

        if (!use_interleaving && is_interleaving)
                cancel_interleave_scan(hdev);

        return false;
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure that proper values are set for the RSSI threshold and
         * UUID filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        bt_dev_dbg(hdev, "ADV monitoring is %s",
                   hci_is_adv_monitoring(hdev) ? "on" : "off");

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports) &&
            !hci_is_adv_monitoring(hdev)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for and no ADV monitors, we should stop
                 * the background scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req, false);

                bt_dev_dbg(hdev, "stopping background scanning");
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req, false);

                hci_req_add_le_passive_scan(req);
                bt_dev_dbg(hdev, "starting background scanning");
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

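/* 0x1200 is the PnP Information service class. It is skipped when building
 * the UUID16 list below, presumably because the same identification data is
 * already carried in the dedicated EIR_DEVICE_ID field (see create_eir()).
 */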
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}
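
/* Each list built above is a single EIR structure: one length octet
 * (covering the type octet plus the data), one type octet (EIR_UUID*_ALL,
 * downgraded to EIR_UUID*_SOME when the list is truncated for space) and
 * the concatenated UUIDs. The length octet starts at 1 and is bumped as
 * UUIDs are appended.
 */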

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (hdev->suspended)
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        /* Disable address resolution */
        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
                __u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
                                u8 bdaddr_type)
{
        struct hci_cp_le_del_from_white_list cp;

        cp.bdaddr_type = bdaddr_type;
        bacpy(&cp.bdaddr, bdaddr);

        bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(req->hdev) &&
            hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
                if (irk) {
                        struct hci_cp_le_del_from_resolv_list cp;

                        cp.bdaddr_type = bdaddr_type;
                        bacpy(&cp.bdaddr, bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
                             struct hci_conn_params *params, u8 *num_entries,
                             bool allow_rpa)
{
        struct hci_cp_le_add_to_white_list cp;
        struct hci_dev *hdev = req->hdev;

        /* Already in white list */
        if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
                                   params->addr_type))
                return 0;

        /* White list is full: have the caller fall back to a filter
         * policy that accepts all advertising.
         */
        if (*num_entries >= hdev->le_white_list_size)
                return -1;

        /* White list cannot be used with RPAs */
        if (!allow_rpa &&
            !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
                return -1;
        }

        /* During suspend, only wakeable devices can be in the whitelist */
        if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                                   params->current_flags))
                return 0;

        *num_entries += 1;
        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(hdev, &params->addr,
                                           params->addr_type);
                if (irk) {
                        struct hci_cp_le_add_to_resolv_list cp;

                        cp.bdaddr_type = params->addr_type;
                        bacpy(&cp.bdaddr, &params->addr);
                        memcpy(cp.peer_irk, irk->val, 16);

                        if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                                memcpy(cp.local_irk, hdev->irk, 16);
                        else
                                memset(cp.local_irk, 0, 16);

                        hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }

        return 0;
}

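/* Walk the white list programmed into the controller and the pending
 * connection/report lists, queueing add/remove commands as needed. Returns
 * the scan filter policy to use: 0x00 (accept all advertising) when the
 * white list cannot be used, 0x01 (white list only) when it fully describes
 * the devices of interest.
 */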
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 num_entries = 0;
        bool pend_conn, pend_report;
        /* We allow whitelisting even with RPAs in suspend. In the worst case,
         * we won't be able to wake from devices that use the Privacy 1.2
         * features. Additionally, once we support Privacy 1.2 and IRK
         * offloading, we can update this to also check for those conditions.
         */
        bool allow_rpa = hdev->suspended;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                                      &b->bdaddr,
                                                      b->bdaddr_type);
                pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                                        &b->bdaddr,
                                                        b->bdaddr_type);

                /* If the device is not likely to connect or report,
                 * remove it from the whitelist.
                 */
                if (!pend_conn && !pend_report) {
                        del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
                        continue;
                }

                /* White list cannot be used with RPAs */
                if (!allow_rpa &&
                    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
                    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        return 0x00;
                }

                num_entries++;
        }

        /* Since all white list entries that are no longer valid have been
         * removed, walk through the list of pending connections and ensure
         * that any new device gets programmed into the controller.
         *
         * If there are more devices than available white list entries in
         * the controller, then just abort and return the filter policy
         * value for not using the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (add_to_white_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space. Abort if space runs out.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (add_to_white_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* Use the allowlist unless all of the following conditions are true:
         * - We are not currently suspending
         * - One or more ADV monitors are registered and offloading is not
         *   supported
         * - Interleaved scanning is not currently using the allowlist
         */
        if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
            hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
            hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
                return 0x00;

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

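/* Queue the commands to start scanning. interval and window are expressed
 * in units of 0.625 ms (so e.g. 0x0010 = 10 ms), and the scan window must
 * not be larger than the scan interval.
 */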
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy,
                               bool addr_resolv)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            addr_resolv) {
                u8 enable = 0x01;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }

        /* Use extended scanning if the controller supports the LE Set
         * Extended Scan Parameters and LE Set Extended Scan Enable commands.
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *c;

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == LE_LINK && c->state == BT_CONNECT &&
                    test_bit(HCI_CONN_SCANNING, &c->flags)) {
                        rcu_read_unlock();
                        return true;
                }
        }

        rcu_read_unlock();

        return false;
}

/* Callers must invoke hci_req_add_le_scan_disable() first to disable
 * controller-based address resolution; only then can the resolving
 * list be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;
        u16 window, interval;
        /* Background scanning should run with address resolution */
        bool addr_resolv = true;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        if (hdev->enable_advmon_interleave_scan &&
            __hci_update_interleaved_scan(hdev))
                return;

        bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses and
         * LE privacy is enabled, controllers with Extended Scanner Filter
         * Policies support can enable handling of directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled), use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        if (hdev->suspended) {
                window = hdev->le_scan_window_suspend;
                interval = hdev->le_scan_int_suspend;

                set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
        } else if (hci_is_le_conn_scanning(hdev)) {
                window = hdev->le_scan_window_connect;
                interval = hdev->le_scan_int_connect;
        } else if (hci_is_adv_monitoring(hdev)) {
                window = hdev->le_scan_window_adv_monitor;
                interval = hdev->le_scan_int_adv_monitor;
        } else {
                window = hdev->le_scan_window;
                interval = hdev->le_scan_interval;
        }

        bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
        hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
                           own_addr_type, filter_policy, addr_resolv);
}

static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        /* Instance 0x00 always sets the local name */
        if (instance == 0x00)
                return true;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return false;

        if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
            adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
                return true;

        return adv_instance->scan_rsp_len ? true : false;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
        struct hci_cp_set_event_filter f;

        memset(&f, 0, sizeof(f));
        f.flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

        /* Update page scan state (since we may have modified it when setting
         * the event filter).
         */
        __hci_req_update_scan(req);
}

static void hci_req_set_event_filter(struct hci_request *req)
{
        struct bdaddr_list_with_flags *b;
        struct hci_cp_set_event_filter f;
        struct hci_dev *hdev = req->hdev;
        u8 scan = SCAN_DISABLED;

        /* Always clear event filter when starting */
        hci_req_clear_event_filter(req);

        list_for_each_entry(b, &hdev->whitelist, list) {
                if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                        b->current_flags))
                        continue;

                memset(&f, 0, sizeof(f));
                bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
                f.flt_type = HCI_FLT_CONN_SETUP;
                f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
                f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

                bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
                hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
                scan = SCAN_PAGE;
        }

        if (scan)
                set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
        else
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
        bt_dev_dbg(req->hdev, "Pausing advertising instances");

        /* Call to disable any advertisements active on the controller.
         * This will succeed even if no advertisements are configured.
         */
        __hci_req_disable_advertising(req);

        /* If we are using software rotation, pause the loop */
        if (!ext_adv_capable(req->hdev))
                cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
        struct adv_info *adv;

        bt_dev_dbg(req->hdev, "Resuming advertising instances");

        if (ext_adv_capable(req->hdev)) {
                /* Call for each tracked instance to be re-enabled */
                list_for_each_entry(adv, &req->hdev->adv_instances, list) {
                        __hci_req_enable_ext_advertising(req,
                                                         adv->instance);
                }

        } else {
                /* Schedule for most recent instance to be restarted and begin
                 * the software rotation loop
                 */
                __hci_req_schedule_adv_instance(req,
                                                req->hdev->cur_adv_instance,
                                                true);
        }
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_resume_adv_instances(&req);

        return hci_req_run(&req, NULL);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
                   status);
        if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
            test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
                clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
                clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                wake_up(&hdev->suspend_wait_q);
        }

        if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
                clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
                wake_up(&hdev->suspend_wait_q);
        }
}

static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
                                              bool enable)
{
        struct hci_dev *hdev = req->hdev;

        switch (hci_get_adv_monitor_offload_ext(hdev)) {
        case HCI_ADV_MONITOR_EXT_MSFT:
                msft_req_add_set_filter_enable(req, enable);
                break;
        default:
                return;
        }

        /* No need to block when enabling since it's on resume path */
        if (hdev->suspended && !enable)
                set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}

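/* Flow of the suspend preparation below: on BT_SUSPEND_DISCONNECT all
 * discovery, advertising and scanning is paused and every connection is
 * disconnected; on BT_SUSPEND_CONFIGURE_WAKE event filters for wakeable
 * devices are programmed and passive scanning re-enabled; any other state
 * restores normal operation. Each branch marks the work it still needs
 * completed in hdev->suspend_tasks so the caller can wait for it.
 */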
/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
        int old_state;
        struct hci_conn *conn;
        struct hci_request req;
        u8 page_scan;
        int disconnect_counter;

        if (next == hdev->suspend_state) {
                bt_dev_dbg(hdev, "Same state before and after: %d", next);
                goto done;
        }

        hdev->suspend_state = next;
        hci_req_init(&req, hdev);

        if (next == BT_SUSPEND_DISCONNECT) {
                /* Mark device as suspended */
                hdev->suspended = true;

                /* Pause discovery if not already stopped */
                old_state = hdev->discovery.state;
                if (old_state != DISCOVERY_STOPPED) {
                        set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
                        queue_work(hdev->req_workqueue, &hdev->discov_update);
                }

                hdev->discovery_paused = true;
                hdev->discovery_old_state = old_state;

                /* Stop directed advertising */
                old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
                if (old_state) {
                        set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
                        cancel_delayed_work(&hdev->discov_off);
                        queue_delayed_work(hdev->req_workqueue,
                                           &hdev->discov_off, 0);
                }

                /* Pause other advertisements */
                if (hdev->adv_instance_cnt)
                        __hci_req_pause_adv_instances(&req);

                hdev->advertising_paused = true;
                hdev->advertising_old_state = old_state;
                /* Disable page scan */
                page_scan = SCAN_DISABLED;
                hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

                /* Disable LE passive scan if enabled */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_interleave_scan(hdev);
                        hci_req_add_le_scan_disable(&req, false);
                }

                /* Disable advertisement filters */
                hci_req_add_set_adv_filter_enable(&req, false);

                /* Mark task needing completion */
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

                /* Prevent disconnects from causing scanning to be re-enabled */
                hdev->scanning_paused = true;

                /* Run commands before disconnecting */
                hci_req_run(&req, suspend_req_complete);

                disconnect_counter = 0;
                /* Soft disconnect everything (power off) */
                list_for_each_entry(conn, &hdev->conn_hash.list, list) {
                        hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
                        disconnect_counter++;
                }

                if (disconnect_counter > 0) {
                        bt_dev_dbg(hdev,
                                   "Had %d disconnects. Will wait on them",
                                   disconnect_counter);
                        set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
                }
        } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
                /* Unpause to take care of updating scanning params */
                hdev->scanning_paused = false;
                /* Enable event filter for paired devices */
                hci_req_set_event_filter(&req);
                /* Enable passive scan at lower duty cycle */
                __hci_update_background_scan(&req);
                /* Pause scan changes again. */
                hdev->scanning_paused = true;
                hci_req_run(&req, suspend_req_complete);
        } else {
                hdev->suspended = false;
                hdev->scanning_paused = false;

                hci_req_clear_event_filter(&req);
                /* Reset passive/background scanning to normal */
                __hci_update_background_scan(&req);
                /* Enable all of the advertisement filters */
                hci_req_add_set_adv_filter_enable(&req, true);

                /* Unpause directed advertising */
                hdev->advertising_paused = false;
                if (hdev->advertising_old_state) {
                        set_bit(SUSPEND_UNPAUSE_ADVERTISING,
                                hdev->suspend_tasks);
                        hci_dev_set_flag(hdev, HCI_ADVERTISING);
                        queue_work(hdev->req_workqueue,
                                   &hdev->discoverable_update);
                        hdev->advertising_old_state = 0;
                }

                /* Resume other advertisements */
                if (hdev->adv_instance_cnt)
                        __hci_req_resume_adv_instances(&req);

                /* Unpause discovery */
                hdev->discovery_paused = false;
                if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
                    hdev->discovery_old_state != DISCOVERY_STOPPING) {
                        set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
                        hci_discovery_set_state(hdev, DISCOVERY_STARTING);
                        queue_work(hdev->req_workqueue, &hdev->discov_update);
                }

                hci_req_run(&req, suspend_req_complete);
        }

        hdev->suspend_state = next;

done:
        clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
        wake_up(&hdev->suspend_wait_q);
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
        return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        if (ext_adv_capable(req->hdev)) {
                __hci_req_disable_ext_adv_instance(req, 0x00);

        } else {
                u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
        }
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 if given an invalid instance identifier. */
1456         if (!adv_instance)
1457                 return 0;
1458
1459         return adv_instance->flags;
1460 }
1461
1462 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1463 {
1464         /* If privacy is not enabled don't use RPA */
1465         if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1466                 return false;
1467
1468         /* If basic privacy mode is enabled use RPA */
1469         if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1470                 return true;
1471
1472         /* When limited privacy mode is enabled, don't use an RPA if
1473          * we're both discoverable and bondable.
1474          */
1475         if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1476             hci_dev_test_flag(hdev, HCI_BONDABLE))
1477                 return false;
1478
1479         /* We're neither bondable nor discoverable in the limited
1480          * privacy mode, therefore use RPA.
1481          */
1482         return true;
1483 }
1484
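/* hdev->le_states caches the LE Supported States bitmask reported by the
 * controller through the HCI LE Read Supported States command. The checks
 * below test the advertising-state/connection-state combination bits
 * noted in the comments to decide whether the requested kind of
 * advertising may coexist with the current connections.
 */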
1485 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1486 {
1487         /* If there is no connection we are OK to advertise. */
1488         if (hci_conn_num(hdev, LE_LINK) == 0)
1489                 return true;
1490
1491         /* Check le_states if there is any connection in slave role. */
1492         if (hdev->conn_hash.le_num_slave > 0) {
1493                 /* Slave connection state and non-connectable mode bit 20. */
1494                 if (!connectable && !(hdev->le_states[2] & 0x10))
1495                         return false;
1496
1497                 /* Slave connection state and connectable mode bit 38
1498                  * and scannable bit 21.
1499                  */
1500                 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1501                                     !(hdev->le_states[2] & 0x20)))
1502                         return false;
1503         }
1504
1505         /* Check le_states if there is any connection in master role. */
1506         if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1507                 /* Master connection state and non-connectable mode bit 18. */
1508                 if (!connectable && !(hdev->le_states[2] & 0x02))
1509                         return false;
1510
1511                 /* Master connection state and connectable mode bit 35 and
1512                  * scannable bit 19.
1513                  */
1514                 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1515                                     !(hdev->le_states[2] & 0x08)))
1516                         return false;
1517         }
1518
1519         return true;
1520 }
1521
1522 void __hci_req_enable_advertising(struct hci_request *req)
1523 {
1524         struct hci_dev *hdev = req->hdev;
1525         struct adv_info *adv_instance;
1526         struct hci_cp_le_set_adv_param cp;
1527         u8 own_addr_type, enable = 0x01;
1528         bool connectable;
1529         u16 adv_min_interval, adv_max_interval;
1530         u32 flags;
1531
1532         flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1533         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1534
1535         /* If the "connectable" instance flag was not set, then choose between
1536          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1537          */
1538         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1539                       mgmt_get_connectable(hdev);
1540
1541         if (!is_advertising_allowed(hdev, connectable))
1542                 return;
1543
1544         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1545                 __hci_req_disable_advertising(req);
1546
1547         /* Clear the HCI_LE_ADV bit temporarily so that the
1548          * hci_update_random_address knows that it's safe to go ahead
1549          * and write a new random address. The flag will be set back on
1550          * as soon as the SET_ADV_ENABLE HCI command completes.
1551          */
1552         hci_dev_clear_flag(hdev, HCI_LE_ADV);
1553
1554         /* Set require_privacy to true only when non-connectable
1555          * advertising is used. In that case it is fine to use a
1556          * non-resolvable private address.
1557          */
1558         if (hci_update_random_address(req, !connectable,
1559                                       adv_use_rpa(hdev, flags),
1560                                       &own_addr_type) < 0)
1561                 return;
1562
1563         memset(&cp, 0, sizeof(cp));
1564
1565         if (adv_instance) {
1566                 adv_min_interval = adv_instance->min_interval;
1567                 adv_max_interval = adv_instance->max_interval;
1568         } else {
1569                 adv_min_interval = hdev->le_adv_min_interval;
1570                 adv_max_interval = hdev->le_adv_max_interval;
1571         }
1572
1573         if (connectable) {
1574                 cp.type = LE_ADV_IND;
1575         } else {
1576                 if (adv_cur_instance_is_scannable(hdev))
1577                         cp.type = LE_ADV_SCAN_IND;
1578                 else
1579                         cp.type = LE_ADV_NONCONN_IND;
1580
1581                 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1582                     hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1583                         adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1584                         adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1585                 }
1586         }
1587
1588         cp.min_interval = cpu_to_le16(adv_min_interval);
1589         cp.max_interval = cpu_to_le16(adv_max_interval);
1590         cp.own_address_type = own_addr_type;
1591         cp.channel_map = hdev->le_adv_channel_map;
1592
1593         hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1594
1595         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1596 }
1597
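/* Append the device name to the advertising data at @ptr, preferring in
 * order: the complete name when it fits within the short name budget,
 * the configured short name, and finally the complete name truncated to
 * HCI_MAX_SHORT_NAME_LENGTH bytes.
 */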
1598 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1599 {
1600         size_t short_len;
1601         size_t complete_len;
1602
1603         /* no space left for name (+ NULL + type + len) */
1604         if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1605                 return ad_len;
1606
1607         /* use complete name if present and fits */
1608         complete_len = strlen(hdev->dev_name);
1609         if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1610                 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1611                                        hdev->dev_name, complete_len + 1);
1612
1613         /* use short name if present */
1614         short_len = strlen(hdev->short_name);
1615         if (short_len)
1616                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1617                                        hdev->short_name, short_len + 1);
1618
1619         /* use shortened full name if present; we already know that the name
1620          * is longer than HCI_MAX_SHORT_NAME_LENGTH
1621          */
1622         if (complete_len) {
1623                 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1624
1625                 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1626                 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1627
1628                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1629                                        sizeof(name));
1630         }
1631
1632         return ad_len;
1633 }
1634
1635 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1636 {
1637         return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1638 }
1639
1640 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1641 {
1642         u8 scan_rsp_len = 0;
1643
1644         if (hdev->appearance) {
1645                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1646         }
1647
1648         return append_local_name(hdev, ptr, scan_rsp_len);
1649 }
1650
1651 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1652                                         u8 *ptr)
1653 {
1654         struct adv_info *adv_instance;
1655         u32 instance_flags;
1656         u8 scan_rsp_len = 0;
1657
1658         adv_instance = hci_find_adv_instance(hdev, instance);
1659         if (!adv_instance)
1660                 return 0;
1661
1662         instance_flags = adv_instance->flags;
1663
1664         if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1665                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1666         }
1667
1668         memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1669                adv_instance->scan_rsp_len);
1670
1671         scan_rsp_len += adv_instance->scan_rsp_len;
1672
1673         if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1674                 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1675
1676         return scan_rsp_len;
1677 }
1678
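/* Rebuild the scan response data for @instance and queue the matching
 * HCI command (extended or legacy variant). The freshly built data is
 * compared against the copy cached in hdev->scan_rsp_data so the command
 * is only sent when the data has actually changed.
 */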
1679 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1680 {
1681         struct hci_dev *hdev = req->hdev;
1682         u8 len;
1683
1684         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1685                 return;
1686
1687         if (ext_adv_capable(hdev)) {
1688                 struct hci_cp_le_set_ext_scan_rsp_data cp;
1689
1690                 memset(&cp, 0, sizeof(cp));
1691
1692                 if (instance)
1693                         len = create_instance_scan_rsp_data(hdev, instance,
1694                                                             cp.data);
1695                 else
1696                         len = create_default_scan_rsp_data(hdev, cp.data);
1697
1698                 if (hdev->scan_rsp_data_len == len &&
1699                     !memcmp(cp.data, hdev->scan_rsp_data, len))
1700                         return;
1701
1702                 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1703                 hdev->scan_rsp_data_len = len;
1704
1705                 cp.handle = instance;
1706                 cp.length = len;
1707                 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1708                 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1709
1710                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1711                             &cp);
1712         } else {
1713                 struct hci_cp_le_set_scan_rsp_data cp;
1714
1715                 memset(&cp, 0, sizeof(cp));
1716
1717                 if (instance)
1718                         len = create_instance_scan_rsp_data(hdev, instance,
1719                                                             cp.data);
1720                 else
1721                         len = create_default_scan_rsp_data(hdev, cp.data);
1722
1723                 if (hdev->scan_rsp_data_len == len &&
1724                     !memcmp(cp.data, hdev->scan_rsp_data, len))
1725                         return;
1726
1727                 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1728                 hdev->scan_rsp_data_len = len;
1729
1730                 cp.length = len;
1731
1732                 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1733         }
1734 }
1735
1736 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1737 {
1738         struct adv_info *adv_instance = NULL;
1739         u8 ad_len = 0, flags = 0;
1740         u32 instance_flags;
1741
1742         /* Return 0 when the current instance identifier is invalid. */
1743         if (instance) {
1744                 adv_instance = hci_find_adv_instance(hdev, instance);
1745                 if (!adv_instance)
1746                         return 0;
1747         }
1748
1749         instance_flags = get_adv_instance_flags(hdev, instance);
1750
1751         /* If the instance already has the flags set, skip adding them
1752          * again.
1753          */
1754         if (adv_instance && eir_get_data(adv_instance->adv_data,
1755                                          adv_instance->adv_data_len, EIR_FLAGS,
1756                                          NULL))
1757                 goto skip_flags;
1758
1759         /* The Add Advertising command allows userspace to set both the general
1760          * and limited discoverable flags.
1761          */
1762         if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1763                 flags |= LE_AD_GENERAL;
1764
1765         if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1766                 flags |= LE_AD_LIMITED;
1767
1768         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1769                 flags |= LE_AD_NO_BREDR;
1770
1771         if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1772                 /* If a discovery flag wasn't provided, simply use the global
1773                  * settings.
1774                  */
1775                 if (!flags)
1776                         flags |= mgmt_get_adv_discov_flags(hdev);
1777
1778                 /* If flags would still be empty, then there is no need to
1779                  * include the "Flags" AD field.
1780                  */
1781                 if (flags) {
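                        /* Flags AD structure: the length octet 0x02
                         * covers the AD type byte plus a single octet
                         * of flags.
                         */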
1782                         ptr[0] = 0x02;
1783                         ptr[1] = EIR_FLAGS;
1784                         ptr[2] = flags;
1785
1786                         ad_len += 3;
1787                         ptr += 3;
1788                 }
1789         }
1790
1791 skip_flags:
1792         if (adv_instance) {
1793                 memcpy(ptr, adv_instance->adv_data,
1794                        adv_instance->adv_data_len);
1795                 ad_len += adv_instance->adv_data_len;
1796                 ptr += adv_instance->adv_data_len;
1797         }
1798
1799         if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1800                 s8 adv_tx_power;
1801
1802                 if (ext_adv_capable(hdev)) {
1803                         if (adv_instance)
1804                                 adv_tx_power = adv_instance->tx_power;
1805                         else
1806                                 adv_tx_power = hdev->adv_tx_power;
1807                 } else {
1808                         adv_tx_power = hdev->adv_tx_power;
1809                 }
1810
1811                 /* Include the Tx Power field only when we have a valid value */
1812                 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1813                         ptr[0] = 0x02;
1814                         ptr[1] = EIR_TX_POWER;
1815                         ptr[2] = (u8)adv_tx_power;
1816
1817                         ad_len += 3;
1818                         ptr += 3;
1819                 }
1820         }
1821
1822         return ad_len;
1823 }
1824
1825 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1826 {
1827         struct hci_dev *hdev = req->hdev;
1828         u8 len;
1829
1830         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1831                 return;
1832
1833         if (ext_adv_capable(hdev)) {
1834                 struct hci_cp_le_set_ext_adv_data cp;
1835
1836                 memset(&cp, 0, sizeof(cp));
1837
1838                 len = create_instance_adv_data(hdev, instance, cp.data);
1839
1840                 /* There's nothing to do if the data hasn't changed */
1841                 if (hdev->adv_data_len == len &&
1842                     memcmp(cp.data, hdev->adv_data, len) == 0)
1843                         return;
1844
1845                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1846                 hdev->adv_data_len = len;
1847
1848                 cp.length = len;
1849                 cp.handle = instance;
1850                 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1851                 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1852
1853                 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1854         } else {
1855                 struct hci_cp_le_set_adv_data cp;
1856
1857                 memset(&cp, 0, sizeof(cp));
1858
1859                 len = create_instance_adv_data(hdev, instance, cp.data);
1860
1861                 /* There's nothing to do if the data hasn't changed */
1862                 if (hdev->adv_data_len == len &&
1863                     memcmp(cp.data, hdev->adv_data, len) == 0)
1864                         return;
1865
1866                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1867                 hdev->adv_data_len = len;
1868
1869                 cp.length = len;
1870
1871                 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1872         }
1873 }
1874
1875 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1876 {
1877         struct hci_request req;
1878
1879         hci_req_init(&req, hdev);
1880         __hci_req_update_adv_data(&req, instance);
1881
1882         return hci_req_run(&req, NULL);
1883 }
1884
1885 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1886                                             u16 opcode)
1887 {
1888         bt_dev_dbg(hdev, "status %u", status);
1889 }
1890
1891 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1892 {
1893         struct hci_request req;
1894         __u8 enable = 0x00;
1895
1896         if (!use_ll_privacy(hdev) &&
1897             !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1898                 return;
1899
1900         hci_req_init(&req, hdev);
1901
1902         hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1903
1904         hci_req_run(&req, enable_addr_resolution_complete);
1905 }
1906
1907 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1908 {
1909         bt_dev_dbg(hdev, "status %u", status);
1910 }
1911
1912 void hci_req_reenable_advertising(struct hci_dev *hdev)
1913 {
1914         struct hci_request req;
1915
1916         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1917             list_empty(&hdev->adv_instances))
1918                 return;
1919
1920         hci_req_init(&req, hdev);
1921
1922         if (hdev->cur_adv_instance) {
1923                 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1924                                                 true);
1925         } else {
1926                 if (ext_adv_capable(hdev)) {
1927                         __hci_req_start_ext_adv(&req, 0x00);
1928                 } else {
1929                         __hci_req_update_adv_data(&req, 0x00);
1930                         __hci_req_update_scan_rsp_data(&req, 0x00);
1931                         __hci_req_enable_advertising(&req);
1932                 }
1933         }
1934
1935         hci_req_run(&req, adv_enable_complete);
1936 }
1937
1938 static void adv_timeout_expire(struct work_struct *work)
1939 {
1940         struct hci_dev *hdev = container_of(work, struct hci_dev,
1941                                             adv_instance_expire.work);
1942
1943         struct hci_request req;
1944         u8 instance;
1945
1946         bt_dev_dbg(hdev, "");
1947
1948         hci_dev_lock(hdev);
1949
1950         hdev->adv_instance_timeout = 0;
1951
1952         instance = hdev->cur_adv_instance;
1953         if (instance == 0x00)
1954                 goto unlock;
1955
1956         hci_req_init(&req, hdev);
1957
1958         hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1959
1960         if (list_empty(&hdev->adv_instances))
1961                 __hci_req_disable_advertising(&req);
1962
1963         hci_req_run(&req, NULL);
1964
1965 unlock:
1966         hci_dev_unlock(hdev);
1967 }
1968
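/* One round of interleaved scanning: restart passive scanning and
 * advance hdev->interleave_scan_state so that successive rounds
 * alternate between allowlist-filtered and unfiltered scanning on
 * behalf of the advertisement monitors.
 */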
1969 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1970                                            unsigned long opt)
1971 {
1972         struct hci_dev *hdev = req->hdev;
1973         int ret = 0;
1974
1975         hci_dev_lock(hdev);
1976
1977         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1978                 hci_req_add_le_scan_disable(req, false);
1979         hci_req_add_le_passive_scan(req);
1980
1981         switch (hdev->interleave_scan_state) {
1982         case INTERLEAVE_SCAN_ALLOWLIST:
1983                 bt_dev_dbg(hdev, "next state: no filter");
1984                 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1985                 break;
1986         case INTERLEAVE_SCAN_NO_FILTER:
1987                 bt_dev_dbg(hdev, "next state: allowlist");
1988                 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1989                 break;
1990         case INTERLEAVE_SCAN_NONE:
1991                 bt_dev_err(hdev, "unexpected interleave scan state");
1992                 ret = -1;
1993         }
1994
1995         hci_dev_unlock(hdev);
1996
1997         return ret;
1998 }
1999
2000 static void interleave_scan_work(struct work_struct *work)
2001 {
2002         struct hci_dev *hdev = container_of(work, struct hci_dev,
2003                                             interleave_scan.work);
2004         u8 status;
2005         unsigned long timeout;
2006
2007         if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2008                 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2009         } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2010                 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2011         } else {
2012                 bt_dev_err(hdev, "unexpected error");
2013                 return;
2014         }
2015
2016         hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2017                      HCI_CMD_TIMEOUT, &status);
2018
2019         /* Don't continue interleaving if it was canceled */
2020         if (is_interleave_scanning(hdev))
2021                 queue_delayed_work(hdev->req_workqueue,
2022                                    &hdev->interleave_scan, timeout);
2023 }
2024
2025 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2026                            bool use_rpa, struct adv_info *adv_instance,
2027                            u8 *own_addr_type, bdaddr_t *rand_addr)
2028 {
2029         int err;
2030
2031         bacpy(rand_addr, BDADDR_ANY);
2032
2033         /* If privacy is enabled use a resolvable private address. If
2034          * current RPA has expired then generate a new one.
2035          */
2036         if (use_rpa) {
2037                 int to;
2038
2039                 /* If the controller supports LL Privacy, use own
2040                  * address type 0x03 (controller-resolved RPA).
2041                  */
2042                 if (use_ll_privacy(hdev))
2043                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2044                 else
2045                         *own_addr_type = ADDR_LE_DEV_RANDOM;
2046
2047                 if (adv_instance) {
2048                         if (!adv_instance->rpa_expired &&
2049                             !bacmp(&adv_instance->random_addr, &hdev->rpa))
2050                                 return 0;
2051
2052                         adv_instance->rpa_expired = false;
2053                 } else {
2054                         if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2055                             !bacmp(&hdev->random_addr, &hdev->rpa))
2056                                 return 0;
2057                 }
2058
2059                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2060                 if (err < 0) {
2061                         bt_dev_err(hdev, "failed to generate new RPA");
2062                         return err;
2063                 }
2064
2065                 bacpy(rand_addr, &hdev->rpa);
2066
2067                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2068                 if (adv_instance)
2069                         queue_delayed_work(hdev->workqueue,
2070                                            &adv_instance->rpa_expired_cb, to);
2071                 else
2072                         queue_delayed_work(hdev->workqueue,
2073                                            &hdev->rpa_expired, to);
2074
2075                 return 0;
2076         }
2077
2078         /* In case of required privacy without resolvable private address,
2079          * use a non-resolvable private address. This is useful for
2080          * non-connectable advertising.
2081          */
2082         if (require_privacy) {
2083                 bdaddr_t nrpa;
2084
2085                 while (true) {
2086                         /* The non-resolvable private address is generated
2087                          * from six random bytes with the two most significant
2088                          * bits cleared.
2089                          */
2090                         get_random_bytes(&nrpa, 6);
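                        /* bdaddr_t is stored little-endian, so b[5] is
                         * the most significant octet of the address.
                         */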
2091                         nrpa.b[5] &= 0x3f;
2092
2093                         /* The non-resolvable private address shall not be
2094                          * equal to the public address.
2095                          */
2096                         if (bacmp(&hdev->bdaddr, &nrpa))
2097                                 break;
2098                 }
2099
2100                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2101                 bacpy(rand_addr, &nrpa);
2102
2103                 return 0;
2104         }
2105
2106         /* No privacy so use a public address. */
2107         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2108
2109         return 0;
2110 }
2111
2112 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2113 {
2114         hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2115 }
2116
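/* Build the LE Set Extended Advertising Parameters command for
 * @instance. Legacy PDU event properties (ADV_IND, ADV_SCAN_IND or
 * ADV_NONCONN_IND) are used unless the instance requested a secondary
 * PHY through the MGMT_ADV_FLAG_SEC_* flags, in which case the extended
 * PDU equivalents are selected. An LE Set Advertising Set Random Address
 * command is queued afterwards whenever a new random address needs to
 * take effect.
 */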
2117 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2118 {
2119         struct hci_cp_le_set_ext_adv_params cp;
2120         struct hci_dev *hdev = req->hdev;
2121         bool connectable;
2122         u32 flags;
2123         bdaddr_t random_addr;
2124         u8 own_addr_type;
2125         int err;
2126         struct adv_info *adv_instance;
2127         bool secondary_adv;
2128
2129         if (instance > 0) {
2130                 adv_instance = hci_find_adv_instance(hdev, instance);
2131                 if (!adv_instance)
2132                         return -EINVAL;
2133         } else {
2134                 adv_instance = NULL;
2135         }
2136
2137         flags = get_adv_instance_flags(hdev, instance);
2138
2139         /* If the "connectable" instance flag was not set, then choose between
2140          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2141          */
2142         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2143                       mgmt_get_connectable(hdev);
2144
2145         if (!is_advertising_allowed(hdev, connectable))
2146                 return -EPERM;
2147
2148         /* Set require_privacy to true only when non-connectable
2149          * advertising is used. In that case it is fine to use a
2150          * non-resolvable private address.
2151          */
2152         err = hci_get_random_address(hdev, !connectable,
2153                                      adv_use_rpa(hdev, flags), adv_instance,
2154                                      &own_addr_type, &random_addr);
2155         if (err < 0)
2156                 return err;
2157
2158         memset(&cp, 0, sizeof(cp));
2159
2160         if (adv_instance) {
2161                 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2162                 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2163                 cp.tx_power = adv_instance->tx_power;
2164         } else {
2165                 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2166                 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2167                 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2168         }
2169
2170         secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2171
2172         if (connectable) {
2173                 if (secondary_adv)
2174                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2175                 else
2176                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2177         } else if (adv_instance_is_scannable(hdev, instance)) {
2178                 if (secondary_adv)
2179                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2180                 else
2181                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2182         } else {
2183                 if (secondary_adv)
2184                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2185                 else
2186                         cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2187         }
2188
2189         cp.own_addr_type = own_addr_type;
2190         cp.channel_map = hdev->le_adv_channel_map;
2191         cp.handle = instance;
2192
2193         if (flags & MGMT_ADV_FLAG_SEC_2M) {
2194                 cp.primary_phy = HCI_ADV_PHY_1M;
2195                 cp.secondary_phy = HCI_ADV_PHY_2M;
2196         } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2197                 cp.primary_phy = HCI_ADV_PHY_CODED;
2198                 cp.secondary_phy = HCI_ADV_PHY_CODED;
2199         } else {
2200                 /* In all other cases use 1M */
2201                 cp.primary_phy = HCI_ADV_PHY_1M;
2202                 cp.secondary_phy = HCI_ADV_PHY_1M;
2203         }
2204
2205         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2206
2207         if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2208             bacmp(&random_addr, BDADDR_ANY)) {
2209                 struct hci_cp_le_set_adv_set_rand_addr cp;
2210
2211                 /* Check if the random address needs to be updated */
2212                 if (adv_instance) {
2213                         if (!bacmp(&random_addr, &adv_instance->random_addr))
2214                                 return 0;
2215                 } else {
2216                         if (!bacmp(&random_addr, &hdev->random_addr))
2217                                 return 0;
2218                 }
2219
2220                 memset(&cp, 0, sizeof(cp));
2221
2222                 cp.handle = instance;
2223                 bacpy(&cp.bdaddr, &random_addr);
2224
2225                 hci_req_add(req,
2226                             HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2227                             sizeof(cp), &cp);
2228         }
2229
2230         return 0;
2231 }
2232
2233 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2234 {
2235         struct hci_dev *hdev = req->hdev;
2236         struct hci_cp_le_set_ext_adv_enable *cp;
2237         struct hci_cp_ext_adv_set *adv_set;
2238         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2239         struct adv_info *adv_instance;
2240
2241         if (instance > 0) {
2242                 adv_instance = hci_find_adv_instance(hdev, instance);
2243                 if (!adv_instance)
2244                         return -EINVAL;
2245         } else {
2246                 adv_instance = NULL;
2247         }
2248
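        /* data[] holds the enable command header immediately followed by
         * one advertising set entry, matching num_of_sets = 0x01 below.
         */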
2249         cp = (void *) data;
2250         adv_set = (void *) cp->data;
2251
2252         memset(cp, 0, sizeof(*cp));
2253
2254         cp->enable = 0x01;
2255         cp->num_of_sets = 0x01;
2256
2257         memset(adv_set, 0, sizeof(*adv_set));
2258
2259         adv_set->handle = instance;
2260
2261         /* Set duration per instance since controller is responsible for
2262          * scheduling it.
2263          */
2264         if (adv_instance && adv_instance->timeout) {
2265                 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2266
2267                 /* Time = N * 10 ms */
2268                 adv_set->duration = cpu_to_le16(duration / 10);
2269         }
2270
2271         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2272                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2273                     data);
2274
2275         return 0;
2276 }
2277
2278 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2279 {
2280         struct hci_dev *hdev = req->hdev;
2281         struct hci_cp_le_set_ext_adv_enable *cp;
2282         struct hci_cp_ext_adv_set *adv_set;
2283         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2284         u8 req_size;
2285
2286         /* If request specifies an instance that doesn't exist, fail */
2287         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2288                 return -EINVAL;
2289
2290         memset(data, 0, sizeof(data));
2291
2292         cp = (void *)data;
2293         adv_set = (void *)cp->data;
2294
2295         /* Instance 0x00 indicates all advertising instances will be disabled */
2296         cp->num_of_sets = !!instance;
2297         cp->enable = 0x00;
2298
2299         adv_set->handle = instance;
2300
2301         req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2302         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2303
2304         return 0;
2305 }
2306
2307 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2308 {
2309         struct hci_dev *hdev = req->hdev;
2310
2311         /* If request specifies an instance that doesn't exist, fail */
2312         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2313                 return -EINVAL;
2314
2315         hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2316
2317         return 0;
2318 }
2319
2320 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2321 {
2322         struct hci_dev *hdev = req->hdev;
2323         struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2324         int err;
2325
2326         /* If instance isn't pending, the chip knows about it, and it's safe to
2327          * disable
2328          */
2329         if (adv_instance && !adv_instance->pending)
2330                 __hci_req_disable_ext_adv_instance(req, instance);
2331
2332         err = __hci_req_setup_ext_adv_instance(req, instance);
2333         if (err < 0)
2334                 return err;
2335
2336         __hci_req_update_scan_rsp_data(req, instance);
2337         __hci_req_enable_ext_advertising(req, instance);
2338
2339         return 0;
2340 }
2341
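/* Schedule @instance for advertising. An instance's timeout is its total
 * advertised lifetime and its duration is the time it stays scheduled
 * per rotation round, both in seconds. Legacy controllers rely on the
 * adv_instance_expire delayed work for rotation, while extended
 * advertising capable controllers schedule the instances themselves.
 */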
2342 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2343                                     bool force)
2344 {
2345         struct hci_dev *hdev = req->hdev;
2346         struct adv_info *adv_instance = NULL;
2347         u16 timeout;
2348
2349         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2350             list_empty(&hdev->adv_instances))
2351                 return -EPERM;
2352
2353         if (hdev->adv_instance_timeout)
2354                 return -EBUSY;
2355
2356         adv_instance = hci_find_adv_instance(hdev, instance);
2357         if (!adv_instance)
2358                 return -ENOENT;
2359
2360         /* A zero timeout means unlimited advertising. As long as there is
2361          * only one instance, duration should be ignored. We still set a timeout
2362          * in case further instances are being added later on.
2363          *
2364          * If the remaining lifetime of the instance is more than the duration
2365          * then the timeout corresponds to the duration, otherwise it will be
2366          * reduced to the remaining instance lifetime.
2367          */
2368         if (adv_instance->timeout == 0 ||
2369             adv_instance->duration <= adv_instance->remaining_time)
2370                 timeout = adv_instance->duration;
2371         else
2372                 timeout = adv_instance->remaining_time;
2373
2374         /* The remaining time is being reduced unless the instance is being
2375          * advertised without time limit.
2376          */
2377         if (adv_instance->timeout)
2378                 adv_instance->remaining_time =
2379                                 adv_instance->remaining_time - timeout;
2380
2381         /* Only use work for scheduling instances with legacy advertising */
2382         if (!ext_adv_capable(hdev)) {
2383                 hdev->adv_instance_timeout = timeout;
2384                 queue_delayed_work(hdev->req_workqueue,
2385                            &hdev->adv_instance_expire,
2386                            msecs_to_jiffies(timeout * 1000));
2387         }
2388
2389         /* If we're just re-scheduling the same instance again then do not
2390          * execute any HCI commands. This happens when a single instance is
2391          * being advertised.
2392          */
2393         if (!force && hdev->cur_adv_instance == instance &&
2394             hci_dev_test_flag(hdev, HCI_LE_ADV))
2395                 return 0;
2396
2397         hdev->cur_adv_instance = instance;
2398         if (ext_adv_capable(hdev)) {
2399                 __hci_req_start_ext_adv(req, instance);
2400         } else {
2401                 __hci_req_update_adv_data(req, instance);
2402                 __hci_req_update_scan_rsp_data(req, instance);
2403                 __hci_req_enable_advertising(req);
2404         }
2405
2406         return 0;
2407 }
2408
2409 /* For a single instance:
2410  * - force == true: The instance will be removed even when its remaining
2411  *   lifetime is not zero.
2412  * - force == false: The instance will be deactivated but kept stored unless
2413  *   the remaining lifetime is zero.
2414  *
2415  * For instance == 0x00:
2416  * - force == true: All instances will be removed regardless of their timeout
2417  *   setting.
2418  * - force == false: Only instances that have a timeout will be removed.
2419  */
2420 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2421                                 struct hci_request *req, u8 instance,
2422                                 bool force)
2423 {
2424         struct adv_info *adv_instance, *n, *next_instance = NULL;
2425         int err;
2426         u8 rem_inst;
2427
2428         /* Cancel any timeout concerning the removed instance(s). */
2429         if (!instance || hdev->cur_adv_instance == instance)
2430                 cancel_adv_timeout(hdev);
2431
2432         /* Get the next instance to advertise BEFORE we remove
2433          * the current one. This can be the same instance again
2434          * if there is only one instance.
2435          */
2436         if (instance && hdev->cur_adv_instance == instance)
2437                 next_instance = hci_get_next_instance(hdev, instance);
2438
2439         if (instance == 0x00) {
2440                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2441                                          list) {
2442                         if (!(force || adv_instance->timeout))
2443                                 continue;
2444
2445                         rem_inst = adv_instance->instance;
2446                         err = hci_remove_adv_instance(hdev, rem_inst);
2447                         if (!err)
2448                                 mgmt_advertising_removed(sk, hdev, rem_inst);
2449                 }
2450         } else {
2451                 adv_instance = hci_find_adv_instance(hdev, instance);
2452
2453                 if (force || (adv_instance && adv_instance->timeout &&
2454                               !adv_instance->remaining_time)) {
2455                         /* Don't advertise a removed instance. */
2456                         if (next_instance &&
2457                             next_instance->instance == instance)
2458                                 next_instance = NULL;
2459
2460                         err = hci_remove_adv_instance(hdev, instance);
2461                         if (!err)
2462                                 mgmt_advertising_removed(sk, hdev, instance);
2463                 }
2464         }
2465
2466         if (!req || !hdev_is_powered(hdev) ||
2467             hci_dev_test_flag(hdev, HCI_ADVERTISING))
2468                 return;
2469
2470         if (next_instance && !ext_adv_capable(hdev))
2471                 __hci_req_schedule_adv_instance(req, next_instance->instance,
2472                                                 false);
2473 }
2474
2475 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2476 {
2477         struct hci_dev *hdev = req->hdev;
2478
2479         /* If we're advertising or initiating an LE connection we can't
2480          * go ahead and change the random address at this time. This is
2481          * because the eventual initiator address used for the
2482          * subsequently created connection will be undefined (some
2483          * controllers use the new address and others the one we had
2484          * when the operation started).
2485          *
2486          * In this kind of scenario skip the update and let the random
2487          * address be updated at the next cycle.
2488          */
2489         if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2490             hci_lookup_le_connect(hdev)) {
2491                 bt_dev_dbg(hdev, "Deferring random address update");
2492                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2493                 return;
2494         }
2495
2496         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2497 }
2498
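/* Pick the own address type (queueing an address update if needed) in
 * descending order of preference: a resolvable private address when
 * privacy is in use, a freshly generated non-resolvable private address
 * when privacy is required but an RPA cannot be used, the configured
 * static address, and finally the public address.
 */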
2499 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2500                               bool use_rpa, u8 *own_addr_type)
2501 {
2502         struct hci_dev *hdev = req->hdev;
2503         int err;
2504
2505         /* If privacy is enabled use a resolvable private address. If
2506          * the current RPA has expired or something other than the
2507          * current RPA is in use, then generate a new one.
2508          */
2509         if (use_rpa) {
2510                 int to;
2511
2512                 /* If the controller supports LL Privacy, use own
2513                  * address type 0x03 (controller-resolved RPA).
2514                  */
2515                 if (use_ll_privacy(hdev))
2516                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2517                 else
2518                         *own_addr_type = ADDR_LE_DEV_RANDOM;
2519
2520                 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2521                     !bacmp(&hdev->random_addr, &hdev->rpa))
2522                         return 0;
2523
2524                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2525                 if (err < 0) {
2526                         bt_dev_err(hdev, "failed to generate new RPA");
2527                         return err;
2528                 }
2529
2530                 set_random_addr(req, &hdev->rpa);
2531
2532                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2533                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2534
2535                 return 0;
2536         }
2537
2538         /* In case of required privacy without resolvable private address,
2539          * use a non-resolvable private address. This is useful for active
2540          * scanning and non-connectable advertising.
2541          */
2542         if (require_privacy) {
2543                 bdaddr_t nrpa;
2544
2545                 while (true) {
2546                         /* The non-resolvable private address is generated
2547                          * from six random bytes with the two most significant
2548                          * bits cleared.
2549                          */
2550                         get_random_bytes(&nrpa, 6);
2551                         nrpa.b[5] &= 0x3f;
2552
2553                         /* The non-resolvable private address shall not be
2554                          * equal to the public address.
2555                          */
2556                         if (bacmp(&hdev->bdaddr, &nrpa))
2557                                 break;
2558                 }
2559
2560                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2561                 set_random_addr(req, &nrpa);
2562                 return 0;
2563         }
2564
2565         /* If forcing static address is in use or there is no public
2566          * address use the static address as random address (but skip
2567          * the HCI command if the current random address is already the
2568          * static one).
2569          *
2570          * In case BR/EDR has been disabled on a dual-mode controller
2571          * and a static address has been configured, then use that
2572          * address instead of the public BR/EDR address.
2573          */
2574         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2575             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2576             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2577              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2578                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2579                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2580                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2581                                     &hdev->static_addr);
2582                 return 0;
2583         }
2584
2585         /* Neither privacy nor static address is being used so use a
2586          * public address.
2587          */
2588         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2589
2590         return 0;
2591 }
2592
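/* Return true if any device on the whitelist lacks a fully established
 * ACL connection, in which case page scan must remain enabled so that
 * the device is able to reconnect.
 */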
2593 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2594 {
2595         struct bdaddr_list *b;
2596
2597         list_for_each_entry(b, &hdev->whitelist, list) {
2598                 struct hci_conn *conn;
2599
2600                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2601                 if (!conn)
2602                         return true;
2603
2604                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2605                         return true;
2606         }
2607
2608         return false;
2609 }
2610
2611 void __hci_req_update_scan(struct hci_request *req)
2612 {
2613         struct hci_dev *hdev = req->hdev;
2614         u8 scan;
2615
2616         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2617                 return;
2618
2619         if (!hdev_is_powered(hdev))
2620                 return;
2621
2622         if (mgmt_powering_down(hdev))
2623                 return;
2624
2625         if (hdev->scanning_paused)
2626                 return;
2627
2628         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2629             disconnected_whitelist_entries(hdev))
2630                 scan = SCAN_PAGE;
2631         else
2632                 scan = SCAN_DISABLED;
2633
2634         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2635                 scan |= SCAN_INQUIRY;
2636
2637         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2638             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2639                 return;
2640
2641         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2642 }
2643
2644 static int update_scan(struct hci_request *req, unsigned long opt)
2645 {
2646         hci_dev_lock(req->hdev);
2647         __hci_req_update_scan(req);
2648         hci_dev_unlock(req->hdev);
2649         return 0;
2650 }
2651
2652 static void scan_update_work(struct work_struct *work)
2653 {
2654         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2655
2656         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2657 }
2658
2659 static int connectable_update(struct hci_request *req, unsigned long opt)
2660 {
2661         struct hci_dev *hdev = req->hdev;
2662
2663         hci_dev_lock(hdev);
2664
2665         __hci_req_update_scan(req);
2666
2667         /* If BR/EDR is not enabled and we disable advertising as a
2668          * by-product of disabling connectable, we need to update the
2669          * advertising flags.
2670          */
2671         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2672                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2673
2674         /* Update the advertising parameters if necessary */
2675         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2676             !list_empty(&hdev->adv_instances)) {
2677                 if (ext_adv_capable(hdev))
2678                         __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2679                 else
2680                         __hci_req_enable_advertising(req);
2681         }
2682
2683         __hci_update_background_scan(req);
2684
2685         hci_dev_unlock(hdev);
2686
2687         return 0;
2688 }
2689
2690 static void connectable_update_work(struct work_struct *work)
2691 {
2692         struct hci_dev *hdev = container_of(work, struct hci_dev,
2693                                             connectable_update);
2694         u8 status;
2695
2696         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2697         mgmt_set_connectable_complete(hdev, status);
2698 }
2699
2700 static u8 get_service_classes(struct hci_dev *hdev)
2701 {
2702         struct bt_uuid *uuid;
2703         u8 val = 0;
2704
2705         list_for_each_entry(uuid, &hdev->uuids, list)
2706                 val |= uuid->svc_hint;
2707
2708         return val;
2709 }
2710
2711 void __hci_req_update_class(struct hci_request *req)
2712 {
2713         struct hci_dev *hdev = req->hdev;
2714         u8 cod[3];
2715
2716         bt_dev_dbg(hdev, "");
2717
2718         if (!hdev_is_powered(hdev))
2719                 return;
2720
2721         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2722                 return;
2723
2724         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2725                 return;
2726
2727         cod[0] = hdev->minor_class;
2728         cod[1] = hdev->major_class;
2729         cod[2] = get_service_classes(hdev);
2730
2731         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2732                 cod[1] |= 0x20;
2733
2734         if (memcmp(cod, hdev->dev_class, 3) == 0)
2735                 return;
2736
2737         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2738 }
2739
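/* Program the inquiry access codes. The LAP values below are the
 * standard assigned numbers, stored little-endian: GIAC = 0x9E8B33 and
 * LIAC = 0x9E8B00.
 */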
2740 static void write_iac(struct hci_request *req)
2741 {
2742         struct hci_dev *hdev = req->hdev;
2743         struct hci_cp_write_current_iac_lap cp;
2744
2745         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2746                 return;
2747
2748         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2749                 /* Limited discoverable mode */
2750                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2751                 cp.iac_lap[0] = 0x00;   /* LIAC */
2752                 cp.iac_lap[1] = 0x8b;
2753                 cp.iac_lap[2] = 0x9e;
2754                 cp.iac_lap[3] = 0x33;   /* GIAC */
2755                 cp.iac_lap[4] = 0x8b;
2756                 cp.iac_lap[5] = 0x9e;
2757         } else {
2758                 /* General discoverable mode */
2759                 cp.num_iac = 1;
2760                 cp.iac_lap[0] = 0x33;   /* GIAC */
2761                 cp.iac_lap[1] = 0x8b;
2762                 cp.iac_lap[2] = 0x9e;
2763         }
2764
2765         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2766                     (cp.num_iac * 3) + 1, &cp);
2767 }
2768
2769 static int discoverable_update(struct hci_request *req, unsigned long opt)
2770 {
2771         struct hci_dev *hdev = req->hdev;
2772
2773         hci_dev_lock(hdev);
2774
2775         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2776                 write_iac(req);
2777                 __hci_req_update_scan(req);
2778                 __hci_req_update_class(req);
2779         }
2780
2781         /* Advertising instances don't use the global discoverable setting, so
2782          * only update AD if advertising was enabled using Set Advertising.
2783          */
2784         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2785                 __hci_req_update_adv_data(req, 0x00);
2786
2787                 /* Discoverable mode affects the local advertising
2788                  * address in limited privacy mode.
2789                  */
2790                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2791                         if (ext_adv_capable(hdev))
2792                                 __hci_req_start_ext_adv(req, 0x00);
2793                         else
2794                                 __hci_req_enable_advertising(req);
2795                 }
2796         }
2797
2798         hci_dev_unlock(hdev);
2799
2800         return 0;
2801 }
2802
2803 static void discoverable_update_work(struct work_struct *work)
2804 {
2805         struct hci_dev *hdev = container_of(work, struct hci_dev,
2806                                             discoverable_update);
2807         u8 status;
2808
2809         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2810         mgmt_set_discoverable_complete(hdev, status);
2811 }
2812
2813 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2814                       u8 reason)
2815 {
2816         switch (conn->state) {
2817         case BT_CONNECTED:
2818         case BT_CONFIG:
2819                 if (conn->type == AMP_LINK) {
2820                         struct hci_cp_disconn_phy_link cp;
2821
2822                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2823                         cp.reason = reason;
2824                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2825                                     &cp);
2826                 } else {
2827                         struct hci_cp_disconnect dc;
2828
2829                         dc.handle = cpu_to_le16(conn->handle);
2830                         dc.reason = reason;
2831                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2832                 }
2833
2834                 conn->state = BT_DISCONN;
2835
2836                 break;
2837         case BT_CONNECT:
2838                 if (conn->type == LE_LINK) {
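                        /* The connection attempt is still at the
                         * scanning stage, so no LE Create Connection
                         * has been sent yet and there is nothing to
                         * cancel on the controller.
                         */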
2839                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2840                                 break;
2841                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2842                                     0, NULL);
2843                 } else if (conn->type == ACL_LINK) {
2844                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2845                                 break;
2846                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2847                                     6, &conn->dst);
2848                 }
2849                 break;
2850         case BT_CONNECT2:
2851                 if (conn->type == ACL_LINK) {
2852                         struct hci_cp_reject_conn_req rej;
2853
2854                         bacpy(&rej.bdaddr, &conn->dst);
2855                         rej.reason = reason;
2856
2857                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2858                                     sizeof(rej), &rej);
2859                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2860                         struct hci_cp_reject_sync_conn_req rej;
2861
2862                         bacpy(&rej.bdaddr, &conn->dst);
2863
2864                         /* SCO rejection has its own limited set of
2865                          * allowed error values (0x0D-0x0F) which isn't
2866                          * compatible with most values passed to this
2867                          * function. To be safe hard-code one of the
2868                          * values that's suitable for SCO.
2869                          */
2870                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2871
2872                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2873                                     sizeof(rej), &rej);
2874                 }
2875                 break;
2876         default:
2877                 conn->state = BT_CLOSED;
2878                 break;
2879         }
2880 }
2881
2882 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2883 {
2884         if (status)
2885                 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2886 }
2887
2888 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2889 {
2890         struct hci_request req;
2891         int err;
2892
2893         hci_req_init(&req, conn->hdev);
2894
2895         __hci_abort_conn(&req, conn, reason);
2896
2897         err = hci_req_run(&req, abort_conn_complete);
2898         if (err && err != -ENODATA) {
2899                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2900                 return err;
2901         }
2902
2903         return 0;
2904 }
2905
2906 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2907 {
2908         hci_dev_lock(req->hdev);
2909         __hci_update_background_scan(req);
2910         hci_dev_unlock(req->hdev);
2911         return 0;
2912 }
2913
2914 static void bg_scan_update(struct work_struct *work)
2915 {
2916         struct hci_dev *hdev = container_of(work, struct hci_dev,
2917                                             bg_scan_update);
2918         struct hci_conn *conn;
2919         u8 status;
2920         int err;
2921
2922         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2923         if (!err)
2924                 return;
2925
2926         hci_dev_lock(hdev);
2927
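        /* The background scan update failed, so an LE connection attempt
         * that depends on scanning can no longer complete; fail it
         * explicitly so the error is propagated to the initiator.
         */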
2928         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2929         if (conn)
2930                 hci_le_conn_failed(conn, status);
2931
2932         hci_dev_unlock(hdev);
2933 }
2934
2935 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2936 {
2937         hci_req_add_le_scan_disable(req, false);
2938         return 0;
2939 }
2940
2941 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2942 {
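        /* 'opt' carries the inquiry length, which the HCI specification
         * expresses in units of 1.28 seconds. The GIAC (0x9E8B33) and
         * LIAC (0x9E8B00) below are the General and Limited Inquiry
         * Access Code LAPs in little-endian byte order.
         */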
2943         u8 length = opt;
2944         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2945         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2946         struct hci_cp_inquiry cp;
2947
2948         bt_dev_dbg(req->hdev, "");
2949
2950         hci_dev_lock(req->hdev);
2951         hci_inquiry_cache_flush(req->hdev);
2952         hci_dev_unlock(req->hdev);
2953
2954         memset(&cp, 0, sizeof(cp));
2955
2956         if (req->hdev->discovery.limited)
2957                 memcpy(&cp.lap, liac, sizeof(cp.lap));
2958         else
2959                 memcpy(&cp.lap, giac, sizeof(cp.lap));
2960
2961         cp.length = length;
2962
2963         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2964
2965         return 0;
2966 }
2967
2968 static void le_scan_disable_work(struct work_struct *work)
2969 {
2970         struct hci_dev *hdev = container_of(work, struct hci_dev,
2971                                             le_scan_disable.work);
2972         u8 status;
2973
2974         bt_dev_dbg(hdev, "");
2975
2976         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2977                 return;
2978
2979         cancel_delayed_work(&hdev->le_scan_restart);
2980
2981         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2982         if (status) {
2983                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2984                            status);
2985                 return;
2986         }
2987
2988         hdev->discovery.scan_start = 0;
2989
2990         /* If we were running an LE-only scan, change the discovery
2991          * state. If we were running both LE scan and BR/EDR inquiry
2992          * simultaneously and the BR/EDR inquiry has already finished,
2993          * stop discovery; otherwise the BR/EDR inquiry will stop
2994          * discovery when it finishes. If we are going to resolve a
2995          * remote device name, do not change the discovery state.
2996          */
2997
2998         if (hdev->discovery.type == DISCOV_TYPE_LE)
2999                 goto discov_stopped;
3000
3001         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
3002                 return;
3003
3004         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
3005                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3006                     hdev->discovery.state != DISCOVERY_RESOLVING)
3007                         goto discov_stopped;
3008
3009                 return;
3010         }
3011
3012         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
3013                      HCI_CMD_TIMEOUT, &status);
3014         if (status) {
3015                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
3016                 goto discov_stopped;
3017         }
3018
3019         return;
3020
3021 discov_stopped:
3022         hci_dev_lock(hdev);
3023         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3024         hci_dev_unlock(hdev);
3025 }
3026
3027 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3028 {
3029         struct hci_dev *hdev = req->hdev;
3030
3031         /* If the controller is not scanning, we are done. */
3032         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3033                 return 0;
3034
3035         if (hdev->scanning_paused) {
3036                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3037                 return 0;
3038         }
3039
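        /* Restarting the scan means disabling and immediately re-enabling
         * it; on controllers with a strict duplicate filter this resets
         * the filter so that already-reported devices show up again.
         */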
3040         hci_req_add_le_scan_disable(req, false);
3041
3042         if (use_ext_scan(hdev)) {
3043                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3044
3045                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3046                 ext_enable_cp.enable = LE_SCAN_ENABLE;
3047                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3048
3049                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3050                             sizeof(ext_enable_cp), &ext_enable_cp);
3051         } else {
3052                 struct hci_cp_le_set_scan_enable cp;
3053
3054                 memset(&cp, 0, sizeof(cp));
3055                 cp.enable = LE_SCAN_ENABLE;
3056                 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3057                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3058         }
3059
3060         return 0;
3061 }
3062
3063 static void le_scan_restart_work(struct work_struct *work)
3064 {
3065         struct hci_dev *hdev = container_of(work, struct hci_dev,
3066                                             le_scan_restart.work);
3067         unsigned long timeout, duration, scan_start, now;
3068         u8 status;
3069
3070         bt_dev_dbg(hdev, "");
3071
3072         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3073         if (status) {
3074                 bt_dev_err(hdev, "failed to restart LE scan: status 0x%02x",
3075                            status);
3076                 return;
3077         }
3078
3079         hci_dev_lock(hdev);
3080
3081         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3082             !hdev->discovery.scan_start)
3083                 goto unlock;
3084
3085         /* When the scan was started, the le_scan_disable work was queued
3086          * to run 'duration' jiffies after scan_start. During the scan restart
3087          * that work was canceled, so queue it again with the remaining
3088          * timeout to make sure the scan does not run indefinitely.
3089          */
3090         duration = hdev->discovery.scan_duration;
3091         scan_start = hdev->discovery.scan_start;
3092         now = jiffies;
3093         if (now - scan_start <= duration) {
3094                 int elapsed;
3095
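                /* jiffies may have wrapped around between scan_start and
                 * now; in that case count the time up to the wrap and the
                 * time after it separately.
                 */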
3096                 if (now >= scan_start)
3097                         elapsed = now - scan_start;
3098                 else
3099                         elapsed = ULONG_MAX - scan_start + now;
3100
3101                 timeout = duration - elapsed;
3102         } else {
3103                 timeout = 0;
3104         }
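        /* Worked example: with duration = 1000 jiffies, scan_start = 5000
         * and now = 5300, elapsed is 300 and the disable work is re-queued
         * to run in 700 jiffies; once more than 'duration' has already
         * passed, timeout is 0 and the work runs immediately.
         */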
3105
3106         queue_delayed_work(hdev->req_workqueue,
3107                            &hdev->le_scan_disable, timeout);
3108
3109 unlock:
3110         hci_dev_unlock(hdev);
3111 }
3112
3113 static int active_scan(struct hci_request *req, unsigned long opt)
3114 {
3115         uint16_t interval = opt;
3116         struct hci_dev *hdev = req->hdev;
3117         u8 own_addr_type;
3118         /* White list is not used for discovery */
3119         u8 filter_policy = 0x00;
3120         /* Discovery doesn't require controller address resolution */
3121         bool addr_resolv = false;
3122         int err;
3123
3124         bt_dev_dbg(hdev, "");
3125
3126         /* If the controller is already scanning, the background scan is
3127          * running. Temporarily stop it in order to set the discovery
3128          * scan parameters.
3129          */
3130         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3131                 hci_req_add_le_scan_disable(req, false);
3132                 cancel_interleave_scan(hdev);
3133         }
3134
3135         /* All active scans will be done with either a resolvable private
3136          * address (when the privacy feature has been enabled) or a
3137          * non-resolvable private address.
3138          */
3139         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3140                                         &own_addr_type);
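        /* If a private address cannot be programmed, fall back to the
         * public address rather than failing discovery altogether.
         */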
3141         if (err < 0)
3142                 own_addr_type = ADDR_LE_DEV_PUBLIC;
3143
3144         hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3145                            hdev->le_scan_window_discovery, own_addr_type,
3146                            filter_policy, addr_resolv);
3147         return 0;
3148 }
3149
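/* Build a single request that both starts the LE active scan and kicks off
 * BR/EDR inquiry. On controllers with HCI_QUIRK_SIMULTANEOUS_DISCOVERY the
 * two procedures then run concurrently under controller scheduling (see
 * start_discovery() below).
 */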
3150 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3151 {
3152         int err;
3153
3154         bt_dev_dbg(req->hdev, "");
3155
3156         err = active_scan(req, opt);
3157         if (err)
3158                 return err;
3159
3160         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
3161 }
3162
3163 static void start_discovery(struct hci_dev *hdev, u8 *status)
3164 {
3165         unsigned long timeout;
3166
3167         bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
3168
3169         switch (hdev->discovery.type) {
3170         case DISCOV_TYPE_BREDR:
3171                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3172                         hci_req_sync(hdev, bredr_inquiry,
3173                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3174                                      status);
3175                 return;
3176         case DISCOV_TYPE_INTERLEAVED:
3177                 /* When running simultaneous discovery, the LE scanning time
3178                  * should occupy the whole discovery time since BR/EDR inquiry
3179                  * and LE scanning are scheduled by the controller.
3180                  *
3181                  * For interleaved discovery, by comparison, BR/EDR inquiry
3182                  * and LE scanning are done sequentially with separate
3183                  * timeouts.
3184                  */
3185                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3186                              &hdev->quirks)) {
3187                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3188                         /* During simultaneous discovery, we double the LE
3189                          * scan interval to leave the controller enough time
3190                          * to do BR/EDR inquiry.
3191                          */
3192                         hci_req_sync(hdev, interleaved_discov,
3193                                      hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
3194                                      status);
3195                         break;
3196                 }
3197
3198                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3199                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3200                              HCI_CMD_TIMEOUT, status);
3201                 break;
3202         case DISCOV_TYPE_LE:
3203                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3204                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3205                              HCI_CMD_TIMEOUT, status);
3206                 break;
3207         default:
3208                 *status = HCI_ERROR_UNSPECIFIED;
3209                 return;
3210         }
3211
3212         if (*status)
3213                 return;
3214
3215         bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
3216
3217         /* When service discovery is used and the controller has a
3218          * strict duplicate filter, it is important to remember the
3219          * start and duration of the scan. This is required for
3220          * restarting scanning during the discovery phase.
3221          */
3222         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3223                      hdev->discovery.result_filtering) {
3224                 hdev->discovery.scan_start = jiffies;
3225                 hdev->discovery.scan_duration = timeout;
3226         }
3227
3228         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
3229                            timeout);
3230 }
3231
3232 bool hci_req_stop_discovery(struct hci_request *req)
3233 {
3234         struct hci_dev *hdev = req->hdev;
3235         struct discovery_state *d = &hdev->discovery;
3236         struct hci_cp_remote_name_req_cancel cp;
3237         struct inquiry_entry *e;
3238         bool ret = false;
3239
3240         bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
3241
3242         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3243                 if (test_bit(HCI_INQUIRY, &hdev->flags))
3244                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3245
3246                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3247                         cancel_delayed_work(&hdev->le_scan_disable);
3248                         hci_req_add_le_scan_disable(req, false);
3249                 }
3250
3251                 ret = true;
3252         } else {
3253                 /* Passive scanning */
3254                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3255                         hci_req_add_le_scan_disable(req, false);
3256                         ret = true;
3257                 }
3258         }
3259
3260         /* No further actions needed for LE-only discovery */
3261         if (d->type == DISCOV_TYPE_LE)
3262                 return ret;
3263
3264         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3265                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3266                                                      NAME_PENDING);
3267                 if (!e)
3268                         return ret;
3269
3270                 bacpy(&cp.bdaddr, &e->data.bdaddr);
3271                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3272                             &cp);
3273                 ret = true;
3274         }
3275
3276         return ret;
3277 }
3278
3279 static int stop_discovery(struct hci_request *req, unsigned long opt)
3280 {
3281         hci_dev_lock(req->hdev);
3282         hci_req_stop_discovery(req);
3283         hci_dev_unlock(req->hdev);
3284
3285         return 0;
3286 }
3287
3288 static void discov_update(struct work_struct *work)
3289 {
3290         struct hci_dev *hdev = container_of(work, struct hci_dev,
3291                                             discov_update);
3292         u8 status = 0;
3293
3294         switch (hdev->discovery.state) {
3295         case DISCOVERY_STARTING:
3296                 start_discovery(hdev, &status);
3297                 mgmt_start_discovery_complete(hdev, status);
3298                 if (status)
3299                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3300                 else
3301                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3302                 break;
3303         case DISCOVERY_STOPPING:
3304                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3305                 mgmt_stop_discovery_complete(hdev, status);
3306                 if (!status)
3307                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3308                 break;
3309         case DISCOVERY_STOPPED:
3310         default:
3311                 return;
3312         }
3313 }
3314
3315 static void discov_off(struct work_struct *work)
3316 {
3317         struct hci_dev *hdev = container_of(work, struct hci_dev,
3318                                             discov_off.work);
3319
3320         bt_dev_dbg(hdev, "");
3321
3322         hci_dev_lock(hdev);
3323
3324         /* When the discoverable timeout triggers, just make sure
3325          * the limited discoverable flag is cleared. Even if the
3326          * timeout was triggered from general discoverable mode, it
3327          * is safe to clear the flag unconditionally.
3328          */
3329         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3330         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3331         hdev->discov_timeout = 0;
3332
3333         hci_dev_unlock(hdev);
3334
3335         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3336         mgmt_new_settings(hdev);
3337 }
3338
3339 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3340 {
3341         struct hci_dev *hdev = req->hdev;
3342         u8 link_sec;
3343
3344         hci_dev_lock(hdev);
3345
3346         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3347             !lmp_host_ssp_capable(hdev)) {
3348                 u8 mode = 0x01;
3349
3350                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3351
3352                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3353                         u8 support = 0x01;
3354
3355                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3356                                     sizeof(support), &support);
3357                 }
3358         }
3359
3360         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3361             lmp_bredr_capable(hdev)) {
3362                 struct hci_cp_write_le_host_supported cp;
3363
3364                 cp.le = 0x01;
3365                 cp.simul = 0x00;
3366
3367                 /* First check whether the host state (host features)
3368                  * is already set correctly.
3369                  */
3370                 if (cp.le != lmp_host_le_capable(hdev) ||
3371                     cp.simul != lmp_host_le_br_capable(hdev))
3372                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3373                                     sizeof(cp), &cp);
3374         }
3375
3376         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3377                 /* Make sure the controller has a good default for
3378                  * advertising data. This also applies to the case
3379                  * where BR/EDR was toggled during the AUTO_OFF phase.
3380                  */
3381                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3382                     list_empty(&hdev->adv_instances)) {
3383                         int err;
3384
3385                         if (ext_adv_capable(hdev)) {
3386                                 err = __hci_req_setup_ext_adv_instance(req,
3387                                                                        0x00);
3388                                 if (!err)
3389                                         __hci_req_update_scan_rsp_data(req,
3390                                                                        0x00);
3391                         } else {
3392                                 err = 0;
3393                                 __hci_req_update_adv_data(req, 0x00);
3394                                 __hci_req_update_scan_rsp_data(req, 0x00);
3395                         }
3396
3397                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3398                                 if (!ext_adv_capable(hdev))
3399                                         __hci_req_enable_advertising(req);
3400                                 else if (!err)
3401                                         __hci_req_enable_ext_advertising(req,
3402                                                                          0x00);
3403                         }
3404                 } else if (!list_empty(&hdev->adv_instances)) {
3405                         struct adv_info *adv_instance;
3406
3407                         adv_instance = list_first_entry(&hdev->adv_instances,
3408                                                         struct adv_info, list);
3409                         __hci_req_schedule_adv_instance(req,
3410                                                         adv_instance->instance,
3411                                                         true);
3412                 }
3413         }
3414
3415         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3416         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3417                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3418                             sizeof(link_sec), &link_sec);
3419
3420         if (lmp_bredr_capable(hdev)) {
3421                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3422                         __hci_req_write_fast_connectable(req, true);
3423                 else
3424                         __hci_req_write_fast_connectable(req, false);
3425                 __hci_req_update_scan(req);
3426                 __hci_req_update_class(req);
3427                 __hci_req_update_name(req);
3428                 __hci_req_update_eir(req);
3429         }
3430
3431         hci_dev_unlock(hdev);
3432         return 0;
3433 }
3434
3435 int __hci_req_hci_power_on(struct hci_dev *hdev)
3436 {
3437         /* Register the available SMP channels (BR/EDR and LE) only when
3438          * successfully powering on the controller. This late
3439          * registration is required so that LE SMP can clearly decide
3440          * whether the public address or the static address is used.
3441          */
3442         smp_register(hdev);
3443
3444         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3445                               NULL);
3446 }
3447
3448 void hci_request_setup(struct hci_dev *hdev)
3449 {
3450         INIT_WORK(&hdev->discov_update, discov_update);
3451         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3452         INIT_WORK(&hdev->scan_update, scan_update_work);
3453         INIT_WORK(&hdev->connectable_update, connectable_update_work);
3454         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3455         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3456         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3457         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3458         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3459         INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3460 }
3461
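/* Every work item initialized in hci_request_setup() must have a matching
 * cancellation in hci_request_cancel_all() below; keep the two functions in
 * sync when adding new work items.
 */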
3462 void hci_request_cancel_all(struct hci_dev *hdev)
3463 {
3464         hci_req_sync_cancel(hdev, ENODEV);
3465
3466         cancel_work_sync(&hdev->discov_update);
3467         cancel_work_sync(&hdev->bg_scan_update);
3468         cancel_work_sync(&hdev->scan_update);
3469         cancel_work_sync(&hdev->connectable_update);
3470         cancel_work_sync(&hdev->discoverable_update);
3471         cancel_delayed_work_sync(&hdev->discov_off);
3472         cancel_delayed_work_sync(&hdev->le_scan_disable);
3473         cancel_delayed_work_sync(&hdev->le_scan_restart);
3474
3475         if (hdev->adv_instance_timeout) {
3476                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3477                 hdev->adv_instance_timeout = 0;
3478         }
3479
3480         cancel_interleave_scan(hdev);
3481 }