/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
        return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}
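
/* Usage sketch (illustrative only; the function names example_complete()
 * and example_send() are hypothetical): a caller builds a request on the
 * stack, queues one or more commands and hands the whole batch to
 * hci_req_run() with an optional completion callback.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status,
 *				     u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "status 0x%2.2x", status);
 *	}
 *
 *	static int example_send(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *		u8 scan = SCAN_PAGE;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return hci_req_run(&req, example_complete);
 *	}
 */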

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        bt_dev_dbg(hdev, "result 0x%2.2x", result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        bt_dev_dbg(hdev, "err 0x%2.2x", err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        bt_dev_dbg(hdev, "");

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        bt_dev_dbg(hdev, "end: err %d", err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
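
/* Usage sketch (illustrative; the calling context is hypothetical): sending
 * a command synchronously and consuming the returned command complete data.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 * On success skb->data holds the response parameters (here a
 * struct hci_rp_read_local_version); the caller owns the skb and must
 * kfree_skb() it when done.
 */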

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        bt_dev_dbg(hdev, "start");

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        bt_dev_dbg(hdev, "end: err %d", err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}
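
/* Usage sketch (illustrative; example_build() is a hypothetical callback):
 * the builder queues commands based on 'opt' and returns 0, then
 * hci_req_sync() runs the batch and waits for completion.
 *
 *	static int example_build(struct hci_request *req, unsigned long opt)
 *	{
 *		u8 scan = (u8)opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, example_build, SCAN_PAGE, HCI_CMD_TIMEOUT,
 *			   NULL);
 */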

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        bt_dev_dbg(hdev, "skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = hdev->def_page_scan_type;
                acp.interval = cpu_to_le16(hdev->def_page_scan_int);
        }

        acp.window = cpu_to_le16(hdev->def_page_scan_window);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
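
/* Unit note: page scan interval and window are expressed in 0.625 ms
 * slots, so the 0x0100 interval above is 256 * 0.625 ms = 160 ms.
 */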

static void start_interleave_scan(struct hci_dev *hdev)
{
        hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
        return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
        bt_dev_dbg(hdev, "cancelling interleave scan");

        cancel_delayed_work_sync(&hdev->interleave_scan);

        hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if this function starts an interleave scan; otherwise
 * return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
        /* Do interleaved scan only if all of the following are true:
         * - There is at least one ADV monitor
         * - At least one pending LE connection or one device to be scanned for
         * - Monitor offloading is not supported
         * If so, we should alternate between allowlist scan and one without
         * any filters to save power.
         */
        bool use_interleaving = hci_is_adv_monitoring(hdev) &&
                                !(list_empty(&hdev->pend_le_conns) &&
                                  list_empty(&hdev->pend_le_reports)) &&
                                hci_get_adv_monitor_offload_ext(hdev) ==
                                    HCI_ADV_MONITOR_EXT_NONE;
        bool is_interleaving = is_interleave_scanning(hdev);

        if (use_interleaving && !is_interleaving) {
                start_interleave_scan(hdev);
                bt_dev_dbg(hdev, "starting interleave scan");
                return true;
        }

        if (!use_interleaving && is_interleaving)
                cancel_interleave_scan(hdev);

        return false;
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections, we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure that proper values are set for the RSSI threshold and
         * UUID filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        bt_dev_dbg(hdev, "ADV monitoring is %s",
                   hci_is_adv_monitoring(hdev) ? "on" : "off");

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports) &&
            !hci_is_adv_monitoring(hdev)) {
                /* If there are no pending LE connections, no devices to
                 * be scanned for and no ADV monitors, we should stop the
                 * background scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req, false);

                bt_dev_dbg(hdev, "stopping background scanning");
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req, false);

                hci_req_add_le_passive_scan(req);
                bt_dev_dbg(hdev, "starting background scanning");
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}
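
/* Layout note: each UUID list is a standard EIR structure of the form
 * [length][type][data...], where the length byte covers the type byte plus
 * the data. For example, the two 16-bit UUIDs 0x1101 and 0x110a would be
 * encoded little endian as: 05 03 01 11 0a 11.
 */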

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (hdev->suspended)
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        /* Disable address resolution */
        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
                __u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
                                u8 bdaddr_type)
{
        struct hci_cp_le_del_from_white_list cp;

        cp.bdaddr_type = bdaddr_type;
        bacpy(&cp.bdaddr, bdaddr);

        bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(req->hdev) &&
            hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
                if (irk) {
                        struct hci_cp_le_del_from_resolv_list cp;

                        cp.bdaddr_type = bdaddr_type;
                        bacpy(&cp.bdaddr, bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
                             struct hci_conn_params *params, u8 *num_entries,
                             bool allow_rpa)
{
        struct hci_cp_le_add_to_white_list cp;
        struct hci_dev *hdev = req->hdev;

        /* Already in white list */
        if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
                                   params->addr_type))
                return 0;

        /* White list is full; the caller falls back to the filter policy
         * that accepts all advertising.
         */
        if (*num_entries >= hdev->le_white_list_size)
                return -1;

        /* White list cannot be used with RPAs */
        if (!allow_rpa &&
            !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
                return -1;
        }

        /* During suspend, only wakeable devices can be in whitelist */
        if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                                   params->current_flags))
                return 0;

        *num_entries += 1;
        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(hdev, &params->addr,
                                           params->addr_type);
                if (irk) {
                        struct hci_cp_le_add_to_resolv_list cp;

                        cp.bdaddr_type = params->addr_type;
                        bacpy(&cp.bdaddr, &params->addr);
                        memcpy(cp.peer_irk, irk->val, 16);

                        if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                                memcpy(cp.local_irk, hdev->irk, 16);
                        else
                                memset(cp.local_irk, 0, 16);

                        hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }

        return 0;
}

static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 num_entries = 0;
        bool pend_conn, pend_report;
        /* We allow whitelisting even with RPAs in suspend. In the worst case,
         * we won't be able to wake from devices that use the Privacy 1.2
         * features. Additionally, once we support Privacy 1.2 and IRK
         * offloading, we can update this to also check for those conditions.
         */
        bool allow_rpa = hdev->suspended;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                                      &b->bdaddr,
                                                      b->bdaddr_type);
                pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                                        &b->bdaddr,
                                                        b->bdaddr_type);

                /* If the device is not likely to connect or report,
                 * remove it from the whitelist.
                 */
                if (!pend_conn && !pend_report) {
                        del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
                        continue;
                }

                /* White list cannot be used with RPAs */
                if (!allow_rpa &&
                    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
                    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        return 0x00;
                }

                num_entries++;
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return the filter policy value to not use
         * the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (add_to_white_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space. Abort if space runs out.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (add_to_white_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* Use the allowlist unless the following conditions are all true:
         * - We are not currently suspending
         * - There are 1 or more ADV monitors registered and it's not offloaded
         * - Interleaved scanning is not currently using the allowlist
         */
        if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
            hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
            hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
                return 0x00;

        /* Select filter policy to use white list */
        return 0x01;
}
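
/* The value returned by update_white_list() is used directly as the LE scan
 * filter_policy: 0x00 accepts all advertising, 0x01 restricts reports to
 * devices on the white list. hci_req_add_le_passive_scan() below may OR in
 * 0x02 when the extended scanner filter policies are used.
 */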

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy,
                               bool addr_resolv)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            addr_resolv) {
                u8 enable = 0x01;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }

        /* Use extended scanning if the controller supports the extended
         * scan parameters and extended scan enable commands.
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}
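
/* Unit note: LE scan interval and window are in 0.625 ms units, e.g.
 * interval 0x0060 (96) is 60 ms and window 0x0030 (48) is 30 ms. The
 * window must not be larger than the interval.
 */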

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *c;

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == LE_LINK && c->state == BT_CONNECT &&
                    test_bit(HCI_CONN_SCANNING, &c->flags)) {
                        rcu_read_unlock();
                        return true;
                }
        }

        rcu_read_unlock();

        return false;
}

/* Call hci_req_add_le_scan_disable() first to disable the controller-based
 * address resolution before the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;
        u16 window, interval;
        /* Background scanning should run with address resolution */
        bool addr_resolv = true;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        /* Set require_privacy to false since no SCAN_REQ is sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        if (hdev->enable_advmon_interleave_scan &&
            __hci_update_interleaved_scan(hdev))
                return;

        bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses and
         * with that having LE privacy enabled, then controllers with
         * Extended Scanner Filter Policies support can now enable support
         * for handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        if (hdev->suspended) {
                window = hdev->le_scan_window_suspend;
                interval = hdev->le_scan_int_suspend;

                set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
        } else if (hci_is_le_conn_scanning(hdev)) {
                window = hdev->le_scan_window_connect;
                interval = hdev->le_scan_int_connect;
        } else if (hci_is_adv_monitoring(hdev)) {
                window = hdev->le_scan_window_adv_monitor;
                interval = hdev->le_scan_int_adv_monitor;
        } else {
                window = hdev->le_scan_window;
                interval = hdev->le_scan_interval;
        }

        bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
        hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
                           own_addr_type, filter_policy, addr_resolv);
}
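
/* The interval/window pair chosen above is selected in priority order:
 * suspend values (lower duty cycle), LE connection establishment values,
 * ADV monitoring values, and finally the configured defaults.
 */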
1110
1111 static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1112 {
1113         struct adv_info *adv_instance;
1114
1115         /* Instance 0x00 always set local name */
1116         if (instance == 0x00)
1117                 return true;
1118
1119         adv_instance = hci_find_adv_instance(hdev, instance);
1120         if (!adv_instance)
1121                 return false;
1122
1123         if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1124             adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1125                 return true;
1126
1127         return adv_instance->scan_rsp_len ? true : false;
1128 }
1129
1130 static void hci_req_clear_event_filter(struct hci_request *req)
1131 {
1132         struct hci_cp_set_event_filter f;
1133
1134         memset(&f, 0, sizeof(f));
1135         f.flt_type = HCI_FLT_CLEAR_ALL;
1136         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1137
1138         /* Update page scan state (since we may have modified it when setting
1139          * the event filter).
1140          */
1141         __hci_req_update_scan(req);
1142 }
1143
1144 static void hci_req_set_event_filter(struct hci_request *req)
1145 {
1146         struct bdaddr_list_with_flags *b;
1147         struct hci_cp_set_event_filter f;
1148         struct hci_dev *hdev = req->hdev;
1149         u8 scan = SCAN_DISABLED;
1150
1151         /* Always clear event filter when starting */
1152         hci_req_clear_event_filter(req);
1153
1154         list_for_each_entry(b, &hdev->whitelist, list) {
1155                 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1156                                         b->current_flags))
1157                         continue;
1158
1159                 memset(&f, 0, sizeof(f));
1160                 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1161                 f.flt_type = HCI_FLT_CONN_SETUP;
1162                 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1163                 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1164
1165                 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1166                 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1167                 scan = SCAN_PAGE;
1168         }
1169
1170         if (scan)
1171                 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1172         else
1173                 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1174
1175         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1176 }
1177
1178 static void cancel_adv_timeout(struct hci_dev *hdev)
1179 {
1180         if (hdev->adv_instance_timeout) {
1181                 hdev->adv_instance_timeout = 0;
1182                 cancel_delayed_work(&hdev->adv_instance_expire);
1183         }
1184 }
1185
1186 /* This function requires the caller holds hdev->lock */
1187 void __hci_req_pause_adv_instances(struct hci_request *req)
1188 {
1189         bt_dev_dbg(req->hdev, "Pausing advertising instances");
1190
1191         /* Call to disable any advertisements active on the controller.
1192          * This will succeed even if no advertisements are configured.
1193          */
1194         __hci_req_disable_advertising(req);
1195
1196         /* If we are using software rotation, pause the loop */
1197         if (!ext_adv_capable(req->hdev))
1198                 cancel_adv_timeout(req->hdev);
1199 }
1200
1201 /* This function requires the caller holds hdev->lock */
1202 static void __hci_req_resume_adv_instances(struct hci_request *req)
1203 {
1204         struct adv_info *adv;
1205
1206         bt_dev_dbg(req->hdev, "Resuming advertising instances");
1207
1208         if (ext_adv_capable(req->hdev)) {
1209                 /* Call for each tracked instance to be re-enabled */
1210                 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1211                         __hci_req_enable_ext_advertising(req,
1212                                                          adv->instance);
1213                 }
1214
1215         } else {
1216                 /* Schedule for most recent instance to be restarted and begin
1217                  * the software rotation loop
1218                  */
1219                 __hci_req_schedule_adv_instance(req,
1220                                                 req->hdev->cur_adv_instance,
1221                                                 true);
1222         }
1223 }
1224
1225 /* This function requires the caller holds hdev->lock */
1226 int hci_req_resume_adv_instances(struct hci_dev *hdev)
1227 {
1228         struct hci_request req;
1229
1230         hci_req_init(&req, hdev);
1231         __hci_req_resume_adv_instances(&req);
1232
1233         return hci_req_run(&req, NULL);
1234 }
1235
1236 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1237 {
1238         bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1239                    status);
1240         if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1241             test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1242                 clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1243                 clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1244                 wake_up(&hdev->suspend_wait_q);
1245         }
1246
1247         if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
1248                 clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1249                 wake_up(&hdev->suspend_wait_q);
1250         }
1251 }
1252
1253 static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
1254                                               bool enable)
1255 {
1256         struct hci_dev *hdev = req->hdev;
1257
1258         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1259         case HCI_ADV_MONITOR_EXT_MSFT:
1260                 msft_req_add_set_filter_enable(req, enable);
1261                 break;
1262         default:
1263                 return;
1264         }
1265
1266         /* No need to block when enabling since it's on resume path */
1267         if (hdev->suspended && !enable)
1268                 set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1269 }
1270
1271 /* Call with hci_dev_lock */
1272 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1273 {
1274         int old_state;
1275         struct hci_conn *conn;
1276         struct hci_request req;
1277         u8 page_scan;
1278         int disconnect_counter;
1279
1280         if (next == hdev->suspend_state) {
1281                 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1282                 goto done;
1283         }
1284
1285         hdev->suspend_state = next;
1286         hci_req_init(&req, hdev);
1287
1288         if (next == BT_SUSPEND_DISCONNECT) {
1289                 /* Mark device as suspended */
1290                 hdev->suspended = true;
1291
1292                 /* Pause discovery if not already stopped */
1293                 old_state = hdev->discovery.state;
1294                 if (old_state != DISCOVERY_STOPPED) {
1295                         set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1296                         hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1297                         queue_work(hdev->req_workqueue, &hdev->discov_update);
1298                 }
1299
1300                 hdev->discovery_paused = true;
1301                 hdev->discovery_old_state = old_state;
1302
1303                 /* Stop directed advertising */
1304                 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1305                 if (old_state) {
1306                         set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1307                         cancel_delayed_work(&hdev->discov_off);
1308                         queue_delayed_work(hdev->req_workqueue,
1309                                            &hdev->discov_off, 0);
1310                 }
1311
1312                 /* Pause other advertisements */
1313                 if (hdev->adv_instance_cnt)
1314                         __hci_req_pause_adv_instances(&req);
1315
1316                 hdev->advertising_paused = true;
1317                 hdev->advertising_old_state = old_state;
1318                 /* Disable page scan */
1319                 page_scan = SCAN_DISABLED;
1320                 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1321
1322                 /* Disable LE passive scan if enabled */
1323                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1324                         cancel_interleave_scan(hdev);
1325                         hci_req_add_le_scan_disable(&req, false);
1326                 }
1327
1328                 /* Disable advertisement filters */
1329                 hci_req_add_set_adv_filter_enable(&req, false);
1330
1331                 /* Mark task needing completion */
1332                 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1333
1334                 /* Prevent disconnects from causing scanning to be re-enabled */
1335                 hdev->scanning_paused = true;
1336
1337                 /* Run commands before disconnecting */
1338                 hci_req_run(&req, suspend_req_complete);
1339
1340                 disconnect_counter = 0;
1341                 /* Soft disconnect everything (power off) */
1342                 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1343                         hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1344                         disconnect_counter++;
1345                 }
1346
1347                 if (disconnect_counter > 0) {
1348                         bt_dev_dbg(hdev,
1349                                    "Had %d disconnects. Will wait on them",
1350                                    disconnect_counter);
1351                         set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1352                 }
1353         } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1354                 /* Unpause to take care of updating scanning params */
1355                 hdev->scanning_paused = false;
1356                 /* Enable event filter for paired devices */
1357                 hci_req_set_event_filter(&req);
1358                 /* Enable passive scan at lower duty cycle */
1359                 __hci_update_background_scan(&req);
1360                 /* Pause scan changes again. */
1361                 hdev->scanning_paused = true;
1362                 hci_req_run(&req, suspend_req_complete);
1363         } else {
1364                 hdev->suspended = false;
1365                 hdev->scanning_paused = false;
1366
1367                 hci_req_clear_event_filter(&req);
1368                 /* Reset passive/background scanning to normal */
1369                 __hci_update_background_scan(&req);
1370                 /* Enable all of the advertisement filters */
1371                 hci_req_add_set_adv_filter_enable(&req, true);
1372
1373                 /* Unpause directed advertising */
1374                 hdev->advertising_paused = false;
1375                 if (hdev->advertising_old_state) {
1376                         set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1377                                 hdev->suspend_tasks);
1378                         hci_dev_set_flag(hdev, HCI_ADVERTISING);
1379                         queue_work(hdev->req_workqueue,
1380                                    &hdev->discoverable_update);
1381                         hdev->advertising_old_state = 0;
1382                 }
1383
1384                 /* Resume other advertisements */
1385                 if (hdev->adv_instance_cnt)
1386                         __hci_req_resume_adv_instances(&req);
1387
1388                 /* Unpause discovery */
1389                 hdev->discovery_paused = false;
1390                 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1391                     hdev->discovery_old_state != DISCOVERY_STOPPING) {
1392                         set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1393                         hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1394                         queue_work(hdev->req_workqueue, &hdev->discov_update);
1395                 }
1396
1397                 hci_req_run(&req, suspend_req_complete);
1398         }
1399
1400         hdev->suspend_state = next;
1401
1402 done:
1403         clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1404         wake_up(&hdev->suspend_wait_q);
1405 }
1406
1407 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1408 {
1409         return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
1410 }
1411
1412 void __hci_req_disable_advertising(struct hci_request *req)
1413 {
1414         if (ext_adv_capable(req->hdev)) {
1415                 __hci_req_disable_ext_adv_instance(req, 0x00);
1416
1417         } else {
1418                 u8 enable = 0x00;
1419
1420                 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1421         }
1422 }
1423
1424 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1425 {
1426         u32 flags;
1427         struct adv_info *adv_instance;
1428
1429         if (instance == 0x00) {
1430                 /* Instance 0 always manages the "Tx Power" and "Flags"
1431                  * fields
1432                  */
1433                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1434
1435                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1436                  * corresponds to the "connectable" instance flag.
1437                  */
1438                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1439                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1440
1441                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1442                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1443                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1444                         flags |= MGMT_ADV_FLAG_DISCOV;
1445
1446                 return flags;
1447         }
1448
1449         adv_instance = hci_find_adv_instance(hdev, instance);
1450
1451         /* Return 0 when we got an invalid instance identifier. */
1452         if (!adv_instance)
1453                 return 0;
1454
1455         return adv_instance->flags;
1456 }
1457
1458 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1459 {
1460         /* If privacy is not enabled don't use RPA */
1461         if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1462                 return false;
1463
1464         /* If basic privacy mode is enabled use RPA */
1465         if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1466                 return true;
1467
1468         /* If limited privacy mode is enabled don't use RPA if we're
1469          * both discoverable and bondable.
1470          */
1471         if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1472             hci_dev_test_flag(hdev, HCI_BONDABLE))
1473                 return false;
1474
1475         /* We're neither bondable nor discoverable in the limited
1476          * privacy mode, therefore use RPA.
1477          */
1478         return true;
1479 }
1480
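/* le_states below is the "LE Supported States" bitmap reported by the
 * controller via HCI LE Read Supported States, stored least significant
 * byte first. Supported-state bit N is therefore tested as
 * le_states[N / 8] & BIT(N % 8); e.g. bit 20 maps to le_states[2] & 0x10.
 */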
1481 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1482 {
1483         /* If there is no connection we are OK to advertise. */
1484         if (hci_conn_num(hdev, LE_LINK) == 0)
1485                 return true;
1486
1487         /* Check le_states if there is any connection in the slave role. */
1488         if (hdev->conn_hash.le_num_slave > 0) {
1489                 /* Slave connection state and non-connectable mode bit 20. */
1490                 if (!connectable && !(hdev->le_states[2] & 0x10))
1491                         return false;
1492
1493                 /* Slave connection state and connectable mode bit 38
1494                  * and scannable bit 21.
1495                  */
1496                 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1497                                     !(hdev->le_states[2] & 0x20)))
1498                         return false;
1499         }
1500
1501         /* Check le_states if there is any connection in the master role. */
1502         if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1503                 /* Master connection state and non-connectable mode bit 18. */
1504                 if (!connectable && !(hdev->le_states[2] & 0x02))
1505                         return false;
1506
1507                 /* Master connection state and connectable mode bit 35 and
1508                  * scannable bit 19.
1509                  */
1510                 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1511                                     !(hdev->le_states[2] & 0x08)))
1512                         return false;
1513         }
1514
1515         return true;
1516 }
1517
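/* Build the legacy advertising enable sequence for the current instance:
 * resolve the instance flags, bail out if the controller state does not
 * allow advertising, disable any running advertising first, pick an own
 * address type, and finally queue LE_Set_Advertising_Parameters followed
 * by LE_Set_Advertising_Enable.
 */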
1518 void __hci_req_enable_advertising(struct hci_request *req)
1519 {
1520         struct hci_dev *hdev = req->hdev;
1521         struct adv_info *adv_instance;
1522         struct hci_cp_le_set_adv_param cp;
1523         u8 own_addr_type, enable = 0x01;
1524         bool connectable;
1525         u16 adv_min_interval, adv_max_interval;
1526         u32 flags;
1527
1528         flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1529         adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1530
1531         /* If the "connectable" instance flag was not set, then choose between
1532          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1533          */
1534         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1535                       mgmt_get_connectable(hdev);
1536
1537         if (!is_advertising_allowed(hdev, connectable))
1538                 return;
1539
1540         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1541                 __hci_req_disable_advertising(req);
1542
1543         /* Clear the HCI_LE_ADV bit temporarily so that the
1544          * hci_update_random_address knows that it's safe to go ahead
1545          * and write a new random address. The flag will be set back on
1546          * as soon as the SET_ADV_ENABLE HCI command completes.
1547          */
1548         hci_dev_clear_flag(hdev, HCI_LE_ADV);
1549
1550         /* Set require_privacy to true only when non-connectable
1551          * advertising is used. In that case it is fine to use a
1552          * non-resolvable private address.
1553          */
1554         if (hci_update_random_address(req, !connectable,
1555                                       adv_use_rpa(hdev, flags),
1556                                       &own_addr_type) < 0)
1557                 return;
1558
1559         memset(&cp, 0, sizeof(cp));
1560
1561         if (adv_instance) {
1562                 adv_min_interval = adv_instance->min_interval;
1563                 adv_max_interval = adv_instance->max_interval;
1564         } else {
1565                 adv_min_interval = hdev->le_adv_min_interval;
1566                 adv_max_interval = hdev->le_adv_max_interval;
1567         }
1568
1569         if (connectable) {
1570                 cp.type = LE_ADV_IND;
1571         } else {
1572                 if (adv_cur_instance_is_scannable(hdev))
1573                         cp.type = LE_ADV_SCAN_IND;
1574                 else
1575                         cp.type = LE_ADV_NONCONN_IND;
1576
1577                 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1578                     hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1579                         adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1580                         adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1581                 }
1582         }
1583
1584         cp.min_interval = cpu_to_le16(adv_min_interval);
1585         cp.max_interval = cpu_to_le16(adv_max_interval);
1586         cp.own_address_type = own_addr_type;
1587         cp.channel_map = hdev->le_adv_channel_map;
1588
1589         hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1590
1591         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1592 }
1593
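/* Append the device name to an EIR/AD buffer, preferring (in order) the
 * complete name when it fits within HCI_MAX_SHORT_NAME_LENGTH bytes, the
 * configured short name, and finally the complete name truncated to the
 * short-name limit. For example, assuming HCI_MAX_SHORT_NAME_LENGTH is 10
 * and no short name is configured, a dev_name of "hci-test" is emitted as
 * EIR_NAME_COMPLETE while "very-long-device-name" is truncated and tagged
 * EIR_NAME_SHORT.
 */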
1594 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1595 {
1596         size_t short_len;
1597         size_t complete_len;
1598
1599         /* no space left for name (+ NULL + type + len) */
1600         if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1601                 return ad_len;
1602
1603         /* use complete name if present and fits */
1604         complete_len = strlen(hdev->dev_name);
1605         if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1606                 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1607                                        hdev->dev_name, complete_len + 1);
1608
1609         /* use short name if present */
1610         short_len = strlen(hdev->short_name);
1611         if (short_len)
1612                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1613                                        hdev->short_name, short_len + 1);
1614
1615         /* use shortened full name if present; we already know the name
1616          * is longer than HCI_MAX_SHORT_NAME_LENGTH
1617          */
1618         if (complete_len) {
1619                 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1620
1621                 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1622                 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1623
1624                 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1625                                        sizeof(name));
1626         }
1627
1628         return ad_len;
1629 }
1630
1631 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1632 {
1633         return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1634 }
1635
1636 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1637 {
1638         u8 scan_rsp_len = 0;
1639
1640         if (hdev->appearance)
1641                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1643
1644         return append_local_name(hdev, ptr, scan_rsp_len);
1645 }
1646
1647 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1648                                         u8 *ptr)
1649 {
1650         struct adv_info *adv_instance;
1651         u32 instance_flags;
1652         u8 scan_rsp_len = 0;
1653
1654         adv_instance = hci_find_adv_instance(hdev, instance);
1655         if (!adv_instance)
1656                 return 0;
1657
1658         instance_flags = adv_instance->flags;
1659
1660         if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
1661                 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1663
1664         memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1665                adv_instance->scan_rsp_len);
1666
1667         scan_rsp_len += adv_instance->scan_rsp_len;
1668
1669         if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1670                 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1671
1672         return scan_rsp_len;
1673 }
1674
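/* Regenerate the scan response payload for the given instance and queue
 * the matching HCI command (extended or legacy variant). The freshly
 * built data is compared against the copy cached in hdev->scan_rsp_data
 * so that no command is sent when nothing actually changed.
 */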
1675 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1676 {
1677         struct hci_dev *hdev = req->hdev;
1678         u8 len;
1679
1680         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1681                 return;
1682
1683         if (ext_adv_capable(hdev)) {
1684                 struct hci_cp_le_set_ext_scan_rsp_data cp;
1685
1686                 memset(&cp, 0, sizeof(cp));
1687
1688                 if (instance)
1689                         len = create_instance_scan_rsp_data(hdev, instance,
1690                                                             cp.data);
1691                 else
1692                         len = create_default_scan_rsp_data(hdev, cp.data);
1693
1694                 if (hdev->scan_rsp_data_len == len &&
1695                     !memcmp(cp.data, hdev->scan_rsp_data, len))
1696                         return;
1697
1698                 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1699                 hdev->scan_rsp_data_len = len;
1700
1701                 cp.handle = instance;
1702                 cp.length = len;
1703                 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1704                 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1705
1706                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1707                             &cp);
1708         } else {
1709                 struct hci_cp_le_set_scan_rsp_data cp;
1710
1711                 memset(&cp, 0, sizeof(cp));
1712
1713                 if (instance)
1714                         len = create_instance_scan_rsp_data(hdev, instance,
1715                                                             cp.data);
1716                 else
1717                         len = create_default_scan_rsp_data(hdev, cp.data);
1718
1719                 if (hdev->scan_rsp_data_len == len &&
1720                     !memcmp(cp.data, hdev->scan_rsp_data, len))
1721                         return;
1722
1723                 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1724                 hdev->scan_rsp_data_len = len;
1725
1726                 cp.length = len;
1727
1728                 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1729         }
1730 }
1731
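/* Assemble the advertising data for an instance as a sequence of AD
 * structures, each encoded as { length, type, value... }. For example, a
 * Flags field advertising "LE General Discoverable" on an LE-only device
 * is the three bytes { 0x02, EIR_FLAGS, 0x06 }, where 0x06 is
 * LE_AD_GENERAL (0x02) | LE_AD_NO_BREDR (0x04).
 */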
1732 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1733 {
1734         struct adv_info *adv_instance = NULL;
1735         u8 ad_len = 0, flags = 0;
1736         u32 instance_flags;
1737
1738         /* Return 0 when the current instance identifier is invalid. */
1739         if (instance) {
1740                 adv_instance = hci_find_adv_instance(hdev, instance);
1741                 if (!adv_instance)
1742                         return 0;
1743         }
1744
1745         instance_flags = get_adv_instance_flags(hdev, instance);
1746
1747         /* If the instance already has the flags set, skip adding them
1748          * again.
1749          */
1750         if (adv_instance && eir_get_data(adv_instance->adv_data,
1751                                          adv_instance->adv_data_len, EIR_FLAGS,
1752                                          NULL))
1753                 goto skip_flags;
1754
1755         /* The Add Advertising command allows userspace to set both the general
1756          * and limited discoverable flags.
1757          */
1758         if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1759                 flags |= LE_AD_GENERAL;
1760
1761         if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1762                 flags |= LE_AD_LIMITED;
1763
1764         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1765                 flags |= LE_AD_NO_BREDR;
1766
1767         if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1768                 /* If a discovery flag wasn't provided, simply use the global
1769                  * settings.
1770                  */
1771                 if (!flags)
1772                         flags |= mgmt_get_adv_discov_flags(hdev);
1773
1774                 /* If flags would still be empty, then there is no need to
1775                  * include the "Flags" AD field.
1776                  */
1777                 if (flags) {
1778                         ptr[0] = 0x02;
1779                         ptr[1] = EIR_FLAGS;
1780                         ptr[2] = flags;
1781
1782                         ad_len += 3;
1783                         ptr += 3;
1784                 }
1785         }
1786
1787 skip_flags:
1788         if (adv_instance) {
1789                 memcpy(ptr, adv_instance->adv_data,
1790                        adv_instance->adv_data_len);
1791                 ad_len += adv_instance->adv_data_len;
1792                 ptr += adv_instance->adv_data_len;
1793         }
1794
1795         if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1796                 s8 adv_tx_power;
1797
1798                 if (ext_adv_capable(hdev)) {
1799                         if (adv_instance)
1800                                 adv_tx_power = adv_instance->tx_power;
1801                         else
1802                                 adv_tx_power = hdev->adv_tx_power;
1803                 } else {
1804                         adv_tx_power = hdev->adv_tx_power;
1805                 }
1806
1807                 /* Include the Tx Power field only if we have a valid value for it */
1808                 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1809                         ptr[0] = 0x02;
1810                         ptr[1] = EIR_TX_POWER;
1811                         ptr[2] = (u8)adv_tx_power;
1812
1813                         ad_len += 3;
1814                         ptr += 3;
1815                 }
1816         }
1817
1818         return ad_len;
1819 }
1820
1821 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1822 {
1823         struct hci_dev *hdev = req->hdev;
1824         u8 len;
1825
1826         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1827                 return;
1828
1829         if (ext_adv_capable(hdev)) {
1830                 struct hci_cp_le_set_ext_adv_data cp;
1831
1832                 memset(&cp, 0, sizeof(cp));
1833
1834                 len = create_instance_adv_data(hdev, instance, cp.data);
1835
1836                 /* There's nothing to do if the data hasn't changed */
1837                 if (hdev->adv_data_len == len &&
1838                     memcmp(cp.data, hdev->adv_data, len) == 0)
1839                         return;
1840
1841                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1842                 hdev->adv_data_len = len;
1843
1844                 cp.length = len;
1845                 cp.handle = instance;
1846                 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1847                 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1848
1849                 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1850         } else {
1851                 struct hci_cp_le_set_adv_data cp;
1852
1853                 memset(&cp, 0, sizeof(cp));
1854
1855                 len = create_instance_adv_data(hdev, instance, cp.data);
1856
1857                 /* There's nothing to do if the data hasn't changed */
1858                 if (hdev->adv_data_len == len &&
1859                     memcmp(cp.data, hdev->adv_data, len) == 0)
1860                         return;
1861
1862                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1863                 hdev->adv_data_len = len;
1864
1865                 cp.length = len;
1866
1867                 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1868         }
1869 }
1870
1871 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1872 {
1873         struct hci_request req;
1874
1875         hci_req_init(&req, hdev);
1876         __hci_req_update_adv_data(&req, instance);
1877
1878         return hci_req_run(&req, NULL);
1879 }
1880
1881 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1882                                             u16 opcode)
1883 {
1884         BT_DBG("%s status %u", hdev->name, status);
1885 }
1886
1887 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1888 {
1889         struct hci_request req;
1890         __u8 enable = 0x00;
1891
1892         if (!use_ll_privacy(hdev) &&
1893             !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1894                 return;
1895
1896         hci_req_init(&req, hdev);
1897
1898         hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1899
1900         hci_req_run(&req, enable_addr_resolution_complete);
1901 }
1902
1903 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1904 {
1905         bt_dev_dbg(hdev, "status %u", status);
1906 }
1907
1908 void hci_req_reenable_advertising(struct hci_dev *hdev)
1909 {
1910         struct hci_request req;
1911
1912         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1913             list_empty(&hdev->adv_instances))
1914                 return;
1915
1916         hci_req_init(&req, hdev);
1917
1918         if (hdev->cur_adv_instance) {
1919                 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1920                                                 true);
1921         } else {
1922                 if (ext_adv_capable(hdev)) {
1923                         __hci_req_start_ext_adv(&req, 0x00);
1924                 } else {
1925                         __hci_req_update_adv_data(&req, 0x00);
1926                         __hci_req_update_scan_rsp_data(&req, 0x00);
1927                         __hci_req_enable_advertising(&req);
1928                 }
1929         }
1930
1931         hci_req_run(&req, adv_enable_complete);
1932 }
1933
1934 static void adv_timeout_expire(struct work_struct *work)
1935 {
1936         struct hci_dev *hdev = container_of(work, struct hci_dev,
1937                                             adv_instance_expire.work);
1938
1939         struct hci_request req;
1940         u8 instance;
1941
1942         bt_dev_dbg(hdev, "");
1943
1944         hci_dev_lock(hdev);
1945
1946         hdev->adv_instance_timeout = 0;
1947
1948         instance = hdev->cur_adv_instance;
1949         if (instance == 0x00)
1950                 goto unlock;
1951
1952         hci_req_init(&req, hdev);
1953
1954         hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1955
1956         if (list_empty(&hdev->adv_instances))
1957                 __hci_req_disable_advertising(&req);
1958
1959         hci_req_run(&req, NULL);
1960
1961 unlock:
1962         hci_dev_unlock(hdev);
1963 }
1964
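/* One step of the interleaved scanning state machine: restart passive
 * scanning and flip the filter policy for the next round.
 * interleave_scan_work below re-queues itself while interleaving is
 * active, so the scanner keeps alternating between the allowlist-filtered
 * phase (advmon_allowlist_duration ms) and the unfiltered phase
 * (advmon_no_filter_duration ms).
 */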
1965 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1966                                            unsigned long opt)
1967 {
1968         struct hci_dev *hdev = req->hdev;
1969         int ret = 0;
1970
1971         hci_dev_lock(hdev);
1972
1973         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1974                 hci_req_add_le_scan_disable(req, false);
1975         hci_req_add_le_passive_scan(req);
1976
1977         switch (hdev->interleave_scan_state) {
1978         case INTERLEAVE_SCAN_ALLOWLIST:
1979                 bt_dev_dbg(hdev, "next state: allowlist");
1980                 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1981                 break;
1982         case INTERLEAVE_SCAN_NO_FILTER:
1983                 bt_dev_dbg(hdev, "next state: no filter");
1984                 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1985                 break;
1986         case INTERLEAVE_SCAN_NONE:
1987                 BT_ERR("unexpected error");
1988                 bt_dev_err(hdev, "unexpected interleave scan state");
1989         }
1990
1991         hci_dev_unlock(hdev);
1992
1993         return ret;
1994 }
1995
1996 static void interleave_scan_work(struct work_struct *work)
1997 {
1998         struct hci_dev *hdev = container_of(work, struct hci_dev,
1999                                             interleave_scan.work);
2000         u8 status;
2001         unsigned long timeout;
2002
2003         if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
2004                 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
2005         } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
2006                 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
2007         } else {
2008                 bt_dev_err(hdev, "unexpected interleave scan state");
2009                 return;
2010         }
2011
2012         hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
2013                      HCI_CMD_TIMEOUT, &status);
2014
2015         /* Don't continue interleaving if it was canceled */
2016         if (is_interleave_scanning(hdev))
2017                 queue_delayed_work(hdev->req_workqueue,
2018                                    &hdev->interleave_scan, timeout);
2019 }
2020
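/* Pick the own address type and, if needed, the random address for an
 * advertising instance. With privacy enabled this returns the RPA
 * (regenerating it when expired and re-arming the expiry work); with
 * require_privacy it falls back to a freshly generated non-resolvable
 * private address; otherwise the public address is used.
 */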
2021 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2022                            bool use_rpa, struct adv_info *adv_instance,
2023                            u8 *own_addr_type, bdaddr_t *rand_addr)
2024 {
2025         int err;
2026
2027         bacpy(rand_addr, BDADDR_ANY);
2028
2029         /* If privacy is enabled, use a resolvable private address. If
2030          * the current RPA has expired, generate a new one.
2031          */
2032         if (use_rpa) {
2033                 int to;
2034
2035                 /* If the controller supports LL Privacy, use own address
2036                  * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2037                  */
2038                 if (use_ll_privacy(hdev))
2039                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2040                 else
2041                         *own_addr_type = ADDR_LE_DEV_RANDOM;
2042
2043                 if (adv_instance) {
2044                         if (!adv_instance->rpa_expired &&
2045                             !bacmp(&adv_instance->random_addr, &hdev->rpa))
2046                                 return 0;
2047
2048                         adv_instance->rpa_expired = false;
2049                 } else {
2050                         if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2051                             !bacmp(&hdev->random_addr, &hdev->rpa))
2052                                 return 0;
2053                 }
2054
2055                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2056                 if (err < 0) {
2057                         bt_dev_err(hdev, "failed to generate new RPA");
2058                         return err;
2059                 }
2060
2061                 bacpy(rand_addr, &hdev->rpa);
2062
2063                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2064                 if (adv_instance)
2065                         queue_delayed_work(hdev->workqueue,
2066                                            &adv_instance->rpa_expired_cb, to);
2067                 else
2068                         queue_delayed_work(hdev->workqueue,
2069                                            &hdev->rpa_expired, to);
2070
2071                 return 0;
2072         }
2073
2074         /* In case of required privacy without resolvable private address,
2075          * use a non-resolvable private address. This is useful for
2076          * non-connectable advertising.
2077          */
2078         if (require_privacy) {
2079                 bdaddr_t nrpa;
2080
2081                 while (true) {
2082                         /* The non-resolvable private address is generated
2083                          * from six random bytes with the two most significant
2084                          * bits cleared.
2085                          */
2086                         get_random_bytes(&nrpa, 6);
2087                         nrpa.b[5] &= 0x3f;
2088
2089                         /* The non-resolvable private address shall not be
2090                          * equal to the public address.
2091                          */
2092                         if (bacmp(&hdev->bdaddr, &nrpa))
2093                                 break;
2094                 }
2095
2096                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2097                 bacpy(rand_addr, &nrpa);
2098
2099                 return 0;
2100         }
2101
2102         /* No privacy so use a public address. */
2103         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2104
2105         return 0;
2106 }
2107
2108 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2109 {
2110         hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
2111 }
2112
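/* Program the parameters for one extended advertising set. The event
 * properties are derived from the connectable/scannable flags combined
 * with whether a secondary (extended) PHY was requested:
 *
 *   connectable -> legacy ADV_IND         or extended connectable
 *   scannable   -> legacy ADV_SCAN_IND    or extended scannable
 *   neither     -> legacy ADV_NONCONN_IND or extended non-connectable
 *
 * When a plain random own address is in use, the per-set random address
 * is also programmed unless it is already current.
 */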
2113 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2114 {
2115         struct hci_cp_le_set_ext_adv_params cp;
2116         struct hci_dev *hdev = req->hdev;
2117         bool connectable;
2118         u32 flags;
2119         bdaddr_t random_addr;
2120         u8 own_addr_type;
2121         int err;
2122         struct adv_info *adv_instance;
2123         bool secondary_adv;
2124
2125         if (instance > 0) {
2126                 adv_instance = hci_find_adv_instance(hdev, instance);
2127                 if (!adv_instance)
2128                         return -EINVAL;
2129         } else {
2130                 adv_instance = NULL;
2131         }
2132
2133         flags = get_adv_instance_flags(hdev, instance);
2134
2135         /* If the "connectable" instance flag was not set, then choose between
2136          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2137          */
2138         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2139                       mgmt_get_connectable(hdev);
2140
2141         if (!is_advertising_allowed(hdev, connectable))
2142                 return -EPERM;
2143
2144         /* Set require_privacy to true only when non-connectable
2145          * advertising is used. In that case it is fine to use a
2146          * non-resolvable private address.
2147          */
2148         err = hci_get_random_address(hdev, !connectable,
2149                                      adv_use_rpa(hdev, flags), adv_instance,
2150                                      &own_addr_type, &random_addr);
2151         if (err < 0)
2152                 return err;
2153
2154         memset(&cp, 0, sizeof(cp));
2155
2156         if (adv_instance) {
2157                 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
2158                 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
2159                 cp.tx_power = adv_instance->tx_power;
2160         } else {
2161                 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2162                 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2163                 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
2164         }
2165
2166         secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2167
2168         if (connectable) {
2169                 if (secondary_adv)
2170                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2171                 else
2172                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2173         } else if (adv_instance_is_scannable(hdev, instance)) {
2174                 if (secondary_adv)
2175                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2176                 else
2177                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2178         } else {
2179                 if (secondary_adv)
2180                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2181                 else
2182                         cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2183         }
2184
2185         cp.own_addr_type = own_addr_type;
2186         cp.channel_map = hdev->le_adv_channel_map;
2187         cp.handle = instance;
2188
2189         if (flags & MGMT_ADV_FLAG_SEC_2M) {
2190                 cp.primary_phy = HCI_ADV_PHY_1M;
2191                 cp.secondary_phy = HCI_ADV_PHY_2M;
2192         } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2193                 cp.primary_phy = HCI_ADV_PHY_CODED;
2194                 cp.secondary_phy = HCI_ADV_PHY_CODED;
2195         } else {
2196                 /* In all other cases use 1M */
2197                 cp.primary_phy = HCI_ADV_PHY_1M;
2198                 cp.secondary_phy = HCI_ADV_PHY_1M;
2199         }
2200
2201         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2202
2203         if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2204             bacmp(&random_addr, BDADDR_ANY)) {
2205                 struct hci_cp_le_set_adv_set_rand_addr cp;
2206
2207                 /* Check if the random address needs to be updated */
2208                 if (adv_instance) {
2209                         if (!bacmp(&random_addr, &adv_instance->random_addr))
2210                                 return 0;
2211                 } else {
2212                         if (!bacmp(&random_addr, &hdev->random_addr))
2213                                 return 0;
2214                 }
2215
2216                 memset(&cp, 0, sizeof(cp));
2217
2218                 cp.handle = instance;
2219                 bacpy(&cp.bdaddr, &random_addr);
2220
2221                 hci_req_add(req,
2222                             HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
2223                             sizeof(cp), &cp);
2224         }
2225
2226         return 0;
2227 }
2228
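/* Enable a single extended advertising set. The controller does the
 * scheduling here, so the instance timeout is handed over as the set
 * duration in units of 10 ms: e.g. a 5 second timeout becomes 5000 ms
 * and a duration field of 500.
 */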
2229 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2230 {
2231         struct hci_dev *hdev = req->hdev;
2232         struct hci_cp_le_set_ext_adv_enable *cp;
2233         struct hci_cp_ext_adv_set *adv_set;
2234         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2235         struct adv_info *adv_instance;
2236
2237         if (instance > 0) {
2238                 adv_instance = hci_find_adv_instance(hdev, instance);
2239                 if (!adv_instance)
2240                         return -EINVAL;
2241         } else {
2242                 adv_instance = NULL;
2243         }
2244
2245         cp = (void *) data;
2246         adv_set = (void *) cp->data;
2247
2248         memset(cp, 0, sizeof(*cp));
2249
2250         cp->enable = 0x01;
2251         cp->num_of_sets = 0x01;
2252
2253         memset(adv_set, 0, sizeof(*adv_set));
2254
2255         adv_set->handle = instance;
2256
2257         /* Set the duration per instance since the controller is
2258          * responsible for scheduling it.
2259          */
2260         if (adv_instance && adv_instance->duration) {
2261                 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2262
2263                 /* Time = N * 10 ms */
2264                 adv_set->duration = cpu_to_le16(duration / 10);
2265         }
2266
2267         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2268                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
2269                     data);
2270
2271         return 0;
2272 }
2273
2274 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2275 {
2276         struct hci_dev *hdev = req->hdev;
2277         struct hci_cp_le_set_ext_adv_enable *cp;
2278         struct hci_cp_ext_adv_set *adv_set;
2279         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2280         u8 req_size;
2281
2282         /* If request specifies an instance that doesn't exist, fail */
2283         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2284                 return -EINVAL;
2285
2286         memset(data, 0, sizeof(data));
2287
2288         cp = (void *)data;
2289         adv_set = (void *)cp->data;
2290
2291         /* Instance 0x00 indicates all advertising instances will be disabled */
2292         cp->num_of_sets = !!instance;
2293         cp->enable = 0x00;
2294
2295         adv_set->handle = instance;
2296
2297         req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2298         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2299
2300         return 0;
2301 }
2302
2303 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2304 {
2305         struct hci_dev *hdev = req->hdev;
2306
2307         /* If request specifies an instance that doesn't exist, fail */
2308         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2309                 return -EINVAL;
2310
2311         hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2312
2313         return 0;
2314 }
2315
2316 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2317 {
2318         struct hci_dev *hdev = req->hdev;
2319         struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2320         int err;
2321
2322         /* If the instance isn't pending, the controller already knows
2323          * about it, and it's safe to disable it
2324          */
2325         if (adv_instance && !adv_instance->pending)
2326                 __hci_req_disable_ext_adv_instance(req, instance);
2327
2328         err = __hci_req_setup_ext_adv_instance(req, instance);
2329         if (err < 0)
2330                 return err;
2331
2332         __hci_req_update_scan_rsp_data(req, instance);
2333         __hci_req_enable_ext_advertising(req, instance);
2334
2335         return 0;
2336 }
2337
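/* Make the given instance the current one and start advertising it. The
 * effective timeout is min(duration, remaining_time) unless the instance
 * advertises forever (timeout == 0); remaining_time is then decremented,
 * so an instance with, say, a 30 s lifetime and a 10 s rotation duration
 * gets scheduled at most three times.
 */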
2338 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2339                                     bool force)
2340 {
2341         struct hci_dev *hdev = req->hdev;
2342         struct adv_info *adv_instance = NULL;
2343         u16 timeout;
2344
2345         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2346             list_empty(&hdev->adv_instances))
2347                 return -EPERM;
2348
2349         if (hdev->adv_instance_timeout)
2350                 return -EBUSY;
2351
2352         adv_instance = hci_find_adv_instance(hdev, instance);
2353         if (!adv_instance)
2354                 return -ENOENT;
2355
2356         /* A zero timeout means unlimited advertising. As long as there is
2357          * only one instance, duration should be ignored. We still set a timeout
2358          * in case further instances are being added later on.
2359          *
2360          * If the remaining lifetime of the instance is more than the duration
2361          * then the timeout corresponds to the duration, otherwise it will be
2362          * reduced to the remaining instance lifetime.
2363          */
2364         if (adv_instance->timeout == 0 ||
2365             adv_instance->duration <= adv_instance->remaining_time)
2366                 timeout = adv_instance->duration;
2367         else
2368                 timeout = adv_instance->remaining_time;
2369
2370         /* The remaining time is being reduced unless the instance is being
2371          * advertised without time limit.
2372          */
2373         if (adv_instance->timeout)
2374                 adv_instance->remaining_time =
2375                                 adv_instance->remaining_time - timeout;
2376
2377         /* Only use work for scheduling instances with legacy advertising */
2378         if (!ext_adv_capable(hdev)) {
2379                 hdev->adv_instance_timeout = timeout;
2380                 queue_delayed_work(hdev->req_workqueue,
2381                            &hdev->adv_instance_expire,
2382                            msecs_to_jiffies(timeout * 1000));
2383         }
2384
2385         /* If we're just re-scheduling the same instance again then do not
2386          * execute any HCI commands. This happens when a single instance is
2387          * being advertised.
2388          */
2389         if (!force && hdev->cur_adv_instance == instance &&
2390             hci_dev_test_flag(hdev, HCI_LE_ADV))
2391                 return 0;
2392
2393         hdev->cur_adv_instance = instance;
2394         if (ext_adv_capable(hdev)) {
2395                 __hci_req_start_ext_adv(req, instance);
2396         } else {
2397                 __hci_req_update_adv_data(req, instance);
2398                 __hci_req_update_scan_rsp_data(req, instance);
2399                 __hci_req_enable_advertising(req);
2400         }
2401
2402         return 0;
2403 }
2404
2405 /* For a single instance:
2406  * - force == true: The instance will be removed even when its remaining
2407  *   lifetime is not zero.
2408  * - force == false: the instance will be deactivated but kept stored unless
2409  *   the remaining lifetime is zero.
2410  *
2411  * For instance == 0x00:
2412  * - force == true: All instances will be removed regardless of their timeout
2413  *   setting.
2414  * - force == false: Only instances that have a timeout will be removed.
2415  */
2416 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2417                                 struct hci_request *req, u8 instance,
2418                                 bool force)
2419 {
2420         struct adv_info *adv_instance, *n, *next_instance = NULL;
2421         int err;
2422         u8 rem_inst;
2423
2424         /* Cancel any timeout concerning the removed instance(s). */
2425         if (!instance || hdev->cur_adv_instance == instance)
2426                 cancel_adv_timeout(hdev);
2427
2428         /* Get the next instance to advertise BEFORE we remove
2429          * the current one. This can be the same instance again
2430          * if there is only one instance.
2431          */
2432         if (instance && hdev->cur_adv_instance == instance)
2433                 next_instance = hci_get_next_instance(hdev, instance);
2434
2435         if (instance == 0x00) {
2436                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2437                                          list) {
2438                         if (!(force || adv_instance->timeout))
2439                                 continue;
2440
2441                         rem_inst = adv_instance->instance;
2442                         err = hci_remove_adv_instance(hdev, rem_inst);
2443                         if (!err)
2444                                 mgmt_advertising_removed(sk, hdev, rem_inst);
2445                 }
2446         } else {
2447                 adv_instance = hci_find_adv_instance(hdev, instance);
2448
2449                 if (force || (adv_instance && adv_instance->timeout &&
2450                               !adv_instance->remaining_time)) {
2451                         /* Don't advertise a removed instance. */
2452                         if (next_instance &&
2453                             next_instance->instance == instance)
2454                                 next_instance = NULL;
2455
2456                         err = hci_remove_adv_instance(hdev, instance);
2457                         if (!err)
2458                                 mgmt_advertising_removed(sk, hdev, instance);
2459                 }
2460         }
2461
2462         if (!req || !hdev_is_powered(hdev) ||
2463             hci_dev_test_flag(hdev, HCI_ADVERTISING))
2464                 return;
2465
2466         if (next_instance && !ext_adv_capable(hdev))
2467                 __hci_req_schedule_adv_instance(req, next_instance->instance,
2468                                                 false);
2469 }
2470
2471 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2472 {
2473         struct hci_dev *hdev = req->hdev;
2474
2475         /* If we're advertising or initiating an LE connection we can't
2476          * go ahead and change the random address at this time. This is
2477          * because the eventual initiator address used for the
2478          * subsequently created connection will be undefined (some
2479          * controllers use the new address and others the one we had
2480          * when the operation started).
2481          *
2482          * In this kind of scenario skip the update and let the random
2483          * address be updated at the next cycle.
2484          */
2485         if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2486             hci_lookup_le_connect(hdev)) {
2487                 bt_dev_dbg(hdev, "Deferring random address update");
2488                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2489                 return;
2490         }
2491
2492         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2493 }
2494
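/* Decide which own address type upcoming LE operations should use and,
 * when a random address is required, queue the command to program it.
 * The four possible outcomes are a (re)generated RPA when privacy is
 * enabled, a non-resolvable private address when only require_privacy is
 * set, the configured static address, or the public address.
 */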
2495 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2496                               bool use_rpa, u8 *own_addr_type)
2497 {
2498         struct hci_dev *hdev = req->hdev;
2499         int err;
2500
2501         /* If privacy is enabled, use a resolvable private address. If the
2502          * current RPA has expired or something other than the current RPA
2503          * is in use, then generate a new one.
2504          */
2505         if (use_rpa) {
2506                 int to;
2507
2508                 /* If the controller supports LL Privacy, use own address
2509                  * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2510                  */
2511                 if (use_ll_privacy(hdev))
2512                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2513                 else
2514                         *own_addr_type = ADDR_LE_DEV_RANDOM;
2515
2516                 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2517                     !bacmp(&hdev->random_addr, &hdev->rpa))
2518                         return 0;
2519
2520                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2521                 if (err < 0) {
2522                         bt_dev_err(hdev, "failed to generate new RPA");
2523                         return err;
2524                 }
2525
2526                 set_random_addr(req, &hdev->rpa);
2527
2528                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2529                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2530
2531                 return 0;
2532         }
2533
2534         /* In case of required privacy without resolvable private address,
2535          * use a non-resolvable private address. This is useful for active
2536          * scanning and non-connectable advertising.
2537          */
2538         if (require_privacy) {
2539                 bdaddr_t nrpa;
2540
2541                 while (true) {
2542                         /* The non-resolvable private address is generated
2543                          * from six random bytes with the two most significant
2544                          * bits cleared.
2545                          */
2546                         get_random_bytes(&nrpa, 6);
2547                         nrpa.b[5] &= 0x3f;
2548
2549                         /* The non-resolvable private address shall not be
2550                          * equal to the public address.
2551                          */
2552                         if (bacmp(&hdev->bdaddr, &nrpa))
2553                                 break;
2554                 }
2555
2556                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2557                 set_random_addr(req, &nrpa);
2558                 return 0;
2559         }
2560
2561         /* If use of a static address is forced or there is no public
2562          * address, use the static address as the random address (but skip
2563          * the HCI command if the current random address is already the
2564          * static one).
2565          *
2566          * In case BR/EDR has been disabled on a dual-mode controller
2567          * and a static address has been configured, then use that
2568          * address instead of the public BR/EDR address.
2569          */
2570         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2571             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2572             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2573              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2574                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2575                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2576                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2577                                     &hdev->static_addr);
2578                 return 0;
2579         }
2580
2581         /* Neither privacy nor a static address is being used, so use a
2582          * public address.
2583          */
2584         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2585
2586         return 0;
2587 }
2588
2589 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2590 {
2591         struct bdaddr_list *b;
2592
2593         list_for_each_entry(b, &hdev->whitelist, list) {
2594                 struct hci_conn *conn;
2595
2596                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2597                 if (!conn)
2598                         return true;
2599
2600                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2601                         return true;
2602         }
2603
2604         return false;
2605 }
2606
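/* Recompute the BR/EDR scan enable setting: page scan is needed while
 * connectable or while a whitelisted device is disconnected (so that it
 * can reconnect), and inquiry scan is added on top when discoverable.
 * The command is skipped if HCI_PSCAN/HCI_ISCAN already match.
 */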
2607 void __hci_req_update_scan(struct hci_request *req)
2608 {
2609         struct hci_dev *hdev = req->hdev;
2610         u8 scan;
2611
2612         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2613                 return;
2614
2615         if (!hdev_is_powered(hdev))
2616                 return;
2617
2618         if (mgmt_powering_down(hdev))
2619                 return;
2620
2621         if (hdev->scanning_paused)
2622                 return;
2623
2624         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2625             disconnected_whitelist_entries(hdev))
2626                 scan = SCAN_PAGE;
2627         else
2628                 scan = SCAN_DISABLED;
2629
2630         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2631                 scan |= SCAN_INQUIRY;
2632
2633         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2634             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2635                 return;
2636
2637         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2638 }
2639
2640 static int update_scan(struct hci_request *req, unsigned long opt)
2641 {
2642         hci_dev_lock(req->hdev);
2643         __hci_req_update_scan(req);
2644         hci_dev_unlock(req->hdev);
2645         return 0;
2646 }
2647
2648 static void scan_update_work(struct work_struct *work)
2649 {
2650         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2651
2652         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2653 }
2654
2655 static int connectable_update(struct hci_request *req, unsigned long opt)
2656 {
2657         struct hci_dev *hdev = req->hdev;
2658
2659         hci_dev_lock(hdev);
2660
2661         __hci_req_update_scan(req);
2662
2663         /* If BR/EDR is not enabled and we disable advertising as a
2664          * by-product of disabling connectable, we need to update the
2665          * advertising flags.
2666          */
2667         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2668                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2669
2670         /* Update the advertising parameters if necessary */
2671         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2672             !list_empty(&hdev->adv_instances)) {
2673                 if (ext_adv_capable(hdev))
2674                         __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2675                 else
2676                         __hci_req_enable_advertising(req);
2677         }
2678
2679         __hci_update_background_scan(req);
2680
2681         hci_dev_unlock(hdev);
2682
2683         return 0;
2684 }
2685
2686 static void connectable_update_work(struct work_struct *work)
2687 {
2688         struct hci_dev *hdev = container_of(work, struct hci_dev,
2689                                             connectable_update);
2690         u8 status;
2691
2692         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2693         mgmt_set_connectable_complete(hdev, status);
2694 }
2695
2696 static u8 get_service_classes(struct hci_dev *hdev)
2697 {
2698         struct bt_uuid *uuid;
2699         u8 val = 0;
2700
2701         list_for_each_entry(uuid, &hdev->uuids, list)
2702                 val |= uuid->svc_hint;
2703
2704         return val;
2705 }
2706
2707 void __hci_req_update_class(struct hci_request *req)
2708 {
2709         struct hci_dev *hdev = req->hdev;
2710         u8 cod[3];
2711
2712         bt_dev_dbg(hdev, "");
2713
2714         if (!hdev_is_powered(hdev))
2715                 return;
2716
2717         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2718                 return;
2719
2720         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2721                 return;
2722
2723         cod[0] = hdev->minor_class;
2724         cod[1] = hdev->major_class;
2725         cod[2] = get_service_classes(hdev);
2726
2727         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2728                 cod[1] |= 0x20;
2729
2730         if (memcmp(cod, hdev->dev_class, 3) == 0)
2731                 return;
2732
2733         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2734 }
2735
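/* Select the Inquiry Access Code(s) the controller responds to. The LAPs
 * are written least significant byte first: 0x33 0x8b 0x9e is the
 * General IAC (0x9E8B33) and 0x00 0x8b 0x9e the Limited IAC (0x9E8B00),
 * so limited discoverable mode answers both while general discoverable
 * mode only answers the GIAC.
 */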
2736 static void write_iac(struct hci_request *req)
2737 {
2738         struct hci_dev *hdev = req->hdev;
2739         struct hci_cp_write_current_iac_lap cp;
2740
2741         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2742                 return;
2743
2744         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2745                 /* Limited discoverable mode */
2746                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2747                 cp.iac_lap[0] = 0x00;   /* LIAC */
2748                 cp.iac_lap[1] = 0x8b;
2749                 cp.iac_lap[2] = 0x9e;
2750                 cp.iac_lap[3] = 0x33;   /* GIAC */
2751                 cp.iac_lap[4] = 0x8b;
2752                 cp.iac_lap[5] = 0x9e;
2753         } else {
2754                 /* General discoverable mode */
2755                 cp.num_iac = 1;
2756                 cp.iac_lap[0] = 0x33;   /* GIAC */
2757                 cp.iac_lap[1] = 0x8b;
2758                 cp.iac_lap[2] = 0x9e;
2759         }
2760
2761         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2762                     (cp.num_iac * 3) + 1, &cp);
2763 }
2764
2765 static int discoverable_update(struct hci_request *req, unsigned long opt)
2766 {
2767         struct hci_dev *hdev = req->hdev;
2768
2769         hci_dev_lock(hdev);
2770
2771         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2772                 write_iac(req);
2773                 __hci_req_update_scan(req);
2774                 __hci_req_update_class(req);
2775         }
2776
2777         /* Advertising instances don't use the global discoverable setting, so
2778          * only update AD if advertising was enabled using Set Advertising.
2779          */
2780         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2781                 __hci_req_update_adv_data(req, 0x00);
2782
2783                 /* Discoverable mode affects the local advertising
2784                  * address in limited privacy mode.
2785                  */
2786                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2787                         if (ext_adv_capable(hdev))
2788                                 __hci_req_start_ext_adv(req, 0x00);
2789                         else
2790                                 __hci_req_enable_advertising(req);
2791                 }
2792         }
2793
2794         hci_dev_unlock(hdev);
2795
2796         return 0;
2797 }
2798
2799 static void discoverable_update_work(struct work_struct *work)
2800 {
2801         struct hci_dev *hdev = container_of(work, struct hci_dev,
2802                                             discoverable_update);
2803         u8 status;
2804
2805         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2806         mgmt_set_discoverable_complete(hdev, status);
2807 }
2808
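/* Tear down a connection with the HCI command appropriate for its
 * current state: Disconnect (or Disconnect_Physical_Link for AMP links)
 * once connected, Create_Connection_Cancel / LE_Create_Connection_Cancel
 * while still connecting, and Reject_Connection_Request for incoming
 * requests sitting in BT_CONNECT2.
 */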
2809 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2810                       u8 reason)
2811 {
2812         switch (conn->state) {
2813         case BT_CONNECTED:
2814         case BT_CONFIG:
2815                 if (conn->type == AMP_LINK) {
2816                         struct hci_cp_disconn_phy_link cp;
2817
2818                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2819                         cp.reason = reason;
2820                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2821                                     &cp);
2822                 } else {
2823                         struct hci_cp_disconnect dc;
2824
2825                         dc.handle = cpu_to_le16(conn->handle);
2826                         dc.reason = reason;
2827                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2828                 }
2829
2830                 conn->state = BT_DISCONN;
2831
2832                 break;
2833         case BT_CONNECT:
2834                 if (conn->type == LE_LINK) {
2835                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2836                                 break;
2837                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2838                                     0, NULL);
2839                 } else if (conn->type == ACL_LINK) {
2840                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2841                                 break;
2842                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2843                                     6, &conn->dst);
2844                 }
2845                 break;
2846         case BT_CONNECT2:
2847                 if (conn->type == ACL_LINK) {
2848                         struct hci_cp_reject_conn_req rej;
2849
2850                         bacpy(&rej.bdaddr, &conn->dst);
2851                         rej.reason = reason;
2852
2853                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2854                                     sizeof(rej), &rej);
2855                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2856                         struct hci_cp_reject_sync_conn_req rej;
2857
2858                         bacpy(&rej.bdaddr, &conn->dst);
2859
2860                         /* SCO rejection has its own limited set of
2861                          * allowed error values (0x0D-0x0F) which isn't
2862                          * compatible with most values passed to this
2863                          * function. To be safe hard-code one of the
2864                          * values that's suitable for SCO.
2865                          */
2866                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2867
2868                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2869                                     sizeof(rej), &rej);
2870                 }
2871                 break;
2872         default:
2873                 conn->state = BT_CLOSED;
2874                 break;
2875         }
2876 }
2877
2878 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2879 {
2880         if (status)
2881                 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2882 }
2883
2884 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2885 {
2886         struct hci_request req;
2887         int err;
2888
2889         hci_req_init(&req, conn->hdev);
2890
2891         __hci_abort_conn(&req, conn, reason);
2892
2893         err = hci_req_run(&req, abort_conn_complete);
2894         if (err && err != -ENODATA) {
2895                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2896                 return err;
2897         }
2898
2899         return 0;
2900 }
2901
2902 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2903 {
2904         hci_dev_lock(req->hdev);
2905         __hci_update_background_scan(req);
2906         hci_dev_unlock(req->hdev);
2907         return 0;
2908 }
2909
static void bg_scan_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            bg_scan_update);
        struct hci_conn *conn;
        u8 status;
        int err;

        err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
        if (!err)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
        if (conn)
                hci_le_conn_failed(conn, status);

        hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
        hci_req_add_le_scan_disable(req, false);
        return 0;
}

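/* Flush the inquiry cache and start a BR/EDR inquiry using the
 * limited (LIAC) or general (GIAC) inquiry access code, depending on
 * whether limited discovery was requested. The opt parameter carries
 * the inquiry length.
 */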
static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
        u8 length = opt;
        const u8 giac[3] = { 0x33, 0x8b, 0x9e };
        const u8 liac[3] = { 0x00, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        bt_dev_dbg(req->hdev, "");

        hci_dev_lock(req->hdev);
        hci_inquiry_cache_flush(req->hdev);
        hci_dev_unlock(req->hdev);

        memset(&cp, 0, sizeof(cp));

        if (req->hdev->discovery.limited)
                memcpy(&cp.lap, liac, sizeof(cp.lap));
        else
                memcpy(&cp.lap, giac, sizeof(cp.lap));

        cp.length = length;

        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

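/* Worker for the hdev->le_scan_disable delayed work: stop the LE scan
 * once the discovery timeout has expired and decide whether discovery
 * can be stopped entirely or whether a BR/EDR inquiry still needs to
 * be run for interleaved discovery.
 */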
static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        u8 status;

        bt_dev_dbg(hdev, "");

        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return;

        cancel_delayed_work(&hdev->le_scan_restart);

        hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
                           status);
                return;
        }

        hdev->discovery.scan_start = 0;

        /* If we were running an LE-only scan, change the discovery state.
         * If we were running both LE scanning and BR/EDR inquiry
         * simultaneously, and the BR/EDR inquiry has already finished,
         * stop discovery; otherwise the BR/EDR inquiry will stop
         * discovery when it finishes. If we are about to resolve a
         * remote device name, do not change the discovery state.
         */

        if (hdev->discovery.type == DISCOV_TYPE_LE)
                goto discov_stopped;

        if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
                return;

        if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
                if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
                    hdev->discovery.state != DISCOVERY_RESOLVING)
                        goto discov_stopped;

                return;
        }

        hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
                     HCI_CMD_TIMEOUT, &status);
        if (status) {
                bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
                goto discov_stopped;
        }

        return;

discov_stopped:
        hci_dev_lock(hdev);
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        hci_dev_unlock(hdev);
}

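/* Request builder that restarts a running LE scan by disabling and
 * re-enabling it, using the extended scan commands when the
 * controller supports them. Does nothing if scanning is off or
 * paused for suspend.
 */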
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* If controller is not scanning we are done. */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return 0;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return 0;
        }

        hci_req_add_le_scan_disable(req, false);

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_ENABLE;
                cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        return 0;
}

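/* Worker for the hdev->le_scan_restart delayed work: restart the LE
 * scan and, on controllers with a strict duplicate filter, re-arm the
 * le_scan_disable work with the remaining part of the original scan
 * duration.
 */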
static void le_scan_restart_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_restart.work);
        unsigned long timeout, duration, scan_start, now;
        u8 status;

        bt_dev_dbg(hdev, "");

        hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                bt_dev_err(hdev, "failed to restart LE scan: status %d",
                           status);
                return;
        }

        hci_dev_lock(hdev);

        if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
            !hdev->discovery.scan_start)
                goto unlock;

        /* When the scan was started, the hdev->le_scan_disable work was
         * queued to run 'duration' after scan_start. During the scan
         * restart this work was canceled, so queue it again with the
         * remaining timeout to make sure the scan does not run
         * indefinitely.
         */
        duration = hdev->discovery.scan_duration;
        scan_start = hdev->discovery.scan_start;
        now = jiffies;
        if (now - scan_start <= duration) {
                int elapsed;

                if (now >= scan_start)
                        elapsed = now - scan_start;
                else
                        elapsed = ULONG_MAX - scan_start + now;

                timeout = duration - elapsed;
        } else {
                timeout = 0;
        }

        queue_delayed_work(hdev->req_workqueue,
                           &hdev->le_scan_disable, timeout);

unlock:
        hci_dev_unlock(hdev);
}

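/* Request builder for the active LE scan used by discovery: stop any
 * running background scan, pick a suitable own address type and start
 * an active scan with the interval passed via opt.
 */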
static int active_scan(struct hci_request *req, unsigned long opt)
{
        uint16_t interval = opt;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        /* White list is not used for discovery */
        u8 filter_policy = 0x00;
        /* Discovery doesn't require controller address resolution */
        bool addr_resolv = false;
        int err;

        bt_dev_dbg(hdev, "");

        /* If controller is scanning, it means the background scanning is
         * running. Thus, we should temporarily stop it in order to set the
         * discovery scanning parameters.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                hci_req_add_le_scan_disable(req, false);
                cancel_interleave_scan(hdev);
        }

        /* All active scans will be done with either a resolvable private
         * address (when privacy feature has been enabled) or non-resolvable
         * private address.
         */
        err = hci_update_random_address(req, true, scan_use_rpa(hdev),
                                        &own_addr_type);
        if (err < 0)
                own_addr_type = ADDR_LE_DEV_PUBLIC;

        hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
                           hdev->le_scan_window_discovery, own_addr_type,
                           filter_policy, addr_resolv);
        return 0;
}

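/* Run LE scanning and BR/EDR inquiry at the same time, for
 * controllers that support truly simultaneous discovery.
 */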
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
        int err;

        bt_dev_dbg(req->hdev, "");

        err = active_scan(req, opt);
        if (err)
                return err;

        return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

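/* Start discovery according to hdev->discovery.type and, on success,
 * schedule the le_scan_disable work to end the LE scan after the
 * applicable timeout. The HCI status is reported through the status
 * pointer.
 */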
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
        unsigned long timeout;

        bt_dev_dbg(hdev, "type %u", hdev->discovery.type);

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_BREDR:
                if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
                        hci_req_sync(hdev, bredr_inquiry,
                                     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
                                     status);
                return;
        case DISCOV_TYPE_INTERLEAVED:
                /* When running simultaneous discovery, the LE scanning time
                 * should occupy the whole discovery time since BR/EDR inquiry
                 * and LE scanning are scheduled by the controller.
                 *
                 * For interleaved discovery, in comparison, BR/EDR inquiry
                 * and LE scanning are done sequentially with separate
                 * timeouts.
                 */
                if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
                             &hdev->quirks)) {
                        timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                        /* During simultaneous discovery, we double the LE
                         * scan interval. We must leave some time for the
                         * controller to do BR/EDR inquiry.
                         */
                        hci_req_sync(hdev, interleaved_discov,
                                     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
                                     status);
                        break;
                }

                timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
                hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
                             HCI_CMD_TIMEOUT, status);
                break;
        case DISCOV_TYPE_LE:
                timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
                             HCI_CMD_TIMEOUT, status);
                break;
        default:
                *status = HCI_ERROR_UNSPECIFIED;
                return;
        }

        if (*status)
                return;

        bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));

        /* When service discovery is used and the controller has a
         * strict duplicate filter, it is important to remember the
         * start and duration of the scan. This is required for
         * restarting scanning during the discovery phase.
         */
        if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
                     hdev->discovery.result_filtering) {
                hdev->discovery.scan_start = jiffies;
                hdev->discovery.scan_duration = timeout;
        }

        queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
                           timeout);
}

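/* Append the commands needed to stop the current discovery (inquiry
 * cancel, LE scan disable and/or remote name request cancel) to the
 * given request. Returns true if at least one command was queued.
 */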
bool hci_req_stop_discovery(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct discovery_state *d = &hdev->discovery;
        struct hci_cp_remote_name_req_cancel cp;
        struct inquiry_entry *e;
        bool ret = false;

        bt_dev_dbg(hdev, "state %u", hdev->discovery.state);

        if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
                if (test_bit(HCI_INQUIRY, &hdev->flags))
                        hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_delayed_work(&hdev->le_scan_disable);
                        hci_req_add_le_scan_disable(req, false);
                }

                ret = true;
        } else {
                /* Passive scanning */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        hci_req_add_le_scan_disable(req, false);
                        ret = true;
                }
        }

        /* No further actions needed for LE-only discovery */
        if (d->type == DISCOV_TYPE_LE)
                return ret;

        if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
                                                     NAME_PENDING);
                if (!e)
                        return ret;

                bacpy(&cp.bdaddr, &e->data.bdaddr);
                hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
                            &cp);
                ret = true;
        }

        return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        hci_req_stop_discovery(req);
        hci_dev_unlock(req->hdev);

        return 0;
}

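/* Worker for hdev->discov_update: drive the discovery state machine
 * by starting or stopping discovery and reporting the result to the
 * management interface.
 */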
static void discov_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_update);
        u8 status = 0;

        switch (hdev->discovery.state) {
        case DISCOVERY_STARTING:
                start_discovery(hdev, &status);
                mgmt_start_discovery_complete(hdev, status);
                if (status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                else
                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
                break;
        case DISCOVERY_STOPPING:
                hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
                mgmt_stop_discovery_complete(hdev, status);
                if (!status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                break;
        case DISCOVERY_STOPPED:
        default:
                return;
        }
}

static void discov_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_off.work);

        bt_dev_dbg(hdev, "");

        hci_dev_lock(hdev);

        /* When discoverable timeout triggers, then just make sure
         * the limited discoverable flag is cleared. Even in the case
         * of a timeout triggered from general discoverable, it is
         * safe to unconditionally clear the flag.
         */
        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);

        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
        mgmt_new_settings(hdev);
}

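/* Request builder that brings the controller in sync with the host
 * settings after power on: SSP and Secure Connections support, LE
 * host support, advertising data and instances, link security, scan
 * mode, device class, name and EIR data.
 */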
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 link_sec;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            !lmp_host_ssp_capable(hdev)) {
                u8 mode = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

                if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
                        u8 support = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                                    sizeof(support), &support);
                }
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
            lmp_bredr_capable(hdev)) {
                struct hci_cp_write_le_host_supported cp;

                cp.le = 0x01;
                cp.simul = 0x00;

                /* Check first if we already have the right
                 * host state (host features set)
                 */
                if (cp.le != lmp_host_le_capable(hdev) ||
                    cp.simul != lmp_host_le_br_capable(hdev))
                        hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
                                    sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                /* Make sure the controller has a good default for
                 * advertising data. This also applies to the case
                 * where BR/EDR was toggled during the AUTO_OFF phase.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
                    list_empty(&hdev->adv_instances)) {
                        int err;

                        if (ext_adv_capable(hdev)) {
                                err = __hci_req_setup_ext_adv_instance(req,
                                                                       0x00);
                                if (!err)
                                        __hci_req_update_scan_rsp_data(req,
                                                                       0x00);
                        } else {
                                err = 0;
                                __hci_req_update_adv_data(req, 0x00);
                                __hci_req_update_scan_rsp_data(req, 0x00);
                        }

                        if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
                                if (!ext_adv_capable(hdev))
                                        __hci_req_enable_advertising(req);
                                else if (!err)
                                        __hci_req_enable_ext_advertising(req,
                                                                         0x00);
                        }
                } else if (!list_empty(&hdev->adv_instances)) {
                        struct adv_info *adv_instance;

                        adv_instance = list_first_entry(&hdev->adv_instances,
                                                        struct adv_info, list);
                        __hci_req_schedule_adv_instance(req,
                                                        adv_instance->instance,
                                                        true);
                }
        }

        link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
                            sizeof(link_sec), &link_sec);

        if (lmp_bredr_capable(hdev)) {
                if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
                        __hci_req_write_fast_connectable(req, true);
                else
                        __hci_req_write_fast_connectable(req, false);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
                __hci_req_update_name(req);
                __hci_req_update_eir(req);
        }

        hci_dev_unlock(hdev);
        return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
        /* Register the available SMP channels (BR/EDR and LE) only when
         * successfully powering on the controller. This late
         * registration is required so that LE SMP can clearly decide if
         * the public address or static address is used.
         */
        smp_register(hdev);

        return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
                              NULL);
}

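/* Initialize the work items and delayed work items used by the
 * request infrastructure for discovery, scanning and advertising
 * updates.
 */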
void hci_request_setup(struct hci_dev *hdev)
{
        INIT_WORK(&hdev->discov_update, discov_update);
        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
        INIT_WORK(&hdev->scan_update, scan_update_work);
        INIT_WORK(&hdev->connectable_update, connectable_update_work);
        INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
        INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
        INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
        INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

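/* Cancel a pending synchronous request and all work items set up by
 * hci_request_setup(), including a still armed advertising instance
 * timeout.
 */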
void hci_request_cancel_all(struct hci_dev *hdev)
{
        hci_req_sync_cancel(hdev, ENODEV);

        cancel_work_sync(&hdev->discov_update);
        cancel_work_sync(&hdev->bg_scan_update);
        cancel_work_sync(&hdev->scan_update);
        cancel_work_sync(&hdev->connectable_update);
        cancel_work_sync(&hdev->discoverable_update);
        cancel_delayed_work_sync(&hdev->discov_off);
        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work_sync(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }

        cancel_interleave_scan(hdev);
}