/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

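/* Initialize @req as an empty command queue bound to @hdev with no
 * pending build error.
 */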
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

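/* Hand the request's command queue over to the controller: attach the
 * completion callback (plain or skb-based) to the last command, splice
 * the queue onto hdev->cmd_q and kick the command work.
 */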
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

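/* Completion handler for synchronous requests: store the result, keep a
 * reference to the response skb (if any) and wake up the waiter.
 */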
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

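/* Abort a pending synchronous request with @err and wake up the waiter. */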
void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

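/* Build a one-command request and wait for it to complete (optionally
 * matched against @event) or until @timeout expires. Returns the response
 * skb on success, otherwise an ERR_PTR.
 */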
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}

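/* Allocate an skb and fill in the HCI command header and parameters. */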
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

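/* Bring the controller white list in sync with pend_le_conns and
 * pend_le_reports. Returns the scan filter policy to use: 0x01 when the
 * white list can be used, 0x00 when it cannot (an RPA is involved or the
 * list would overflow).
 */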
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the white list.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return a filter policy value to not use the
         * white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

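/* Queue the commands to set the scan parameters and enable LE passive
 * scanning, as used by the background scan.
 */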
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ is sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses with
         * LE privacy enabled, controllers that support Extended Scanner
         * Filter Policies can additionally handle directed advertising.
         *
         * So instead of using filter policies 0x00 (no white list)
         * and 0x01 (white list enabled) use the new filter policies
         * 0x02 (no white list) and 0x03 (white list enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

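/* Return the advertising flags for @instance. Instance 0 derives them from
 * the global connectable and discoverable settings.
 */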
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

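/* Consult the supported LE states (le_states) to decide whether advertising
 * of the given type is possible alongside existing slave or master role
 * connections.
 */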
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
        /* If there is no connection we are OK to advertise. */
        if (hci_conn_num(hdev, LE_LINK) == 0)
                return true;

        /* Check le_states if there is any connection in slave role. */
        if (hdev->conn_hash.le_num_slave > 0) {
                /* Slave connection state and non-connectable mode bit 20. */
                if (!connectable && !(hdev->le_states[2] & 0x10))
                        return false;

                /* Slave connection state and connectable mode bit 38
                 * and scannable bit 21.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x40) ||
                                    !(hdev->le_states[2] & 0x20)))
                        return false;
        }

        /* Check le_states if there is any connection in master role. */
        if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
                /* Master connection state and non-connectable mode bit 18. */
                if (!connectable && !(hdev->le_states[2] & 0x02))
                        return false;

                /* Master connection state and connectable mode bit 35 and
                 * scannable bit 19.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x08) ||
                                    !(hdev->le_states[2] & 0x08)))
                        return false;
        }

        return true;
}

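/* Queue the commands to configure and enable undirected advertising for the
 * current advertising instance, honoring the connectable setting.
 */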
void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

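/* Append the device name to the AD data at @ptr, preferring the complete
 * name, then the short name, then a truncated complete name. Returns the
 * updated AD length.
 */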
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t short_len;
        size_t complete_len;

        /* no space left for name (+ NULL + type + len) */
        if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
                return ad_len;

        /* use complete name if present and fits */
        complete_len = strlen(hdev->dev_name);
        if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
                return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
                                       hdev->dev_name, complete_len + 1);

        /* use short name if present */
        short_len = strlen(hdev->short_name);
        if (short_len)
                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
                                       hdev->short_name, short_len + 1);

        /* use shortened full name if present, we already know that name
         * is longer than HCI_MAX_SHORT_NAME_LENGTH
         */
        if (complete_len) {
                u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

                memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
                name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
                                       sizeof(name));
        }

        return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 scan_rsp_len = 0;

        if (hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

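/* Activate advertising instance @instance and arm its expiry timer. With
 * @force set the instance is reprogrammed even when it is already being
 * advertised.
 */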
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a timeout
         * in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: The instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
                                struct hci_request *req, u8 instance,
                                bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

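/* Select the own address type for advertising or scanning and, when a
 * random address is required, queue the command to set it. The chosen type
 * is returned through @own_addr_type.
 */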
1429 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1430                               bool use_rpa, u8 *own_addr_type)
1431 {
1432         struct hci_dev *hdev = req->hdev;
1433         int err;
1434
1435         /* If privacy is enabled use a resolvable private address. If
1436          * current RPA has expired or there is something else than
1437          * the current RPA in use, then generate a new one.
1438          */
1439         if (use_rpa) {
1440                 int to;
1441
1442                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1443
1444                 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1445                     !bacmp(&hdev->random_addr, &hdev->rpa))
1446                         return 0;
1447
1448                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1449                 if (err < 0) {
1450                         bt_dev_err(hdev, "failed to generate new RPA");
1451                         return err;
1452                 }
1453
1454                 set_random_addr(req, &hdev->rpa);
1455
1456                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1457                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1458
1459                 return 0;
1460         }
1461
1462         /* In case of required privacy without resolvable private address,
1463          * use an non-resolvable private address. This is useful for active
1464          * scanning and non-connectable advertising.
1465          */
1466         if (require_privacy) {
1467                 bdaddr_t nrpa;
1468
1469                 while (true) {
1470                         /* The non-resolvable private address is generated
1471                          * from random six bytes with the two most significant
1472                          * bits cleared.
1473                          */
1474                         get_random_bytes(&nrpa, 6);
1475                         nrpa.b[5] &= 0x3f;
1476
1477                         /* The non-resolvable private address shall not be
1478                          * equal to the public address.
1479                          */
1480                         if (bacmp(&hdev->bdaddr, &nrpa))
1481                                 break;
1482                 }
1483
1484                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1485                 set_random_addr(req, &nrpa);
1486                 return 0;
1487         }
1488
1489         /* If forcing static address is in use or there is no public
1490          * address use the static address as random address (but skip
1491          * the HCI command if the current random address is already the
1492          * static one.
1493          *
1494          * In case BR/EDR has been disabled on a dual-mode controller
1495          * and a static address has been configured, then use that
1496          * address instead of the public BR/EDR address.
1497          */
1498         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1499             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1500             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1501              bacmp(&hdev->static_addr, BDADDR_ANY))) {
1502                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1503                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1504                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1505                                     &hdev->static_addr);
1506                 return 0;
1507         }
1508
1509         /* Neither privacy nor static address is being used so use a
1510          * public address.
1511          */
1512         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1513
1514         return 0;
1515 }
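
/* Example: callers of hci_update_random_address() pick the own address
 * type through it and fall back to the public address when it fails. A
 * minimal sketch, mirroring active_scan() later in this file:
 *
 *	u8 own_addr_type;
 *	int err;
 *
 *	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
 *					&own_addr_type);
 *	if (err < 0)
 *		own_addr_type = ADDR_LE_DEV_PUBLIC;
 */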
1516
1517 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1518 {
1519         struct bdaddr_list *b;
1520
1521         list_for_each_entry(b, &hdev->whitelist, list) {
1522                 struct hci_conn *conn;
1523
1524                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1525                 if (!conn)
1526                         return true;
1527
1528                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1529                         return true;
1530         }
1531
1532         return false;
1533 }
1534
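/* Bring the controller's page and inquiry scan setting in line with the
 * current connectable and discoverable state. Callers are expected to
 * hold hdev->lock; update_scan() below provides a locked wrapper.
 */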
1535 void __hci_req_update_scan(struct hci_request *req)
1536 {
1537         struct hci_dev *hdev = req->hdev;
1538         u8 scan;
1539
1540         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1541                 return;
1542
1543         if (!hdev_is_powered(hdev))
1544                 return;
1545
1546         if (mgmt_powering_down(hdev))
1547                 return;
1548
1549         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1550             disconnected_whitelist_entries(hdev))
1551                 scan = SCAN_PAGE;
1552         else
1553                 scan = SCAN_DISABLED;
1554
1555         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1556                 scan |= SCAN_INQUIRY;
1557
1558         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1559             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1560                 return;
1561
1562         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1563 }
1564
1565 static int update_scan(struct hci_request *req, unsigned long opt)
1566 {
1567         hci_dev_lock(req->hdev);
1568         __hci_req_update_scan(req);
1569         hci_dev_unlock(req->hdev);
1570         return 0;
1571 }
1572
1573 static void scan_update_work(struct work_struct *work)
1574 {
1575         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1576
1577         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1578 }
1579
1580 static int connectable_update(struct hci_request *req, unsigned long opt)
1581 {
1582         struct hci_dev *hdev = req->hdev;
1583
1584         hci_dev_lock(hdev);
1585
1586         __hci_req_update_scan(req);
1587
1588         /* If BR/EDR is not enabled and advertising gets disabled as a
1589          * by-product of disabling the connectable setting, the
1590          * advertising flags need to be updated.
1591          */
1592         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1593                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1594
1595         /* Update the advertising parameters if necessary */
1596         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1597             !list_empty(&hdev->adv_instances))
1598                 __hci_req_enable_advertising(req);
1599
1600         __hci_update_background_scan(req);
1601
1602         hci_dev_unlock(hdev);
1603
1604         return 0;
1605 }
1606
1607 static void connectable_update_work(struct work_struct *work)
1608 {
1609         struct hci_dev *hdev = container_of(work, struct hci_dev,
1610                                             connectable_update);
1611         u8 status;
1612
1613         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1614         mgmt_set_connectable_complete(hdev, status);
1615 }
1616
1617 static u8 get_service_classes(struct hci_dev *hdev)
1618 {
1619         struct bt_uuid *uuid;
1620         u8 val = 0;
1621
1622         list_for_each_entry(uuid, &hdev->uuids, list)
1623                 val |= uuid->svc_hint;
1624
1625         return val;
1626 }
1627
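/* The Class of Device is a little-endian 3-byte field: octet 0 carries
 * the minor device class, octet 1 the major device class and octet 2
 * the major service classes. The 0x20 set in octet 1 below is bit 13 of
 * the field, the Limited Discoverable Mode flag.
 */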
1628 void __hci_req_update_class(struct hci_request *req)
1629 {
1630         struct hci_dev *hdev = req->hdev;
1631         u8 cod[3];
1632
1633         BT_DBG("%s", hdev->name);
1634
1635         if (!hdev_is_powered(hdev))
1636                 return;
1637
1638         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1639                 return;
1640
1641         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1642                 return;
1643
1644         cod[0] = hdev->minor_class;
1645         cod[1] = hdev->major_class;
1646         cod[2] = get_service_classes(hdev);
1647
1648         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1649                 cod[1] |= 0x20;
1650
1651         if (memcmp(cod, hdev->dev_class, 3) == 0)
1652                 return;
1653
1654         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1655 }
1656
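/* The inquiry access codes below come from the reserved LAP range:
 * GIAC (general inquiry) is LAP 0x9e8b33 and LIAC (limited inquiry) is
 * LAP 0x9e8b00, each written to the controller in little-endian byte
 * order.
 */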
1657 static void write_iac(struct hci_request *req)
1658 {
1659         struct hci_dev *hdev = req->hdev;
1660         struct hci_cp_write_current_iac_lap cp;
1661
1662         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1663                 return;
1664
1665         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1666                 /* Limited discoverable mode */
1667                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1668                 cp.iac_lap[0] = 0x00;   /* LIAC */
1669                 cp.iac_lap[1] = 0x8b;
1670                 cp.iac_lap[2] = 0x9e;
1671                 cp.iac_lap[3] = 0x33;   /* GIAC */
1672                 cp.iac_lap[4] = 0x8b;
1673                 cp.iac_lap[5] = 0x9e;
1674         } else {
1675                 /* General discoverable mode */
1676                 cp.num_iac = 1;
1677                 cp.iac_lap[0] = 0x33;   /* GIAC */
1678                 cp.iac_lap[1] = 0x8b;
1679                 cp.iac_lap[2] = 0x9e;
1680         }
1681
1682         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1683                     (cp.num_iac * 3) + 1, &cp);
1684 }
1685
1686 static int discoverable_update(struct hci_request *req, unsigned long opt)
1687 {
1688         struct hci_dev *hdev = req->hdev;
1689
1690         hci_dev_lock(hdev);
1691
1692         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1693                 write_iac(req);
1694                 __hci_req_update_scan(req);
1695                 __hci_req_update_class(req);
1696         }
1697
1698         /* Advertising instances don't use the global discoverable setting, so
1699          * only update AD if advertising was enabled using Set Advertising.
1700          */
1701         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1702                 __hci_req_update_adv_data(req, 0x00);
1703
1704                 /* Discoverable mode affects the local advertising
1705                  * address in limited privacy mode.
1706                  */
1707                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1708                         __hci_req_enable_advertising(req);
1709         }
1710
1711         hci_dev_unlock(hdev);
1712
1713         return 0;
1714 }
1715
1716 static void discoverable_update_work(struct work_struct *work)
1717 {
1718         struct hci_dev *hdev = container_of(work, struct hci_dev,
1719                                             discoverable_update);
1720         u8 status;
1721
1722         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1723         mgmt_set_discoverable_complete(hdev, status);
1724 }
1725
1726 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1727                       u8 reason)
1728 {
1729         switch (conn->state) {
1730         case BT_CONNECTED:
1731         case BT_CONFIG:
1732                 if (conn->type == AMP_LINK) {
1733                         struct hci_cp_disconn_phy_link cp;
1734
1735                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1736                         cp.reason = reason;
1737                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1738                                     &cp);
1739                 } else {
1740                         struct hci_cp_disconnect dc;
1741
1742                         dc.handle = cpu_to_le16(conn->handle);
1743                         dc.reason = reason;
1744                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1745                 }
1746
1747                 conn->state = BT_DISCONN;
1748
1749                 break;
1750         case BT_CONNECT:
1751                 if (conn->type == LE_LINK) {
1752                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1753                                 break;
1754                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1755                                     0, NULL);
1756                 } else if (conn->type == ACL_LINK) {
1757                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1758                                 break;
1759                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1760                                     6, &conn->dst);
1761                 }
1762                 break;
1763         case BT_CONNECT2:
1764                 if (conn->type == ACL_LINK) {
1765                         struct hci_cp_reject_conn_req rej;
1766
1767                         bacpy(&rej.bdaddr, &conn->dst);
1768                         rej.reason = reason;
1769
1770                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1771                                     sizeof(rej), &rej);
1772                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1773                         struct hci_cp_reject_sync_conn_req rej;
1774
1775                         bacpy(&rej.bdaddr, &conn->dst);
1776
1777                         /* SCO rejection has its own limited set of
1778                          * allowed error values (0x0D-0x0F) which isn't
1779                          * compatible with most values passed to this
1780                          * function. To be safe, hard-code one of the
1781                          * values that is suitable for SCO.
1782                          */
1783                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1784
1785                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1786                                     sizeof(rej), &rej);
1787                 }
1788                 break;
1789         default:
1790                 conn->state = BT_CLOSED;
1791                 break;
1792         }
1793 }
1794
1795 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1796 {
1797         if (status)
1798                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1799 }
1800
1801 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1802 {
1803         struct hci_request req;
1804         int err;
1805
1806         hci_req_init(&req, conn->hdev);
1807
1808         __hci_abort_conn(&req, conn, reason);
1809
1810         err = hci_req_run(&req, abort_conn_complete);
1811         if (err && err != -ENODATA) {
1812                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
1813                 return err;
1814         }
1815
1816         return 0;
1817 }
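
/* Example: a typical caller of hci_abort_conn() passes one of the
 * standard disconnect reasons. A minimal sketch:
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 */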
1818
1819 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1820 {
1821         hci_dev_lock(req->hdev);
1822         __hci_update_background_scan(req);
1823         hci_dev_unlock(req->hdev);
1824         return 0;
1825 }
1826
1827 static void bg_scan_update(struct work_struct *work)
1828 {
1829         struct hci_dev *hdev = container_of(work, struct hci_dev,
1830                                             bg_scan_update);
1831         struct hci_conn *conn;
1832         u8 status;
1833         int err;
1834
1835         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1836         if (!err)
1837                 return;
1838
1839         hci_dev_lock(hdev);
1840
1841         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1842         if (conn)
1843                 hci_le_conn_failed(conn, status);
1844
1845         hci_dev_unlock(hdev);
1846 }
1847
1848 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1849 {
1850         hci_req_add_le_scan_disable(req);
1851         return 0;
1852 }
1853
1854 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1855 {
1856         u8 length = opt;
1857         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1858         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1859         struct hci_cp_inquiry cp;
1860
1861         BT_DBG("%s", req->hdev->name);
1862
1863         hci_dev_lock(req->hdev);
1864         hci_inquiry_cache_flush(req->hdev);
1865         hci_dev_unlock(req->hdev);
1866
1867         memset(&cp, 0, sizeof(cp));
1868
1869         if (req->hdev->discovery.limited)
1870                 memcpy(&cp.lap, liac, sizeof(cp.lap));
1871         else
1872                 memcpy(&cp.lap, giac, sizeof(cp.lap));
1873
1874         cp.length = length;
1875
1876         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1877
1878         return 0;
1879 }
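
/* Example: bredr_inquiry() is driven through hci_req_sync(); opt is the
 * inquiry length in units of 1.28 seconds. A minimal sketch, mirroring
 * the call in start_discovery() below:
 *
 *	u8 status;
 *
 *	hci_req_sync(hdev, bredr_inquiry, DISCOV_BREDR_INQUIRY_LEN,
 *		     HCI_CMD_TIMEOUT, &status);
 */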
1880
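/* Delayed work that fires when the LE scan duration expires. For
 * interleaved discovery it also kicks off the BR/EDR inquiry phase once
 * LE scanning has been disabled.
 */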
1881 static void le_scan_disable_work(struct work_struct *work)
1882 {
1883         struct hci_dev *hdev = container_of(work, struct hci_dev,
1884                                             le_scan_disable.work);
1885         u8 status;
1886
1887         BT_DBG("%s", hdev->name);
1888
1889         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1890                 return;
1891
1892         cancel_delayed_work(&hdev->le_scan_restart);
1893
1894         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1895         if (status) {
1896                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
1897                            status);
1898                 return;
1899         }
1900
1901         hdev->discovery.scan_start = 0;
1902
1903         /* If we were running an LE-only scan, change the discovery
1904          * state. If we were running both LE and BR/EDR inquiry
1905          * simultaneously, and BR/EDR inquiry is already finished, stop
1906          * discovery, otherwise BR/EDR inquiry will stop discovery when
1907          * it finishes. If we are still resolving a remote device name,
1908          * do not change the discovery state.
1909          */
1910
1911         if (hdev->discovery.type == DISCOV_TYPE_LE)
1912                 goto discov_stopped;
1913
1914         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1915                 return;
1916
1917         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1918                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1919                     hdev->discovery.state != DISCOVERY_RESOLVING)
1920                         goto discov_stopped;
1921
1922                 return;
1923         }
1924
1925         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1926                      HCI_CMD_TIMEOUT, &status);
1927         if (status) {
1928                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
1929                 goto discov_stopped;
1930         }
1931
1932         return;
1933
1934 discov_stopped:
1935         hci_dev_lock(hdev);
1936         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1937         hci_dev_unlock(hdev);
1938 }
1939
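/* Controllers with HCI_QUIRK_STRICT_DUPLICATE_FILTER suppress duplicate
 * advertising reports for the whole lifetime of a scan, so during
 * service discovery the scan is periodically restarted to flush the
 * controller's duplicate filter and keep results coming in.
 */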
1940 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1941 {
1942         struct hci_dev *hdev = req->hdev;
1943         struct hci_cp_le_set_scan_enable cp;
1944
1945         /* If the controller is not scanning, we are done. */
1946         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1947                 return 0;
1948
1949         hci_req_add_le_scan_disable(req);
1950
1951         memset(&cp, 0, sizeof(cp));
1952         cp.enable = LE_SCAN_ENABLE;
1953         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1954         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1955
1956         return 0;
1957 }
1958
1959 static void le_scan_restart_work(struct work_struct *work)
1960 {
1961         struct hci_dev *hdev = container_of(work, struct hci_dev,
1962                                             le_scan_restart.work);
1963         unsigned long timeout, duration, scan_start, now;
1964         u8 status;
1965
1966         BT_DBG("%s", hdev->name);
1967
1968         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1969         if (status) {
1970                 bt_dev_err(hdev, "failed to restart LE scan: status %d",
1971                            status);
1972                 return;
1973         }
1974
1975         hci_dev_lock(hdev);
1976
1977         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1978             !hdev->discovery.scan_start)
1979                 goto unlock;
1980
1981         /* When the scan was started, hdev->le_scan_disable was queued
1982          * to run at scan_start + duration. During the scan restart this
1983          * work has been canceled, so queue it again with the proper
1984          * timeout to make sure the scan does not run indefinitely.
1985          */
1986         duration = hdev->discovery.scan_duration;
1987         scan_start = hdev->discovery.scan_start;
1988         now = jiffies;
1989         if (now - scan_start <= duration) {
1990                 int elapsed;
1991
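                /* Handle a jiffies wraparound between scan_start
                 * and now when computing the elapsed time.
                 */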
1992                 if (now >= scan_start)
1993                         elapsed = now - scan_start;
1994                 else
1995                         elapsed = ULONG_MAX - scan_start + now;
1996
1997                 timeout = duration - elapsed;
1998         } else {
1999                 timeout = 0;
2000         }
2001
2002         queue_delayed_work(hdev->req_workqueue,
2003                            &hdev->le_scan_disable, timeout);
2004
2005 unlock:
2006         hci_dev_unlock(hdev);
2007 }
2008
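/* Request builder for an active LE scan: stop any ongoing advertising
 * and background scanning first, pick a suitable own address, then
 * program the scan parameters and enable scanning. The opt argument
 * carries the scan interval.
 */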
2009 static int active_scan(struct hci_request *req, unsigned long opt)
2010 {
2011         uint16_t interval = opt;
2012         struct hci_dev *hdev = req->hdev;
2013         struct hci_cp_le_set_scan_param param_cp;
2014         struct hci_cp_le_set_scan_enable enable_cp;
2015         u8 own_addr_type;
2016         int err;
2017
2018         BT_DBG("%s", hdev->name);
2019
2020         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2021                 hci_dev_lock(hdev);
2022
2023                 /* Don't let discovery abort an outgoing connection attempt
2024                  * that's using directed advertising.
2025                  */
2026                 if (hci_lookup_le_connect(hdev)) {
2027                         hci_dev_unlock(hdev);
2028                         return -EBUSY;
2029                 }
2030
2031                 cancel_adv_timeout(hdev);
2032                 hci_dev_unlock(hdev);
2033
2034                 __hci_req_disable_advertising(req);
2035         }
2036
2037         /* If the controller is scanning, the background scan is
2038          * running. Temporarily stop it so that the discovery scan
2039          * parameters can be set.
2040          */
2041         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2042                 hci_req_add_le_scan_disable(req);
2043
2044         /* All active scans will be done with either a resolvable private
2045          * address (when the privacy feature has been enabled) or a
2046          * non-resolvable private address.
2047          */
2048         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2049                                         &own_addr_type);
2050         if (err < 0)
2051                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2052
2053         memset(&param_cp, 0, sizeof(param_cp));
2054         param_cp.type = LE_SCAN_ACTIVE;
2055         param_cp.interval = cpu_to_le16(interval);
2056         param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2057         param_cp.own_address_type = own_addr_type;
2058
2059         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2060                     &param_cp);
2061
2062         memset(&enable_cp, 0, sizeof(enable_cp));
2063         enable_cp.enable = LE_SCAN_ENABLE;
2064         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2065
2066         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2067                     &enable_cp);
2068
2069         return 0;
2070 }
2071
2072 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2073 {
2074         int err;
2075
2076         BT_DBG("%s", req->hdev->name);
2077
2078         err = active_scan(req, opt);
2079         if (err)
2080                 return err;
2081
2082         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2083 }
2084
2085 static void start_discovery(struct hci_dev *hdev, u8 *status)
2086 {
2087         unsigned long timeout;
2088
2089         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2090
2091         switch (hdev->discovery.type) {
2092         case DISCOV_TYPE_BREDR:
2093                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2094                         hci_req_sync(hdev, bredr_inquiry,
2095                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2096                                      status);
2097                 return;
2098         case DISCOV_TYPE_INTERLEAVED:
2099                 /* When running simultaneous discovery, the LE scanning time
2100                  * should occupy the whole discovery time since BR/EDR inquiry
2101                  * and LE scanning are scheduled by the controller.
2102                  *
2103                  * For interleaved discovery, in comparison, BR/EDR inquiry
2104                  * and LE scanning are done sequentially with separate
2105                  * timeouts.
2106                  */
2107                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2108                              &hdev->quirks)) {
2109                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2110                         /* During simultaneous discovery, we double the LE
2111                          * scan interval to leave some time for the controller
2112                          * to do BR/EDR inquiry.
2113                          */
2114                         hci_req_sync(hdev, interleaved_discov,
2115                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2116                                      status);
2117                         break;
2118                 }
2119
2120                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2121                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2122                              HCI_CMD_TIMEOUT, status);
2123                 break;
2124         case DISCOV_TYPE_LE:
2125                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2126                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2127                              HCI_CMD_TIMEOUT, status);
2128                 break;
2129         default:
2130                 *status = HCI_ERROR_UNSPECIFIED;
2131                 return;
2132         }
2133
2134         if (*status)
2135                 return;
2136
2137         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2138
2139         /* When service discovery is used and the controller has a
2140          * strict duplicate filter, it is important to remember the
2141          * start and duration of the scan. This is required for
2142          * restarting scanning during the discovery phase.
2143          */
2144         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2145             hdev->discovery.result_filtering) {
2146                 hdev->discovery.scan_start = jiffies;
2147                 hdev->discovery.scan_duration = timeout;
2148         }
2149
2150         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2151                            timeout);
2152 }
2153
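/* Queue the commands needed to stop the current discovery and return
 * true if at least one command was added to the request, i.e. if the
 * caller still needs to run the request.
 */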
2154 bool hci_req_stop_discovery(struct hci_request *req)
2155 {
2156         struct hci_dev *hdev = req->hdev;
2157         struct discovery_state *d = &hdev->discovery;
2158         struct hci_cp_remote_name_req_cancel cp;
2159         struct inquiry_entry *e;
2160         bool ret = false;
2161
2162         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2163
2164         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2165                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2166                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2167
2168                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2169                         cancel_delayed_work(&hdev->le_scan_disable);
2170                         hci_req_add_le_scan_disable(req);
2171                 }
2172
2173                 ret = true;
2174         } else {
2175                 /* Passive scanning */
2176                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2177                         hci_req_add_le_scan_disable(req);
2178                         ret = true;
2179                 }
2180         }
2181
2182         /* No further actions needed for LE-only discovery */
2183         if (d->type == DISCOV_TYPE_LE)
2184                 return ret;
2185
2186         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2187                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2188                                                      NAME_PENDING);
2189                 if (!e)
2190                         return ret;
2191
2192                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2193                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2194                             &cp);
2195                 ret = true;
2196         }
2197
2198         return ret;
2199 }
2200
2201 static int stop_discovery(struct hci_request *req, unsigned long opt)
2202 {
2203         hci_dev_lock(req->hdev);
2204         hci_req_stop_discovery(req);
2205         hci_dev_unlock(req->hdev);
2206
2207         return 0;
2208 }
2209
2210 static void discov_update(struct work_struct *work)
2211 {
2212         struct hci_dev *hdev = container_of(work, struct hci_dev,
2213                                             discov_update);
2214         u8 status = 0;
2215
2216         switch (hdev->discovery.state) {
2217         case DISCOVERY_STARTING:
2218                 start_discovery(hdev, &status);
2219                 mgmt_start_discovery_complete(hdev, status);
2220                 if (status)
2221                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2222                 else
2223                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2224                 break;
2225         case DISCOVERY_STOPPING:
2226                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2227                 mgmt_stop_discovery_complete(hdev, status);
2228                 if (!status)
2229                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2230                 break;
2231         case DISCOVERY_STOPPED:
2232         default:
2233                 return;
2234         }
2235 }
2236
2237 static void discov_off(struct work_struct *work)
2238 {
2239         struct hci_dev *hdev = container_of(work, struct hci_dev,
2240                                             discov_off.work);
2241
2242         BT_DBG("%s", hdev->name);
2243
2244         hci_dev_lock(hdev);
2245
2246         /* When the discoverable timeout triggers, just make sure
2247          * the limited discoverable flag is cleared. Even in the case
2248          * of a timeout triggered from general discoverable, it is
2249          * safe to unconditionally clear the flag.
2250          */
2251         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2252         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2253         hdev->discov_timeout = 0;
2254
2255         hci_dev_unlock(hdev);
2256
2257         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2258         mgmt_new_settings(hdev);
2259 }
2260
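/* Bring the controller in sync with the stored settings after power on:
 * SSP and Secure Connections support, LE host features, advertising
 * state, link security, and the BR/EDR scan mode, class, name and EIR
 * data.
 */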
2261 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2262 {
2263         struct hci_dev *hdev = req->hdev;
2264         u8 link_sec;
2265
2266         hci_dev_lock(hdev);
2267
2268         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2269             !lmp_host_ssp_capable(hdev)) {
2270                 u8 mode = 0x01;
2271
2272                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2273
2274                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2275                         u8 support = 0x01;
2276
2277                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2278                                     sizeof(support), &support);
2279                 }
2280         }
2281
2282         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2283             lmp_bredr_capable(hdev)) {
2284                 struct hci_cp_write_le_host_supported cp;
2285
2286                 cp.le = 0x01;
2287                 cp.simul = 0x00;
2288
2289                 /* Check first if we already have the right
2290                  * host state (host features set).
2291                  */
2292                 if (cp.le != lmp_host_le_capable(hdev) ||
2293                     cp.simul != lmp_host_le_br_capable(hdev))
2294                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2295                                     sizeof(cp), &cp);
2296         }
2297
2298         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2299                 /* Make sure the controller has a good default for
2300                  * advertising data. This also applies to the case
2301                  * where BR/EDR was toggled during the AUTO_OFF phase.
2302                  */
2303                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2304                     list_empty(&hdev->adv_instances)) {
2305                         __hci_req_update_adv_data(req, 0x00);
2306                         __hci_req_update_scan_rsp_data(req, 0x00);
2307
2308                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2309                                 __hci_req_enable_advertising(req);
2310                 } else if (!list_empty(&hdev->adv_instances)) {
2311                         struct adv_info *adv_instance;
2312
2313                         adv_instance = list_first_entry(&hdev->adv_instances,
2314                                                         struct adv_info, list);
2315                         __hci_req_schedule_adv_instance(req,
2316                                                         adv_instance->instance,
2317                                                         true);
2318                 }
2319         }
2320
2321         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2322         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2323                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2324                             sizeof(link_sec), &link_sec);
2325
2326         if (lmp_bredr_capable(hdev)) {
2327                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2328                         __hci_req_write_fast_connectable(req, true);
2329                 else
2330                         __hci_req_write_fast_connectable(req, false);
2331                 __hci_req_update_scan(req);
2332                 __hci_req_update_class(req);
2333                 __hci_req_update_name(req);
2334                 __hci_req_update_eir(req);
2335         }
2336
2337         hci_dev_unlock(hdev);
2338         return 0;
2339 }
2340
2341 int __hci_req_hci_power_on(struct hci_dev *hdev)
2342 {
2343         /* Register the available SMP channels (BR/EDR and LE) only when
2344          * successfully powering on the controller. This late
2345          * registration is required so that LE SMP can clearly decide
2346          * whether the public address or the static address is used.
2347          */
2348         smp_register(hdev);
2349
2350         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2351                               NULL);
2352 }
2353
2354 void hci_request_setup(struct hci_dev *hdev)
2355 {
2356         INIT_WORK(&hdev->discov_update, discov_update);
2357         INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2358         INIT_WORK(&hdev->scan_update, scan_update_work);
2359         INIT_WORK(&hdev->connectable_update, connectable_update_work);
2360         INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2361         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2362         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2363         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2364         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2365 }
2366
2367 void hci_request_cancel_all(struct hci_dev *hdev)
2368 {
2369         hci_req_sync_cancel(hdev, ENODEV);
2370
2371         cancel_work_sync(&hdev->discov_update);
2372         cancel_work_sync(&hdev->bg_scan_update);
2373         cancel_work_sync(&hdev->scan_update);
2374         cancel_work_sync(&hdev->connectable_update);
2375         cancel_work_sync(&hdev->discoverable_update);
2376         cancel_delayed_work_sync(&hdev->discov_off);
2377         cancel_delayed_work_sync(&hdev->le_scan_disable);
2378         cancel_delayed_work_sync(&hdev->le_scan_restart);
2379
2380         if (hdev->adv_instance_timeout) {
2381                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2382                 hdev->adv_instance_timeout = 0;
2383         }
2384 }