/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

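/* An HCI request is built up on the stack and then spliced onto the
 * controller's command queue in one go. Minimal usage sketch (error
 * handling elided; the opcode and parameter are only illustrative, and
 * complete_cb stands for a caller-supplied hci_req_complete_t):
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	hci_req_run(&req, complete_cb);
 *
 * req_run() below is the common implementation behind hci_req_run()
 * and hci_req_run_skb().
 */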
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
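
/* Typical synchronous usage (a sketch; the opcode is only illustrative
 * and the caller must be allowed to sleep):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...parse skb->data...
 *	kfree_skb(skb);
 */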

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        /* Check the state after obtaining the lock to protect the HCI_UP
         * flag against any races from hci_dev_do_close() when the
         * controller gets removed.
         */
        if (test_bit(HCI_UP, &hdev->flags))
                ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        else
                ret = -ENETDOWN;
        hci_req_sync_unlock(hdev);

        return ret;
}

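/* Sketch of a request-builder callback as consumed by hci_req_sync().
 * The builder queues commands on @req; hci_req_sync() serializes and
 * waits for completion. my_builder is a hypothetical name; compare
 * update_scan() further down in this file for a real example:
 *
 *	static int my_builder(struct hci_request *req, unsigned long opt)
 *	{
 *		u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, my_builder, SCAN_PAGE, HCI_CMD_TIMEOUT,
 *			   &status);
 */
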
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

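/* The page scan activity values used below are in 0.625 ms slots:
 * interval 0x0100 = 160 ms, interval 0x0800 = 1.28 s and
 * window 0x0012 = 11.25 ms.
 */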
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we
 * start background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure that proper values are set for the RSSI threshold and
         * UUID filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

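/* The three helpers below build EIR "Service Class UUID" structures.
 * Each emits a length octet, an EIR type octet and as many UUIDs as
 * fit in the remaining @len bytes; when the buffer runs out, the type
 * is downgraded from the _ALL to the corresponding _SOME variant.
 */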
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

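/* Fill @data (a zeroed buffer of HCI_MAX_EIR_LENGTH bytes, see the
 * caller below) with the extended inquiry response: local name,
 * inquiry TX power, Device ID and the 16/32/128-bit service class
 * UUID lists.
 */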
static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

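/* Synchronize the controller white list with pend_le_conns and
 * pend_le_reports. Returns the scan filter policy to use: 0x01 when
 * the white list covers all pending devices, or 0x00 (accept all
 * advertising) when the white list cannot be used, e.g. because an
 * entry requires an RPA or the list would overflow.
 */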
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the white list.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Now that all no-longer-valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of devices is larger than the number of
         * available white list entries in the controller, then
         * just abort and return a filter policy value that does not
         * use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using resolvable random addresses,
         * i.e. LE privacy is enabled, controllers with Extended
         * Scanner Filter Policies support can additionally handle
         * directed advertising.
         *
         * So instead of using filter policies 0x00 (no white list)
         * and 0x01 (white list enabled) use the new filter policies
         * 0x02 (no white list) and 0x03 (white list enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when given an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

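/* Configure and enable legacy advertising based on the current
 * instance flags. The advertising type is ADV_IND when connectable,
 * ADV_SCAN_IND when non-connectable but carrying scan response data,
 * and ADV_NONCONN_IND otherwise. No commands are queued while an LE
 * connection exists.
 */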
void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        if (hci_conn_num(hdev, LE_LINK) > 0)
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t short_len;
        size_t complete_len;

        /* no space left for name (+ NULL + type + len) */
        if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
                return ad_len;

        /* use complete name if present and fits */
        complete_len = strlen(hdev->dev_name);
        if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
                return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
                                       hdev->dev_name, complete_len + 1);

        /* use short name if present */
        short_len = strlen(hdev->short_name);
        if (short_len)
                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
                                       hdev->short_name, short_len + 1);

        /* use shortened full name if present, we already know that name
         * is longer than HCI_MAX_SHORT_NAME_LENGTH
         */
        if (complete_len) {
                u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

                memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
                name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
                                       sizeof(name));
        }

        return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 scan_rsp_len = 0;

        if (hdev->appearance)
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

        return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

        memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_rsp_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        if (instance)
                len = create_instance_scan_rsp_data(hdev, instance, cp.data);
        else
                len = create_default_scan_rsp_data(hdev, cp.data);

        if (hdev->scan_rsp_data_len == len &&
            !memcmp(cp.data, hdev->scan_rsp_data, len))
                return;

        memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
        hdev->scan_rsp_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

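/* Build the advertising data for @instance into @ptr: a "Flags" AD
 * structure (unless the instance already supplies one or none is
 * needed), the instance advertising data, and a "TX Power" structure
 * when the instance requests it and a valid value is available.
 */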
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* If the instance already has the flags set, skip adding them
         * again.
         */
        if (adv_instance && eir_get_data(adv_instance->adv_data,
                                         adv_instance->adv_data_len, EIR_FLAGS,
                                         NULL))
                goto skip_flags;

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

skip_flags:
        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        /* Provide Tx Power only if we can provide a valid value for it */
        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
            (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
                ptr[0] = 0x02;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8)hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_instance_adv_data(hdev, instance, cp.data);

        /* There's nothing to do if the data hasn't changed */
        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                __hci_req_update_adv_data(&req, 0x00);
                __hci_req_update_scan_rsp_data(&req, 0x00);
                __hci_req_enable_advertising(&req);
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

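/* (Re)schedule advertising instance @instance and program its expiry
 * timer. With @force set, the HCI commands are issued even when the
 * same instance is already being advertised.
 */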
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
                                    bool force)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv_instance = NULL;
        u16 timeout;

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            list_empty(&hdev->adv_instances))
                return -EPERM;

        if (hdev->adv_instance_timeout)
                return -EBUSY;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return -ENOENT;

        /* A zero timeout means unlimited advertising. As long as there is
         * only one instance, duration should be ignored. We still set a timeout
         * in case further instances are being added later on.
         *
         * If the remaining lifetime of the instance is more than the duration
         * then the timeout corresponds to the duration, otherwise it will be
         * reduced to the remaining instance lifetime.
         */
        if (adv_instance->timeout == 0 ||
            adv_instance->duration <= adv_instance->remaining_time)
                timeout = adv_instance->duration;
        else
                timeout = adv_instance->remaining_time;

        /* The remaining time is being reduced unless the instance is being
         * advertised without time limit.
         */
        if (adv_instance->timeout)
                adv_instance->remaining_time =
                                adv_instance->remaining_time - timeout;

        hdev->adv_instance_timeout = timeout;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->adv_instance_expire,
                           msecs_to_jiffies(timeout * 1000));

        /* If we're just re-scheduling the same instance again then do not
         * execute any HCI commands. This happens when a single instance is
         * being advertised.
         */
        if (!force && hdev->cur_adv_instance == instance &&
            hci_dev_test_flag(hdev, HCI_LE_ADV))
                return 0;

        hdev->cur_adv_instance = instance;
        __hci_req_update_adv_data(req, instance);
        __hci_req_update_scan_rsp_data(req, instance);
        __hci_req_enable_advertising(req);

        return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
                                struct hci_request *req, u8 instance,
                                bool force)
{
        struct adv_info *adv_instance, *n, *next_instance = NULL;
        int err;
        u8 rem_inst;

        /* Cancel any timeout concerning the removed instance(s). */
        if (!instance || hdev->cur_adv_instance == instance)
                cancel_adv_timeout(hdev);

        /* Get the next instance to advertise BEFORE we remove
         * the current one. This can be the same instance again
         * if there is only one instance.
         */
        if (instance && hdev->cur_adv_instance == instance)
                next_instance = hci_get_next_instance(hdev, instance);

        if (instance == 0x00) {
                list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
                                         list) {
                        if (!(force || adv_instance->timeout))
                                continue;

                        rem_inst = adv_instance->instance;
                        err = hci_remove_adv_instance(hdev, rem_inst);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, rem_inst);
                }
        } else {
                adv_instance = hci_find_adv_instance(hdev, instance);

                if (force || (adv_instance && adv_instance->timeout &&
                              !adv_instance->remaining_time)) {
                        /* Don't advertise a removed instance. */
                        if (next_instance &&
                            next_instance->instance == instance)
                                next_instance = NULL;

                        err = hci_remove_adv_instance(hdev, instance);
                        if (!err)
                                mgmt_advertising_removed(sk, hdev, instance);
                }
        }

        if (!req || !hdev_is_powered(hdev) ||
            hci_dev_test_flag(hdev, HCI_ADVERTISING))
                return;

        if (next_instance)
                __hci_req_schedule_adv_instance(req, next_instance->instance,
                                                false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

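/* Pick the own-address type for an upcoming LE operation, queueing an
 * HCI_OP_LE_SET_RANDOM_ADDR command first when a new random address
 * must be programmed. In order of preference: a resolvable private
 * address (when @use_rpa), a non-resolvable private address (when
 * @require_privacy), the configured static address, and finally the
 * public address.
 */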
int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              bool use_rpa, u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * current RPA has expired or there is something else than
         * the current RPA in use, then generate a new one.
         */
        if (use_rpa) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

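/* Recompute the BR/EDR scan setting and queue a Write Scan Enable
 * command when the controller state needs to change: page scan when
 * connectable or when a white list device is disconnected, inquiry
 * scan additionally when discoverable.
 */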
void __hci_req_update_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
            test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
                return;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_req_update_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

static void scan_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

        hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        hci_dev_lock(hdev);

        __hci_req_update_scan(req);

        /* If BR/EDR is not enabled and we disable advertising as a
         * by-product of disabling connectable, we need to update the
         * advertising flags.
         */
        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                __hci_req_update_adv_data(req, hdev->cur_adv_instance);

        /* Update the advertising parameters if necessary */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
            !list_empty(&hdev->adv_instances))
                __hci_req_enable_advertising(req);

        __hci_update_background_scan(req);

        hci_dev_unlock(hdev);

        return 0;
}

static void connectable_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            connectable_update);
        u8 status;

        hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
        mgmt_set_connectable_complete(hdev, status);
}

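/* OR together the service class hints of all registered UUIDs to form
 * the service class octet of the Class of Device.
 */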
static u8 get_service_classes(struct hci_dev *hdev)
{
        struct bt_uuid *uuid;
        u8 val = 0;

        list_for_each_entry(uuid, &hdev->uuids, list)
                val |= uuid->svc_hint;

        return val;
}

void __hci_req_update_class(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 cod[3];

        BT_DBG("%s", hdev->name);

        if (!hdev_is_powered(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        cod[0] = hdev->minor_class;
        cod[1] = hdev->major_class;
        cod[2] = get_service_classes(hdev);

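        /* 0x20 in the middle octet corresponds to bit 13 of the Class
         * of Device, i.e. the Limited Discoverable Mode service class
         * bit.
         */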
        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                cod[1] |= 0x20;

        if (memcmp(cod, hdev->dev_class, 3) == 0)
                return;

        hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

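/* Program the Inquiry Access Codes the controller responds to. The LAP
 * values are written in little-endian order: 0x9e8b00 is the Limited
 * Inquiry Access Code (LIAC) and 0x9e8b33 the General Inquiry Access
 * Code (GIAC).
 */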
static void write_iac(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_current_iac_lap cp;

        if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                return;

        if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                /* Limited discoverable mode */
                cp.num_iac = min_t(u8, hdev->num_iac, 2);
                cp.iac_lap[0] = 0x00;   /* LIAC */
                cp.iac_lap[1] = 0x8b;
                cp.iac_lap[2] = 0x9e;
                cp.iac_lap[3] = 0x33;   /* GIAC */
                cp.iac_lap[4] = 0x8b;
                cp.iac_lap[5] = 0x9e;
        } else {
                /* General discoverable mode */
                cp.num_iac = 1;
                cp.iac_lap[0] = 0x33;   /* GIAC */
                cp.iac_lap[1] = 0x8b;
                cp.iac_lap[2] = 0x9e;
        }

        hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
                    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                write_iac(req);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
        }

        /* Advertising instances don't use the global discoverable setting, so
         * only update AD if advertising was enabled using Set Advertising.
         */
        if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
                __hci_req_update_adv_data(req, 0x00);

                /* Discoverable mode affects the local advertising
                 * address in limited privacy mode.
                 */
                if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                        __hci_req_enable_advertising(req);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discoverable_update);
        u8 status;

        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
        mgmt_set_discoverable_complete(hdev, status);
}

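/* Queue the HCI commands needed to tear down a connection, depending on
 * its current state: disconnect established links, cancel pending
 * connection attempts, and reject incoming connection requests.
 */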
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
                      u8 reason)
{
        switch (conn->state) {
        case BT_CONNECTED:
        case BT_CONFIG:
                if (conn->type == AMP_LINK) {
                        struct hci_cp_disconn_phy_link cp;

                        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
                        cp.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
                                    &cp);
                } else {
                        struct hci_cp_disconnect dc;

                        dc.handle = cpu_to_le16(conn->handle);
                        dc.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
                }

                conn->state = BT_DISCONN;

                break;
        case BT_CONNECT:
                if (conn->type == LE_LINK) {
                        if (test_bit(HCI_CONN_SCANNING, &conn->flags))
                                break;
                        hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
                                    0, NULL);
                } else if (conn->type == ACL_LINK) {
                        if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
                                break;
                        hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
                                    6, &conn->dst);
                }
                break;
        case BT_CONNECT2:
                if (conn->type == ACL_LINK) {
                        struct hci_cp_reject_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);
                        rej.reason = reason;

                        hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
                                    sizeof(rej), &rej);
                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                        struct hci_cp_reject_sync_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);

                        /* SCO rejection has its own limited set of
                         * allowed error values (0x0D-0x0F), which isn't
                         * compatible with most values passed to this
                         * function. To be safe, hard-code one of the
                         * values that is suitable for SCO.
                         */
                        rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

                        hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
                                    sizeof(rej), &rej);
                }
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        if (status)
                BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

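/* Build and run a one-shot request that aborts the given connection.
 * A return of -ENODATA means the request ended up empty (no command
 * needed to be sent) and is not treated as an error.
 */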
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
        struct hci_request req;
        int err;

        hci_req_init(&req, conn->hdev);

        __hci_abort_conn(&req, conn, reason);

        err = hci_req_run(&req, abort_conn_complete);
        if (err && err != -ENODATA) {
                BT_ERR("Failed to run HCI request: err %d", err);
                return err;
        }

        return 0;
}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_update_background_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

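/* Work callback that re-evaluates the background scan. If the request
 * fails, any LE connection attempt that is waiting on the scan is
 * failed as well.
 */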
static void bg_scan_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            bg_scan_update);
        struct hci_conn *conn;
        u8 status;
        int err;

        err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
        if (!err)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
        if (conn)
                hci_le_conn_failed(conn, status);

        hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
        hci_req_add_le_scan_disable(req);
        return 0;
}

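/* Flush the inquiry cache and start a BR/EDR inquiry, using the LIAC
 * when limited discovery was requested and the GIAC otherwise. The opt
 * parameter carries the inquiry length in units of 1.28 seconds.
 */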
static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
        u8 length = opt;
        const u8 giac[3] = { 0x33, 0x8b, 0x9e };
        const u8 liac[3] = { 0x00, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        BT_DBG("%s", req->hdev->name);

        hci_dev_lock(req->hdev);
        hci_inquiry_cache_flush(req->hdev);
        hci_dev_unlock(req->hdev);

        memset(&cp, 0, sizeof(cp));

        if (req->hdev->discovery.limited)
                memcpy(&cp.lap, liac, sizeof(cp.lap));
        else
                memcpy(&cp.lap, giac, sizeof(cp.lap));

        cp.length = length;

        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

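/* Delayed work that stops LE scanning once the discovery timeout
 * expires. For interleaved discovery it may hand over to a BR/EDR
 * inquiry instead of stopping discovery outright.
 */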
static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        u8 status;

        BT_DBG("%s", hdev->name);

        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return;

        cancel_delayed_work(&hdev->le_scan_restart);

        hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Failed to disable LE scan: status 0x%02x", status);
                return;
        }

        hdev->discovery.scan_start = 0;

        /* If we were running an LE-only scan, change the discovery
         * state. If we were running LE scanning and BR/EDR inquiry
         * simultaneously and the inquiry has already finished, stop
         * discovery; otherwise the BR/EDR inquiry will stop discovery
         * when it finishes. If a remote device name is still being
         * resolved, do not change the discovery state.
         */

        if (hdev->discovery.type == DISCOV_TYPE_LE)
                goto discov_stopped;

        if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
                return;

        if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
                if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
                    hdev->discovery.state != DISCOVERY_RESOLVING)
                        goto discov_stopped;

                return;
        }

        hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
                     HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Inquiry failed: status 0x%02x", status);
                goto discov_stopped;
        }

        return;

discov_stopped:
        hci_dev_lock(hdev);
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        hci_dev_unlock(hdev);
}

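/* Restart LE scanning by disabling and immediately re-enabling it with
 * the current parameters. Re-enabling the scan resets the controller's
 * duplicate filtering, so already-seen devices get reported again; see
 * the strict duplicate filter quirk handling in le_scan_restart_work()
 * below.
 */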
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_enable cp;

        /* If the controller is not scanning, we are done. */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return 0;

        hci_req_add_le_scan_disable(req);

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_ENABLE;
        cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

        return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_restart.work);
        unsigned long timeout, duration, scan_start, now;
        u8 status;

        BT_DBG("%s", hdev->name);

        hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Failed to restart LE scan: status %d", status);
                return;
        }

        hci_dev_lock(hdev);

        if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
            !hdev->discovery.scan_start)
                goto unlock;

        /* When the scan was started, hdev->le_scan_disable was queued
         * to run 'duration' after scan_start. That work was canceled
         * during the scan restart, so queue it again with the remaining
         * timeout to make sure the scan does not run indefinitely.
         */
        duration = hdev->discovery.scan_duration;
        scan_start = hdev->discovery.scan_start;
        now = jiffies;
        if (now - scan_start <= duration) {
                int elapsed;

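                /* Account for a jiffies counter wraparound between
                 * scan_start and now.
                 */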
                if (now >= scan_start)
                        elapsed = now - scan_start;
                else
                        elapsed = ULONG_MAX - scan_start + now;

                timeout = duration - elapsed;
        } else {
                timeout = 0;
        }

        queue_delayed_work(hdev->req_workqueue,
                           &hdev->le_scan_disable, timeout);

unlock:
        hci_dev_unlock(hdev);
}

static void disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

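/* Set up and enable an active LE scan for discovery. Any ongoing
 * advertising (unless a directed connection attempt depends on it) and
 * any running passive scan are stopped first. The opt parameter carries
 * the scan interval to use.
 */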
static int active_scan(struct hci_request *req, unsigned long opt)
{
        uint16_t interval = opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        u8 own_addr_type;
        int err;

        BT_DBG("%s", hdev->name);

        if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
                hci_dev_lock(hdev);

                /* Don't let discovery abort an outgoing connection attempt
                 * that's using directed advertising.
                 */
                if (hci_lookup_le_connect(hdev)) {
                        hci_dev_unlock(hdev);
                        return -EBUSY;
                }

                cancel_adv_timeout(hdev);
                hci_dev_unlock(hdev);

                disable_advertising(req);
        }

        /* If the controller is scanning, the background scan is
         * running. Temporarily stop it so that the discovery scanning
         * parameters can be set.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                hci_req_add_le_scan_disable(req);

        /* All active scans will be done with either a resolvable
         * private address (when the privacy feature has been enabled)
         * or a non-resolvable private address.
         */
        err = hci_update_random_address(req, true, scan_use_rpa(hdev),
                                        &own_addr_type);
        if (err < 0)
                own_addr_type = ADDR_LE_DEV_PUBLIC;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_ACTIVE;
        param_cp.interval = cpu_to_le16(interval);
        param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
        param_cp.own_address_type = own_addr_type;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);

        return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
        int err;

        BT_DBG("%s", req->hdev->name);

        err = active_scan(req, opt);
        if (err)
                return err;

        return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

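/* Kick off discovery according to hdev->discovery.type and schedule the
 * delayed work that disables the LE scan when the discovery timeout
 * expires.
 */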
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
        unsigned long timeout;

        BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_BREDR:
                if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
                        hci_req_sync(hdev, bredr_inquiry,
                                     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
                                     status);
                return;
        case DISCOV_TYPE_INTERLEAVED:
                /* When running simultaneous discovery, the LE scanning
                 * time should occupy the whole discovery time since
                 * BR/EDR inquiry and LE scanning are scheduled by the
                 * controller.
                 *
                 * For interleaved discovery, in comparison, BR/EDR
                 * inquiry and LE scanning are done sequentially with
                 * separate timeouts.
                 */
                if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
                             &hdev->quirks)) {
                        timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                        /* During simultaneous discovery, we double the
                         * LE scan interval to leave the controller some
                         * time for BR/EDR inquiry.
                         */
                        hci_req_sync(hdev, interleaved_discov,
                                     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
                                     status);
                        break;
                }

                timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
                             HCI_CMD_TIMEOUT, status);
                break;
        case DISCOV_TYPE_LE:
                timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
                             HCI_CMD_TIMEOUT, status);
                break;
        default:
                *status = HCI_ERROR_UNSPECIFIED;
                return;
        }

        if (*status)
                return;

        BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

        /* When service discovery is used and the controller has a
         * strict duplicate filter, it is important to remember the
         * start and duration of the scan. This is required for
         * restarting scanning during the discovery phase.
         */
        if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
                     hdev->discovery.result_filtering) {
                hdev->discovery.scan_start = jiffies;
                hdev->discovery.scan_duration = timeout;
        }

        queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
                           timeout);
}

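/* Queue the commands needed to stop the current discovery: cancel a
 * running inquiry and/or LE scan, and cancel a pending remote name
 * request. Returns true if there was an active discovery to wind down.
 */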
bool hci_req_stop_discovery(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct discovery_state *d = &hdev->discovery;
        struct hci_cp_remote_name_req_cancel cp;
        struct inquiry_entry *e;
        bool ret = false;

        BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

        if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
                if (test_bit(HCI_INQUIRY, &hdev->flags))
                        hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_delayed_work(&hdev->le_scan_disable);
                        hci_req_add_le_scan_disable(req);
                }

                ret = true;
        } else {
                /* Passive scanning */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        hci_req_add_le_scan_disable(req);
                        ret = true;
                }
        }

        /* No further actions needed for LE-only discovery */
        if (d->type == DISCOV_TYPE_LE)
                return ret;

        if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
                                                     NAME_PENDING);
                if (!e)
                        return ret;

                bacpy(&cp.bdaddr, &e->data.bdaddr);
                hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
                            &cp);
                ret = true;
        }

        return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        hci_req_stop_discovery(req);
        hci_dev_unlock(req->hdev);

        return 0;
}

static void discov_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_update);
        u8 status = 0;

        switch (hdev->discovery.state) {
        case DISCOVERY_STARTING:
                start_discovery(hdev, &status);
                mgmt_start_discovery_complete(hdev, status);
                if (status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                else
                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
                break;
        case DISCOVERY_STOPPING:
                hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
                mgmt_stop_discovery_complete(hdev, status);
                if (!status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                break;
        case DISCOVERY_STOPPED:
        default:
                return;
        }
}

static void discov_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        /* When the discoverable timeout triggers, just make sure that
         * the limited discoverable flag is cleared. Even for a timeout
         * triggered from general discoverable, it is safe to
         * unconditionally clear the flag.
         */
        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);

        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
        mgmt_new_settings(hdev);
}

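/* Bring the controller configuration in line with the current mgmt
 * settings after powering on: SSP and Secure Connections support, LE
 * host support, advertising state, link security, and the BR/EDR scan
 * mode, device class, name and EIR data.
 */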
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 link_sec;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            !lmp_host_ssp_capable(hdev)) {
                u8 mode = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

                if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
                        u8 support = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                                    sizeof(support), &support);
                }
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
            lmp_bredr_capable(hdev)) {
                struct hci_cp_write_le_host_supported cp;

                cp.le = 0x01;
                cp.simul = 0x00;

                /* Check first if we already have the right
                 * host state (host features set)
                 */
                if (cp.le != lmp_host_le_capable(hdev) ||
                    cp.simul != lmp_host_le_br_capable(hdev))
                        hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
                                    sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                /* Make sure the controller has a good default for
                 * advertising data. This also applies to the case
                 * where BR/EDR was toggled during the AUTO_OFF phase.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
                    list_empty(&hdev->adv_instances)) {
                        __hci_req_update_adv_data(req, 0x00);
                        __hci_req_update_scan_rsp_data(req, 0x00);

                        if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
                                __hci_req_enable_advertising(req);
                } else if (!list_empty(&hdev->adv_instances)) {
                        struct adv_info *adv_instance;

                        adv_instance = list_first_entry(&hdev->adv_instances,
                                                        struct adv_info, list);
                        __hci_req_schedule_adv_instance(req,
                                                        adv_instance->instance,
                                                        true);
                }
        }

        link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
                            sizeof(link_sec), &link_sec);

        if (lmp_bredr_capable(hdev)) {
                if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
                        __hci_req_write_fast_connectable(req, true);
                else
                        __hci_req_write_fast_connectable(req, false);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
                __hci_req_update_name(req);
                __hci_req_update_eir(req);
        }

        hci_dev_unlock(hdev);
        return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
        /* Register the available SMP channels (BR/EDR and LE) only when
         * successfully powering on the controller. This late
         * registration is required so that LE SMP can clearly decide if
         * the public address or static address is used.
         */
        smp_register(hdev);

        return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
                              NULL);
}

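/* Initialize the work items used by the request infrastructure. Its
 * counterpart hci_request_cancel_all() below cancels any of them that
 * are still pending.
 */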
void hci_request_setup(struct hci_dev *hdev)
{
        INIT_WORK(&hdev->discov_update, discov_update);
        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
        INIT_WORK(&hdev->scan_update, scan_update_work);
        INIT_WORK(&hdev->connectable_update, connectable_update_work);
        INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
        INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
        INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
        hci_req_sync_cancel(hdev, ENODEV);

        cancel_work_sync(&hdev->discov_update);
        cancel_work_sync(&hdev->bg_scan_update);
        cancel_work_sync(&hdev->scan_update);
        cancel_work_sync(&hdev->connectable_update);
        cancel_work_sync(&hdev->discoverable_update);
        cancel_delayed_work_sync(&hdev->discov_off);
        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work_sync(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }
}