2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2014 Intel Corporation
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
24 #include <linux/sched/signal.h>
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
31 #include "hci_request.h"
33 #define HCI_REQ_DONE 0
34 #define HCI_REQ_PEND 1
35 #define HCI_REQ_CANCELED 2
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
39 skb_queue_head_init(&req->cmd_q);
44 void hci_req_purge(struct hci_request *req)
46 skb_queue_purge(&req->cmd_q);
49 bool hci_req_status_pend(struct hci_dev *hdev)
51 return hdev->req_status == HCI_REQ_PEND;
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
57 struct hci_dev *hdev = req->hdev;
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
67 skb_queue_purge(&req->cmd_q);
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
75 skb = skb_peek_tail(&req->cmd_q);
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
87 queue_work(hdev->workqueue, &hdev->cmd_work);
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
94 return req_run(req, complete, NULL);
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
99 return req_run(req, NULL, complete);
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
111 hdev->req_skb = skb_get(skb);
112 wake_up_interruptible(&hdev->req_wait_q);
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 const void *param, u8 event, u32 timeout)
130 struct hci_request req;
134 BT_DBG("%s", hdev->name);
136 hci_req_init(&req, hdev);
138 hci_req_add_ev(&req, opcode, plen, param, event);
140 hdev->req_status = HCI_REQ_PEND;
142 err = hci_req_run_skb(&req, hci_req_sync_complete);
146 err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 hdev->req_status != HCI_REQ_PEND, timeout);
149 if (err == -ERESTARTSYS)
150 return ERR_PTR(-EINTR);
152 switch (hdev->req_status) {
154 err = -bt_to_errno(hdev->req_result);
157 case HCI_REQ_CANCELED:
158 err = -hdev->req_result;
166 hdev->req_status = hdev->req_result = 0;
168 hdev->req_skb = NULL;
170 BT_DBG("%s end: err %d", hdev->name, err);
178 return ERR_PTR(-ENODATA);
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 const void *param, u32 timeout)
187 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
189 EXPORT_SYMBOL(__hci_cmd_sync);
191 /* Execute request and wait for completion. */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
194 unsigned long opt, u32 timeout, u8 *hci_status)
196 struct hci_request req;
199 BT_DBG("%s start", hdev->name);
201 hci_req_init(&req, hdev);
203 hdev->req_status = HCI_REQ_PEND;
205 err = func(&req, opt);
208 *hci_status = HCI_ERROR_UNSPECIFIED;
212 err = hci_req_run_skb(&req, hci_req_sync_complete);
214 hdev->req_status = 0;
216 /* ENODATA means the HCI request command queue is empty.
217 * This can happen when a request with conditionals doesn't
218 * trigger any commands to be sent. This is normal behavior
219 * and should not trigger an error return.
221 if (err == -ENODATA) {
228 *hci_status = HCI_ERROR_UNSPECIFIED;
233 err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 hdev->req_status != HCI_REQ_PEND, timeout);
236 if (err == -ERESTARTSYS)
239 switch (hdev->req_status) {
241 err = -bt_to_errno(hdev->req_result);
243 *hci_status = hdev->req_result;
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
249 *hci_status = HCI_ERROR_UNSPECIFIED;
255 *hci_status = HCI_ERROR_UNSPECIFIED;
259 kfree_skb(hdev->req_skb);
260 hdev->req_skb = NULL;
261 hdev->req_status = hdev->req_result = 0;
263 BT_DBG("%s end: err %d", hdev->name, err);
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
270 unsigned long opt, u32 timeout, u8 *hci_status)
274 /* Serialize all requests */
275 hci_req_sync_lock(hdev);
276 /* check the state after obtaing the lock to protect the HCI_UP
277 * against any races from hci_dev_do_close when the controller
280 if (test_bit(HCI_UP, &hdev->flags))
281 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
284 hci_req_sync_unlock(hdev);
289 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
292 int len = HCI_COMMAND_HDR_SIZE + plen;
293 struct hci_command_hdr *hdr;
296 skb = bt_skb_alloc(len, GFP_ATOMIC);
300 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
301 hdr->opcode = cpu_to_le16(opcode);
305 skb_put_data(skb, param, plen);
307 BT_DBG("skb len %d", skb->len);
309 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310 hci_skb_opcode(skb) = opcode;
315 /* Queue a command to an asynchronous HCI request */
316 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
317 const void *param, u8 event)
319 struct hci_dev *hdev = req->hdev;
322 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
324 /* If an error occurred during request building, there is no point in
325 * queueing the HCI command. We can simply return.
330 skb = hci_prepare_cmd(hdev, opcode, plen, param);
332 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
338 if (skb_queue_empty(&req->cmd_q))
339 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
341 bt_cb(skb)->hci.req_event = event;
343 skb_queue_tail(&req->cmd_q, skb);
346 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
349 hci_req_add_ev(req, opcode, plen, param, 0);
352 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
354 struct hci_dev *hdev = req->hdev;
355 struct hci_cp_write_page_scan_activity acp;
358 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
361 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
365 type = PAGE_SCAN_TYPE_INTERLACED;
367 /* 160 msec page scan interval */
368 acp.interval = cpu_to_le16(0x0100);
370 type = PAGE_SCAN_TYPE_STANDARD; /* default */
372 /* default 1.28 sec page scan */
373 acp.interval = cpu_to_le16(0x0800);
376 acp.window = cpu_to_le16(0x0012);
378 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
379 __cpu_to_le16(hdev->page_scan_window) != acp.window)
380 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
383 if (hdev->page_scan_type != type)
384 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
387 /* This function controls the background scanning based on hdev->pend_le_conns
388 * list. If there are pending LE connection we start the background scanning,
389 * otherwise we stop it.
391 * This function requires the caller holds hdev->lock.
393 static void __hci_update_background_scan(struct hci_request *req)
395 struct hci_dev *hdev = req->hdev;
397 if (!test_bit(HCI_UP, &hdev->flags) ||
398 test_bit(HCI_INIT, &hdev->flags) ||
399 hci_dev_test_flag(hdev, HCI_SETUP) ||
400 hci_dev_test_flag(hdev, HCI_CONFIG) ||
401 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
402 hci_dev_test_flag(hdev, HCI_UNREGISTER))
405 /* No point in doing scanning if LE support hasn't been enabled */
406 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
409 /* If discovery is active don't interfere with it */
410 if (hdev->discovery.state != DISCOVERY_STOPPED)
413 /* Reset RSSI and UUID filters when starting background scanning
414 * since these filters are meant for service discovery only.
416 * The Start Discovery and Start Service Discovery operations
417 * ensure to set proper values for RSSI threshold and UUID
418 * filter list. So it is safe to just reset them here.
420 hci_discovery_filter_clear(hdev);
422 if (list_empty(&hdev->pend_le_conns) &&
423 list_empty(&hdev->pend_le_reports)) {
424 /* If there is no pending LE connections or devices
425 * to be scanned for, we should stop the background
429 /* If controller is not scanning we are done. */
430 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
433 hci_req_add_le_scan_disable(req);
435 BT_DBG("%s stopping background scanning", hdev->name);
437 /* If there is at least one pending LE connection, we should
438 * keep the background scan running.
441 /* If controller is connecting, we should not start scanning
442 * since some controllers are not able to scan and connect at
445 if (hci_lookup_le_connect(hdev))
448 /* If controller is currently scanning, we stop it to ensure we
449 * don't miss any advertising (due to duplicates filter).
451 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
452 hci_req_add_le_scan_disable(req);
454 hci_req_add_le_passive_scan(req);
456 BT_DBG("%s starting background scanning", hdev->name);
460 void __hci_req_update_name(struct hci_request *req)
462 struct hci_dev *hdev = req->hdev;
463 struct hci_cp_write_local_name cp;
465 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
467 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
470 #define PNP_INFO_SVCLASS_ID 0x1200
472 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
474 u8 *ptr = data, *uuids_start = NULL;
475 struct bt_uuid *uuid;
480 list_for_each_entry(uuid, &hdev->uuids, list) {
483 if (uuid->size != 16)
486 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
490 if (uuid16 == PNP_INFO_SVCLASS_ID)
496 uuids_start[1] = EIR_UUID16_ALL;
500 /* Stop if not enough space to put next UUID */
501 if ((ptr - data) + sizeof(u16) > len) {
502 uuids_start[1] = EIR_UUID16_SOME;
506 *ptr++ = (uuid16 & 0x00ff);
507 *ptr++ = (uuid16 & 0xff00) >> 8;
508 uuids_start[0] += sizeof(uuid16);
514 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
516 u8 *ptr = data, *uuids_start = NULL;
517 struct bt_uuid *uuid;
522 list_for_each_entry(uuid, &hdev->uuids, list) {
523 if (uuid->size != 32)
529 uuids_start[1] = EIR_UUID32_ALL;
533 /* Stop if not enough space to put next UUID */
534 if ((ptr - data) + sizeof(u32) > len) {
535 uuids_start[1] = EIR_UUID32_SOME;
539 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
541 uuids_start[0] += sizeof(u32);
547 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
549 u8 *ptr = data, *uuids_start = NULL;
550 struct bt_uuid *uuid;
555 list_for_each_entry(uuid, &hdev->uuids, list) {
556 if (uuid->size != 128)
562 uuids_start[1] = EIR_UUID128_ALL;
566 /* Stop if not enough space to put next UUID */
567 if ((ptr - data) + 16 > len) {
568 uuids_start[1] = EIR_UUID128_SOME;
572 memcpy(ptr, uuid->uuid, 16);
574 uuids_start[0] += 16;
580 static void create_eir(struct hci_dev *hdev, u8 *data)
585 name_len = strlen(hdev->dev_name);
591 ptr[1] = EIR_NAME_SHORT;
593 ptr[1] = EIR_NAME_COMPLETE;
595 /* EIR Data length */
596 ptr[0] = name_len + 1;
598 memcpy(ptr + 2, hdev->dev_name, name_len);
600 ptr += (name_len + 2);
603 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
605 ptr[1] = EIR_TX_POWER;
606 ptr[2] = (u8) hdev->inq_tx_power;
611 if (hdev->devid_source > 0) {
613 ptr[1] = EIR_DEVICE_ID;
615 put_unaligned_le16(hdev->devid_source, ptr + 2);
616 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
617 put_unaligned_le16(hdev->devid_product, ptr + 6);
618 put_unaligned_le16(hdev->devid_version, ptr + 8);
623 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
624 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
625 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
628 void __hci_req_update_eir(struct hci_request *req)
630 struct hci_dev *hdev = req->hdev;
631 struct hci_cp_write_eir cp;
633 if (!hdev_is_powered(hdev))
636 if (!lmp_ext_inq_capable(hdev))
639 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
642 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
645 memset(&cp, 0, sizeof(cp));
647 create_eir(hdev, cp.data);
649 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
652 memcpy(hdev->eir, cp.data, sizeof(cp.data));
654 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
657 void hci_req_add_le_scan_disable(struct hci_request *req)
659 struct hci_dev *hdev = req->hdev;
661 if (use_ext_scan(hdev)) {
662 struct hci_cp_le_set_ext_scan_enable cp;
664 memset(&cp, 0, sizeof(cp));
665 cp.enable = LE_SCAN_DISABLE;
666 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
669 struct hci_cp_le_set_scan_enable cp;
671 memset(&cp, 0, sizeof(cp));
672 cp.enable = LE_SCAN_DISABLE;
673 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
677 static void add_to_white_list(struct hci_request *req,
678 struct hci_conn_params *params)
680 struct hci_cp_le_add_to_white_list cp;
682 cp.bdaddr_type = params->addr_type;
683 bacpy(&cp.bdaddr, ¶ms->addr);
685 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
688 static u8 update_white_list(struct hci_request *req)
690 struct hci_dev *hdev = req->hdev;
691 struct hci_conn_params *params;
692 struct bdaddr_list *b;
693 uint8_t white_list_entries = 0;
695 /* Go through the current white list programmed into the
696 * controller one by one and check if that address is still
697 * in the list of pending connections or list of devices to
698 * report. If not present in either list, then queue the
699 * command to remove it from the controller.
701 list_for_each_entry(b, &hdev->le_white_list, list) {
702 /* If the device is neither in pend_le_conns nor
703 * pend_le_reports then remove it from the whitelist.
705 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
706 &b->bdaddr, b->bdaddr_type) &&
707 !hci_pend_le_action_lookup(&hdev->pend_le_reports,
708 &b->bdaddr, b->bdaddr_type)) {
709 struct hci_cp_le_del_from_white_list cp;
711 cp.bdaddr_type = b->bdaddr_type;
712 bacpy(&cp.bdaddr, &b->bdaddr);
714 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
719 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
720 /* White list can not be used with RPAs */
724 white_list_entries++;
727 /* Since all no longer valid white list entries have been
728 * removed, walk through the list of pending connections
729 * and ensure that any new device gets programmed into
732 * If the list of the devices is larger than the list of
733 * available white list entries in the controller, then
734 * just abort and return filer policy value to not use the
737 list_for_each_entry(params, &hdev->pend_le_conns, action) {
738 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
739 ¶ms->addr, params->addr_type))
742 if (white_list_entries >= hdev->le_white_list_size) {
743 /* Select filter policy to accept all advertising */
747 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
748 params->addr_type)) {
749 /* White list can not be used with RPAs */
753 white_list_entries++;
754 add_to_white_list(req, params);
757 /* After adding all new pending connections, walk through
758 * the list of pending reports and also add these to the
759 * white list if there is still space.
761 list_for_each_entry(params, &hdev->pend_le_reports, action) {
762 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
763 ¶ms->addr, params->addr_type))
766 if (white_list_entries >= hdev->le_white_list_size) {
767 /* Select filter policy to accept all advertising */
771 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
772 params->addr_type)) {
773 /* White list can not be used with RPAs */
777 white_list_entries++;
778 add_to_white_list(req, params);
781 /* Select filter policy to use white list */
785 static bool scan_use_rpa(struct hci_dev *hdev)
787 return hci_dev_test_flag(hdev, HCI_PRIVACY);
790 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
791 u16 window, u8 own_addr_type, u8 filter_policy)
793 struct hci_dev *hdev = req->hdev;
795 /* Use ext scanning if set ext scan param and ext scan enable is
798 if (use_ext_scan(hdev)) {
799 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
800 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
801 struct hci_cp_le_scan_phy_params *phy_params;
802 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
805 ext_param_cp = (void *)data;
806 phy_params = (void *)ext_param_cp->data;
808 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
809 ext_param_cp->own_addr_type = own_addr_type;
810 ext_param_cp->filter_policy = filter_policy;
812 plen = sizeof(*ext_param_cp);
814 if (scan_1m(hdev) || scan_2m(hdev)) {
815 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
817 memset(phy_params, 0, sizeof(*phy_params));
818 phy_params->type = type;
819 phy_params->interval = cpu_to_le16(interval);
820 phy_params->window = cpu_to_le16(window);
822 plen += sizeof(*phy_params);
826 if (scan_coded(hdev)) {
827 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
829 memset(phy_params, 0, sizeof(*phy_params));
830 phy_params->type = type;
831 phy_params->interval = cpu_to_le16(interval);
832 phy_params->window = cpu_to_le16(window);
834 plen += sizeof(*phy_params);
838 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
841 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
842 ext_enable_cp.enable = LE_SCAN_ENABLE;
843 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
845 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
846 sizeof(ext_enable_cp), &ext_enable_cp);
848 struct hci_cp_le_set_scan_param param_cp;
849 struct hci_cp_le_set_scan_enable enable_cp;
851 memset(¶m_cp, 0, sizeof(param_cp));
852 param_cp.type = type;
853 param_cp.interval = cpu_to_le16(interval);
854 param_cp.window = cpu_to_le16(window);
855 param_cp.own_address_type = own_addr_type;
856 param_cp.filter_policy = filter_policy;
857 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
860 memset(&enable_cp, 0, sizeof(enable_cp));
861 enable_cp.enable = LE_SCAN_ENABLE;
862 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
863 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
868 void hci_req_add_le_passive_scan(struct hci_request *req)
870 struct hci_dev *hdev = req->hdev;
874 /* Set require_privacy to false since no SCAN_REQ are send
875 * during passive scanning. Not using an non-resolvable address
876 * here is important so that peer devices using direct
877 * advertising with our address will be correctly reported
880 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
884 /* Adding or removing entries from the white list must
885 * happen before enabling scanning. The controller does
886 * not allow white list modification while scanning.
888 filter_policy = update_white_list(req);
890 /* When the controller is using random resolvable addresses and
891 * with that having LE privacy enabled, then controllers with
892 * Extended Scanner Filter Policies support can now enable support
893 * for handling directed advertising.
895 * So instead of using filter polices 0x00 (no whitelist)
896 * and 0x01 (whitelist enabled) use the new filter policies
897 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
899 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
900 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
901 filter_policy |= 0x02;
903 hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
904 hdev->le_scan_window, own_addr_type, filter_policy);
907 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
909 struct adv_info *adv_instance;
911 /* Ignore instance 0 */
912 if (instance == 0x00)
915 adv_instance = hci_find_adv_instance(hdev, instance);
919 /* TODO: Take into account the "appearance" and "local-name" flags here.
920 * These are currently being ignored as they are not supported.
922 return adv_instance->scan_rsp_len;
925 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
927 u8 instance = hdev->cur_adv_instance;
928 struct adv_info *adv_instance;
930 /* Ignore instance 0 */
931 if (instance == 0x00)
934 adv_instance = hci_find_adv_instance(hdev, instance);
938 /* TODO: Take into account the "appearance" and "local-name" flags here.
939 * These are currently being ignored as they are not supported.
941 return adv_instance->scan_rsp_len;
944 void __hci_req_disable_advertising(struct hci_request *req)
946 if (ext_adv_capable(req->hdev)) {
947 struct hci_cp_le_set_ext_adv_enable cp;
950 /* Disable all sets since we only support one set at the moment */
951 cp.num_of_sets = 0x00;
953 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
957 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
961 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
964 struct adv_info *adv_instance;
966 if (instance == 0x00) {
967 /* Instance 0 always manages the "Tx Power" and "Flags"
970 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
972 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
973 * corresponds to the "connectable" instance flag.
975 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
976 flags |= MGMT_ADV_FLAG_CONNECTABLE;
978 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
979 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
980 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
981 flags |= MGMT_ADV_FLAG_DISCOV;
986 adv_instance = hci_find_adv_instance(hdev, instance);
988 /* Return 0 when we got an invalid instance identifier. */
992 return adv_instance->flags;
995 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
997 /* If privacy is not enabled don't use RPA */
998 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1001 /* If basic privacy mode is enabled use RPA */
1002 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1005 /* If limited privacy mode is enabled don't use RPA if we're
1006 * both discoverable and bondable.
1008 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1009 hci_dev_test_flag(hdev, HCI_BONDABLE))
1012 /* We're neither bondable nor discoverable in the limited
1013 * privacy mode, therefore use RPA.
1018 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1020 /* If there is no connection we are OK to advertise. */
1021 if (hci_conn_num(hdev, LE_LINK) == 0)
1024 /* Check le_states if there is any connection in slave role. */
1025 if (hdev->conn_hash.le_num_slave > 0) {
1026 /* Slave connection state and non connectable mode bit 20. */
1027 if (!connectable && !(hdev->le_states[2] & 0x10))
1030 /* Slave connection state and connectable mode bit 38
1031 * and scannable bit 21.
1033 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1034 !(hdev->le_states[2] & 0x20)))
1038 /* Check le_states if there is any connection in master role. */
1039 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1040 /* Master connection state and non connectable mode bit 18. */
1041 if (!connectable && !(hdev->le_states[2] & 0x02))
1044 /* Master connection state and connectable mode bit 35 and
1047 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1048 !(hdev->le_states[2] & 0x08)))
1055 void __hci_req_enable_advertising(struct hci_request *req)
1057 struct hci_dev *hdev = req->hdev;
1058 struct hci_cp_le_set_adv_param cp;
1059 u8 own_addr_type, enable = 0x01;
1063 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1065 /* If the "connectable" instance flag was not set, then choose between
1066 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1068 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1069 mgmt_get_connectable(hdev);
1071 if (!is_advertising_allowed(hdev, connectable))
1074 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1075 __hci_req_disable_advertising(req);
1077 /* Clear the HCI_LE_ADV bit temporarily so that the
1078 * hci_update_random_address knows that it's safe to go ahead
1079 * and write a new random address. The flag will be set back on
1080 * as soon as the SET_ADV_ENABLE HCI command completes.
1082 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1084 /* Set require_privacy to true only when non-connectable
1085 * advertising is used. In that case it is fine to use a
1086 * non-resolvable private address.
1088 if (hci_update_random_address(req, !connectable,
1089 adv_use_rpa(hdev, flags),
1090 &own_addr_type) < 0)
1093 memset(&cp, 0, sizeof(cp));
1094 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1095 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1098 cp.type = LE_ADV_IND;
1099 else if (get_cur_adv_instance_scan_rsp_len(hdev))
1100 cp.type = LE_ADV_SCAN_IND;
1102 cp.type = LE_ADV_NONCONN_IND;
1104 cp.own_address_type = own_addr_type;
1105 cp.channel_map = hdev->le_adv_channel_map;
1107 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1109 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1112 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1115 size_t complete_len;
1117 /* no space left for name (+ NULL + type + len) */
1118 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1121 /* use complete name if present and fits */
1122 complete_len = strlen(hdev->dev_name);
1123 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1124 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1125 hdev->dev_name, complete_len + 1);
1127 /* use short name if present */
1128 short_len = strlen(hdev->short_name);
1130 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1131 hdev->short_name, short_len + 1);
1133 /* use shortened full name if present, we already know that name
1134 * is longer then HCI_MAX_SHORT_NAME_LENGTH
1137 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1139 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1140 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1142 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1149 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1151 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1154 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1156 u8 scan_rsp_len = 0;
1158 if (hdev->appearance) {
1159 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1162 return append_local_name(hdev, ptr, scan_rsp_len);
1165 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1168 struct adv_info *adv_instance;
1170 u8 scan_rsp_len = 0;
1172 adv_instance = hci_find_adv_instance(hdev, instance);
1176 instance_flags = adv_instance->flags;
1178 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1179 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1182 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1183 adv_instance->scan_rsp_len);
1185 scan_rsp_len += adv_instance->scan_rsp_len;
1187 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1188 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1190 return scan_rsp_len;
1193 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1195 struct hci_dev *hdev = req->hdev;
1198 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1201 if (ext_adv_capable(hdev)) {
1202 struct hci_cp_le_set_ext_scan_rsp_data cp;
1204 memset(&cp, 0, sizeof(cp));
1207 len = create_instance_scan_rsp_data(hdev, instance,
1210 len = create_default_scan_rsp_data(hdev, cp.data);
1212 if (hdev->scan_rsp_data_len == len &&
1213 !memcmp(cp.data, hdev->scan_rsp_data, len))
1216 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1217 hdev->scan_rsp_data_len = len;
1221 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1222 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1224 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1227 struct hci_cp_le_set_scan_rsp_data cp;
1229 memset(&cp, 0, sizeof(cp));
1232 len = create_instance_scan_rsp_data(hdev, instance,
1235 len = create_default_scan_rsp_data(hdev, cp.data);
1237 if (hdev->scan_rsp_data_len == len &&
1238 !memcmp(cp.data, hdev->scan_rsp_data, len))
1241 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1242 hdev->scan_rsp_data_len = len;
1246 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1250 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1252 struct adv_info *adv_instance = NULL;
1253 u8 ad_len = 0, flags = 0;
1256 /* Return 0 when the current instance identifier is invalid. */
1258 adv_instance = hci_find_adv_instance(hdev, instance);
1263 instance_flags = get_adv_instance_flags(hdev, instance);
1265 /* If instance already has the flags set skip adding it once
1268 if (adv_instance && eir_get_data(adv_instance->adv_data,
1269 adv_instance->adv_data_len, EIR_FLAGS,
1273 /* The Add Advertising command allows userspace to set both the general
1274 * and limited discoverable flags.
1276 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1277 flags |= LE_AD_GENERAL;
1279 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1280 flags |= LE_AD_LIMITED;
1282 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1283 flags |= LE_AD_NO_BREDR;
1285 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1286 /* If a discovery flag wasn't provided, simply use the global
1290 flags |= mgmt_get_adv_discov_flags(hdev);
1292 /* If flags would still be empty, then there is no need to
1293 * include the "Flags" AD field".
1307 memcpy(ptr, adv_instance->adv_data,
1308 adv_instance->adv_data_len);
1309 ad_len += adv_instance->adv_data_len;
1310 ptr += adv_instance->adv_data_len;
1313 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1316 if (ext_adv_capable(hdev)) {
1318 adv_tx_power = adv_instance->tx_power;
1320 adv_tx_power = hdev->adv_tx_power;
1322 adv_tx_power = hdev->adv_tx_power;
1325 /* Provide Tx Power only if we can provide a valid value for it */
1326 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1328 ptr[1] = EIR_TX_POWER;
1329 ptr[2] = (u8)adv_tx_power;
/* Queue an HCI command to refresh the advertising data of @instance.
 * Builds the instance data, compares it against the cached copy in
 * hdev->adv_data and, only if changed, queues the extended or legacy
 * Set Advertising Data command depending on controller capability.
 * NOTE(review): this extract elides some original lines (early returns,
 * braces); code kept byte-identical.
 */
1339 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1341 struct hci_dev *hdev = req->hdev;
/* Nothing to do when LE is not enabled on this controller. */
1344 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
/* Extended advertising capable controller: use the extended command. */
1347 if (ext_adv_capable(hdev)) {
1348 struct hci_cp_le_set_ext_adv_data cp;
1350 memset(&cp, 0, sizeof(cp));
1352 len = create_instance_adv_data(hdev, instance, cp.data);
1354 /* There's nothing to do if the data hasn't changed */
1355 if (hdev->adv_data_len == len &&
1356 memcmp(cp.data, hdev->adv_data, len) == 0)
/* Cache the full buffer so future comparisons see the same bytes. */
1359 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1360 hdev->adv_data_len = len;
1364 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1365 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1367 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
/* Legacy controller path: same changed-data check, legacy command. */
1369 struct hci_cp_le_set_adv_data cp;
1371 memset(&cp, 0, sizeof(cp));
1373 len = create_instance_adv_data(hdev, instance, cp.data);
1375 /* There's nothing to do if the data hasn't changed */
1376 if (hdev->adv_data_len == len &&
1377 memcmp(cp.data, hdev->adv_data, len) == 0)
1380 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1381 hdev->adv_data_len = len;
1385 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Convenience wrapper: build a one-shot request that updates the
 * advertising data for @instance and run it without a completion
 * callback.  Returns the hci_req_run() result.
 */
1389 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1391 struct hci_request req;
1393 hci_req_init(&req, hdev);
1394 __hci_req_update_adv_data(&req, instance);
1396 return hci_req_run(&req, NULL);
/* Completion callback for re-enabling advertising: only logs status. */
1399 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1401 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable advertising after it has been temporarily disabled (e.g. by
 * a connection).  If an advertising instance is current, reschedule it;
 * otherwise re-enable plain advertising, using the extended path when
 * the controller supports it.  NOTE(review): elided lines (returns,
 * else branches) not shown; code kept byte-identical.
 */
1404 void hci_req_reenable_advertising(struct hci_dev *hdev)
1406 struct hci_request req;
/* Nothing to re-enable when advertising is off and no instances exist. */
1408 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1409 list_empty(&hdev->adv_instances))
1412 hci_req_init(&req, hdev);
1414 if (hdev->cur_adv_instance) {
1415 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1418 if (ext_adv_capable(hdev)) {
1419 __hci_req_start_ext_adv(&req, 0x00);
1421 __hci_req_update_adv_data(&req, 0x00);
1422 __hci_req_update_scan_rsp_data(&req, 0x00);
1423 __hci_req_enable_advertising(&req);
1427 hci_req_run(&req, adv_enable_complete);
/* Delayed-work handler fired when the current advertising instance's
 * timeout expires: clears (or removes) the instance and disables
 * advertising when no instances remain.
 */
1430 static void adv_timeout_expire(struct work_struct *work)
1432 struct hci_dev *hdev = container_of(work, struct hci_dev,
1433 adv_instance_expire.work);
1435 struct hci_request req;
1438 BT_DBG("%s", hdev->name);
/* Timeout fired: forget the pending timeout value. */
1442 hdev->adv_instance_timeout = 0;
1444 instance = hdev->cur_adv_instance;
1445 if (instance == 0x00)
1448 hci_req_init(&req, hdev);
/* force=false: instance is kept unless its remaining lifetime is 0. */
1450 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1452 if (list_empty(&hdev->adv_instances))
1453 __hci_req_disable_advertising(&req);
1455 hci_req_run(&req, NULL);
1458 hci_dev_unlock(hdev);
/* Select the own-address type and, when needed, a random address for
 * advertising.  Priority: resolvable private address (RPA) when privacy
 * is in use (regenerated when expired), then a non-resolvable private
 * address when privacy is required without RPA, then the public address.
 * @rand_addr is set to BDADDR_ANY when no random address is needed.
 */
1461 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1462 bool use_rpa, struct adv_info *adv_instance,
1463 u8 *own_addr_type, bdaddr_t *rand_addr)
1467 bacpy(rand_addr, BDADDR_ANY);
1469 /* If privacy is enabled use a resolvable private address. If
1470 * current RPA has expired then generate a new one.
1475 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Per-instance RPA: reuse it when still valid and matching hdev->rpa. */
1478 if (!adv_instance->rpa_expired &&
1479 !bacmp(&adv_instance->random_addr, &hdev->rpa))
1482 adv_instance->rpa_expired = false;
/* Global RPA: reuse the current random address while not expired. */
1484 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1485 !bacmp(&hdev->random_addr, &hdev->rpa))
1489 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1491 BT_ERR("%s failed to generate new RPA", hdev->name);
1495 bacpy(rand_addr, &hdev->rpa);
/* Schedule RPA rotation after the configured timeout (seconds). */
1497 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1499 queue_delayed_work(hdev->workqueue,
1500 &adv_instance->rpa_expired_cb, to);
1502 queue_delayed_work(hdev->workqueue,
1503 &hdev->rpa_expired, to);
1508 /* In case of required privacy without resolvable private address,
1509 * use an non-resolvable private address. This is useful for
1510 * non-connectable advertising.
1512 if (require_privacy) {
1516 /* The non-resolvable private address is generated
1517 * from random six bytes with the two most significant
1520 get_random_bytes(&nrpa, 6);
1523 /* The non-resolvable private address shall not be
1524 * equal to the public address.
1526 if (bacmp(&hdev->bdaddr, &nrpa))
1530 *own_addr_type = ADDR_LE_DEV_RANDOM;
1531 bacpy(rand_addr, &nrpa);
1536 /* No privacy so use a public address. */
1537 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Queue the LE Clear Advertising Sets command (removes all adv sets). */
1542 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1544 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
/* Queue the commands that configure an extended advertising set for
 * @instance: event properties (connectable/scannable/legacy based on the
 * instance flags), PHYs, own address type and, if required, the per-set
 * random address.  Returns 0 on success or a negative error.
 * NOTE(review): elided lines (returns, braces) not shown; code kept
 * byte-identical.
 */
1547 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1549 struct hci_cp_le_set_ext_adv_params cp;
1550 struct hci_dev *hdev = req->hdev;
1553 bdaddr_t random_addr;
1556 struct adv_info *adv_instance;
1558 /* In ext adv set param interval is 3 octets */
1559 const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1562 adv_instance = hci_find_adv_instance(hdev, instance);
1566 adv_instance = NULL;
1569 flags = get_adv_instance_flags(hdev, instance);
1571 /* If the "connectable" instance flag was not set, then choose between
1572 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1574 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1575 mgmt_get_connectable(hdev);
1577 if (!is_advertising_allowed(hdev, connectable))
1580 /* Set require_privacy to true only when non-connectable
1581 * advertising is used. In that case it is fine to use a
1582 * non-resolvable private address.
1584 err = hci_get_random_address(hdev, !connectable,
1585 adv_use_rpa(hdev, flags), adv_instance,
1586 &own_addr_type, &random_addr);
1590 memset(&cp, 0, sizeof(cp));
1592 memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1593 memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
/* Secondary-channel flags select extended (non-legacy) PDU types. */
1595 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1599 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1601 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1602 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1604 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1606 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1609 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1611 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1614 cp.own_addr_type = own_addr_type;
1615 cp.channel_map = hdev->le_adv_channel_map;
/* PHY selection per the secondary-channel instance flags. */
1619 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1620 cp.primary_phy = HCI_ADV_PHY_1M;
1621 cp.secondary_phy = HCI_ADV_PHY_2M;
1622 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1623 cp.primary_phy = HCI_ADV_PHY_CODED;
1624 cp.secondary_phy = HCI_ADV_PHY_CODED;
1626 /* In all other cases use 1M */
1627 cp.primary_phy = HCI_ADV_PHY_1M;
1628 cp.secondary_phy = HCI_ADV_PHY_1M;
1631 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
/* A random own address also needs the per-set random address set,
 * unless it already matches the instance or controller address.
 */
1633 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1634 bacmp(&random_addr, BDADDR_ANY)) {
1635 struct hci_cp_le_set_adv_set_rand_addr cp;
1637 /* Check if random address need to be updated */
1639 if (!bacmp(&random_addr, &adv_instance->random_addr))
1642 if (!bacmp(&random_addr, &hdev->random_addr))
1646 memset(&cp, 0, sizeof(cp));
1649 bacpy(&cp.bdaddr, &random_addr);
1652 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
/* Queue LE Set Extended Advertising Enable for a single advertising set
 * (handle 0).  The command carries the enable header plus one
 * hci_cp_ext_adv_set entry packed into @data.
 * NOTE(review): the line assigning cp to @data is elided in this
 * extract; code kept byte-identical.
 */
1659 void __hci_req_enable_ext_advertising(struct hci_request *req)
1661 struct hci_cp_le_set_ext_adv_enable *cp;
1662 struct hci_cp_ext_adv_set *adv_set;
1663 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
/* The single set entry follows the enable header in the buffer. */
1666 adv_set = (void *) cp->data;
1668 memset(cp, 0, sizeof(*cp));
1671 cp->num_of_sets = 0x01;
1673 memset(adv_set, 0, sizeof(*adv_set));
1675 adv_set->handle = 0;
1677 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1678 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
/* Disable any running advertising, then set up and enable the extended
 * advertising instance @instance.  Returns 0 or a negative setup error.
 */
1682 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1684 struct hci_dev *hdev = req->hdev;
1687 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1688 __hci_req_disable_advertising(req);
1690 err = __hci_req_setup_ext_adv_instance(req, instance);
1694 __hci_req_update_scan_rsp_data(req, instance);
1695 __hci_req_enable_ext_advertising(req);
/* Make @instance the current advertising instance, compute and arm its
 * expiry timeout, and queue the commands needed to start advertising it
 * (unless the same instance is already being advertised and @force is
 * not set).  Returns 0 or a negative error.
 */
1700 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1703 struct hci_dev *hdev = req->hdev;
1704 struct adv_info *adv_instance = NULL;
/* Instances are not used when Set Advertising is active or none exist. */
1707 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1708 list_empty(&hdev->adv_instances))
1711 if (hdev->adv_instance_timeout)
1714 adv_instance = hci_find_adv_instance(hdev, instance);
1718 /* A zero timeout means unlimited advertising. As long as there is
1719 * only one instance, duration should be ignored. We still set a timeout
1720 * in case further instances are being added later on.
1722 * If the remaining lifetime of the instance is more than the duration
1723 * then the timeout corresponds to the duration, otherwise it will be
1724 * reduced to the remaining instance lifetime.
1726 if (adv_instance->timeout == 0 ||
1727 adv_instance->duration <= adv_instance->remaining_time)
1728 timeout = adv_instance->duration;
1730 timeout = adv_instance->remaining_time;
1732 /* The remaining time is being reduced unless the instance is being
1733 * advertised without time limit.
1735 if (adv_instance->timeout)
1736 adv_instance->remaining_time =
1737 adv_instance->remaining_time - timeout;
/* Arm the expiry work; timeout is in seconds. */
1739 hdev->adv_instance_timeout = timeout;
1740 queue_delayed_work(hdev->req_workqueue,
1741 &hdev->adv_instance_expire,
1742 msecs_to_jiffies(timeout * 1000));
1744 /* If we're just re-scheduling the same instance again then do not
1745 * execute any HCI commands. This happens when a single instance is
1748 if (!force && hdev->cur_adv_instance == instance &&
1749 hci_dev_test_flag(hdev, HCI_LE_ADV))
1752 hdev->cur_adv_instance = instance;
1753 if (ext_adv_capable(hdev)) {
1754 __hci_req_start_ext_adv(req, instance);
1756 __hci_req_update_adv_data(req, instance);
1757 __hci_req_update_scan_rsp_data(req, instance);
1758 __hci_req_enable_advertising(req);
/* Cancel a pending advertising-instance expiry and clear its record. */
1764 static void cancel_adv_timeout(struct hci_dev *hdev)
1766 if (hdev->adv_instance_timeout) {
1767 hdev->adv_instance_timeout = 0;
1768 cancel_delayed_work(&hdev->adv_instance_expire);
1772 /* For a single instance:
1773 * - force == true: The instance will be removed even when its remaining
1774 * lifetime is not zero.
1775 * - force == false: the instance will be deactivated but kept stored unless
1776 * the remaining lifetime is zero.
1778 * For instance == 0x00:
1779 - force == true: All instances will be removed regardless of their timeout setting.
1781 * - force == false: Only instances that have a timeout will be removed.
/* Deactivate/remove advertising instance(s) per the force semantics
 * documented above, notify mgmt of removals via @sk, and reschedule the
 * next instance (if any) on @req when advertising should continue.
 */
1783 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1784 struct hci_request *req, u8 instance,
1787 struct adv_info *adv_instance, *n, *next_instance = NULL;
1791 /* Cancel any timeout concerning the removed instance(s). */
1792 if (!instance || hdev->cur_adv_instance == instance)
1793 cancel_adv_timeout(hdev);
1795 /* Get the next instance to advertise BEFORE we remove
1796 * the current one. This can be the same instance again
1797 * if there is only one instance.
1799 if (instance && hdev->cur_adv_instance == instance)
1800 next_instance = hci_get_next_instance(hdev, instance);
/* instance 0x00: sweep all instances (force) or only timed ones. */
1802 if (instance == 0x00) {
1803 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1805 if (!(force || adv_instance->timeout))
1808 rem_inst = adv_instance->instance;
1809 err = hci_remove_adv_instance(hdev, rem_inst);
1811 mgmt_advertising_removed(sk, hdev, rem_inst);
/* Single-instance case. */
1814 adv_instance = hci_find_adv_instance(hdev, instance);
1816 if (force || (adv_instance && adv_instance->timeout &&
1817 !adv_instance->remaining_time)) {
1818 /* Don't advertise a removed instance. */
1819 if (next_instance &&
1820 next_instance->instance == instance)
1821 next_instance = NULL;
1823 err = hci_remove_adv_instance(hdev, instance);
1825 mgmt_advertising_removed(sk, hdev, instance);
/* Only reschedule when we have a request, power, and instance mode. */
1829 if (!req || !hdev_is_powered(hdev) ||
1830 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1834 __hci_req_schedule_adv_instance(req, next_instance->instance,
/* Queue LE Set Random Address with @rpa, unless advertising or an LE
 * connection attempt is in progress — in that case defer by flagging
 * the RPA as expired so the next cycle retries.
 */
1838 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1840 struct hci_dev *hdev = req->hdev;
1842 /* If we're advertising or initiating an LE connection we can't
1843 * go ahead and change the random address at this time. This is
1844 * because the eventual initiator address used for the
1845 * subsequently created connection will be undefined (some
1846 * controllers use the new address and others the one we had
1847 * when the operation started).
1849 * In this kind of scenario skip the update and let the random
1850 * address be updated at the next cycle.
1852 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1853 hci_lookup_le_connect(hdev)) {
1854 BT_DBG("Deferring random address update");
1855 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1859 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Decide and (when needed) queue the own address to use for scanning or
 * legacy advertising: RPA when privacy is on, NRPA when privacy is
 * required without RPA, static random address when configured or
 * BR/EDR-less, otherwise public.  Sets *own_addr_type accordingly.
 */
1862 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1863 bool use_rpa, u8 *own_addr_type)
1865 struct hci_dev *hdev = req->hdev;
1868 /* If privacy is enabled use a resolvable private address. If
1869 * current RPA has expired or there is something else than
1870 * the current RPA in use, then generate a new one.
1875 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Reuse current RPA while not expired and still programmed. */
1877 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1878 !bacmp(&hdev->random_addr, &hdev->rpa))
1881 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1883 bt_dev_err(hdev, "failed to generate new RPA");
1887 set_random_addr(req, &hdev->rpa);
/* Schedule the next RPA rotation (rpa_timeout is in seconds). */
1889 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1890 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1895 /* In case of required privacy without resolvable private address,
1896 * use an non-resolvable private address. This is useful for active
1897 * scanning and non-connectable advertising.
1899 if (require_privacy) {
1903 /* The non-resolvable private address is generated
1904 * from random six bytes with the two most significant
1907 get_random_bytes(&nrpa, 6);
1910 /* The non-resolvable private address shall not be
1911 * equal to the public address.
1913 if (bacmp(&hdev->bdaddr, &nrpa))
1917 *own_addr_type = ADDR_LE_DEV_RANDOM;
1918 set_random_addr(req, &nrpa);
1922 /* If forcing static address is in use or there is no public
1923 * address use the static address as random address (but skip
1924 * the HCI command if the current random address is already the
1927 * In case BR/EDR has been disabled on a dual-mode controller
1928 * and a static address has been configured, then use that
1929 * address instead of the public BR/EDR address.
1931 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1932 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1933 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1934 bacmp(&hdev->static_addr, BDADDR_ANY))) {
1935 *own_addr_type = ADDR_LE_DEV_RANDOM;
1936 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1937 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1938 &hdev->static_addr);
1942 /* Neither privacy nor static address is being used so use a
1945 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Return true when any whitelist (accept list) entry refers to a BR/EDR
 * device that is currently not connected — such entries need page scan.
 */
1950 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1952 struct bdaddr_list *b;
1954 list_for_each_entry(b, &hdev->whitelist, list) {
1955 struct hci_conn *conn;
1957 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1961 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
/* Recompute the BR/EDR scan enable value (page/inquiry scan) from the
 * connectable/discoverable state and queue Write Scan Enable only when
 * it differs from the controller's current flags.
 */
1968 void __hci_req_update_scan(struct hci_request *req)
1970 struct hci_dev *hdev = req->hdev;
1973 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1976 if (!hdev_is_powered(hdev))
1979 if (mgmt_powering_down(hdev))
/* Page scan is needed when connectable or when whitelist peers are
 * disconnected and may reconnect.
 */
1982 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1983 disconnected_whitelist_entries(hdev))
1986 scan = SCAN_DISABLED;
1988 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1989 scan |= SCAN_INQUIRY;
/* Skip the command when the controller already matches. */
1991 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1992 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1995 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* hci_req_sync callback: update BR/EDR scan mode under hdev lock. */
1998 static int update_scan(struct hci_request *req, unsigned long opt)
2000 hci_dev_lock(req->hdev);
2001 __hci_req_update_scan(req);
2002 hci_dev_unlock(req->hdev);
/* Work item: synchronously refresh the BR/EDR scan mode. */
2006 static void scan_update_work(struct work_struct *work)
2008 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2010 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
/* hci_req_sync callback run after the connectable setting changed:
 * refresh scan mode, advertising data/parameters and background scan.
 */
2013 static int connectable_update(struct hci_request *req, unsigned long opt)
2015 struct hci_dev *hdev = req->hdev;
2019 __hci_req_update_scan(req);
2021 /* If BR/EDR is not enabled and we disable advertising as a
2022 * by-product of disabling connectable, we need to update the
2023 * advertising flags.
2025 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2026 __hci_req_update_adv_data(req, hdev->cur_adv_instance)
2028 /* Update the advertising parameters if necessary */
2029 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2030 !list_empty(&hdev->adv_instances)) {
2031 if (ext_adv_capable(hdev))
2032 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2034 __hci_req_enable_advertising(req);
2037 __hci_update_background_scan(req);
2039 hci_dev_unlock(hdev);
/* Work item: run the connectable update and report status to mgmt. */
2044 static void connectable_update_work(struct work_struct *work)
2046 struct hci_dev *hdev = container_of(work, struct hci_dev,
2047 connectable_update);
2050 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2051 mgmt_set_connectable_complete(hdev, status);
/* OR together the service-class hints of all registered UUIDs; used as
 * the service-class byte of the Class of Device.
 */
2054 static u8 get_service_classes(struct hci_dev *hdev)
2056 struct bt_uuid *uuid;
2059 list_for_each_entry(uuid, &hdev->uuids, list)
2060 val |= uuid->svc_hint;
/* Queue Write Class of Device when the computed CoD (minor, major,
 * service classes, limited-discoverable bit) differs from the current.
 */
2065 void __hci_req_update_class(struct hci_request *req)
2067 struct hci_dev *hdev = req->hdev;
2070 BT_DBG("%s", hdev->name);
2072 if (!hdev_is_powered(hdev))
2075 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
/* While service cache is active, CoD updates are deferred. */
2078 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2081 cod[0] = hdev->minor_class;
2082 cod[1] = hdev->major_class;
2083 cod[2] = get_service_classes(hdev);
2085 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2088 if (memcmp(cod, hdev->dev_class, 3) == 0)
2091 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Queue Write Current IAC LAP with the LIAC (+GIAC) in limited
 * discoverable mode, or just the GIAC in general discoverable mode.
 */
2094 static void write_iac(struct hci_request *req)
2096 struct hci_dev *hdev = req->hdev;
2097 struct hci_cp_write_current_iac_lap cp;
2099 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2102 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2103 /* Limited discoverable mode */
/* Some controllers support fewer than two IACs. */
2104 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2105 cp.iac_lap[0] = 0x00; /* LIAC */
2106 cp.iac_lap[1] = 0x8b;
2107 cp.iac_lap[2] = 0x9e;
2108 cp.iac_lap[3] = 0x33; /* GIAC */
2109 cp.iac_lap[4] = 0x8b;
2110 cp.iac_lap[5] = 0x9e;
2112 /* General discoverable mode */
2114 cp.iac_lap[0] = 0x33; /* GIAC */
2115 cp.iac_lap[1] = 0x8b;
2116 cp.iac_lap[2] = 0x9e;
2119 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2120 (cp.num_iac * 3) + 1, &cp);
/* hci_req_sync callback run after the discoverable setting changed:
 * refresh IAC/scan/class for BR/EDR, and advertising data (and, in
 * limited privacy, the advertising address) for LE.
 */
2123 static int discoverable_update(struct hci_request *req, unsigned long opt)
2125 struct hci_dev *hdev = req->hdev;
2129 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2131 __hci_req_update_scan(req);
2132 __hci_req_update_class(req);
2135 /* Advertising instances don't use the global discoverable setting, so
2136 * only update AD if advertising was enabled using Set Advertising.
2138 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2139 __hci_req_update_adv_data(req, 0x00);
2141 /* Discoverable mode affects the local advertising
2142 * address in limited privacy mode.
2144 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2145 if (ext_adv_capable(hdev))
2146 __hci_req_start_ext_adv(req, 0x00);
2148 __hci_req_enable_advertising(req);
2152 hci_dev_unlock(hdev);
/* Work item: run the discoverable update and report status to mgmt. */
2157 static void discoverable_update_work(struct work_struct *work)
2159 struct hci_dev *hdev = container_of(work, struct hci_dev,
2160 discoverable_update);
2163 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2164 mgmt_set_discoverable_complete(hdev, status);
/* Queue the HCI command appropriate for tearing down @conn given its
 * current state: Disconnect (or Disconnect Physical Link for AMP) for
 * established links, Create Connection Cancel variants while
 * connecting, and Reject Connection for incoming requests.
 */
2167 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2170 switch (conn->state) {
2173 if (conn->type == AMP_LINK) {
2174 struct hci_cp_disconn_phy_link cp;
2176 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2178 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2181 struct hci_cp_disconnect dc;
2183 dc.handle = cpu_to_le16(conn->handle);
2185 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2188 conn->state = BT_DISCONN;
2192 if (conn->type == LE_LINK) {
/* A scanning LE connection attempt has no HCI command to cancel. */
2193 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2195 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2197 } else if (conn->type == ACL_LINK) {
/* Create Connection Cancel only exists since Bluetooth 1.2. */
2198 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2200 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2205 if (conn->type == ACL_LINK) {
2206 struct hci_cp_reject_conn_req rej;
2208 bacpy(&rej.bdaddr, &conn->dst);
2209 rej.reason = reason;
2211 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2213 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2214 struct hci_cp_reject_sync_conn_req rej;
2216 bacpy(&rej.bdaddr, &conn->dst);
2218 /* SCO rejection has its own limited set of
2219 * allowed error values (0x0D-0x0F) which isn't
2220 * compatible with most values passed to this
2221 * function. To be safe hard-code one of the
2222 * values that's suitable for SCO.
2224 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2226 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
/* Default: no command applies, just mark the connection closed. */
2231 conn->state = BT_CLOSED;
/* Completion callback for hci_abort_conn(): only logs failures. */
2236 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2239 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
/* Build and run a request that aborts @conn with @reason.  -ENODATA
 * (nothing queued, e.g. scanning LE attempt) is not treated as failure.
 */
2242 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2244 struct hci_request req;
2247 hci_req_init(&req, conn->hdev);
2249 __hci_abort_conn(&req, conn, reason);
2251 err = hci_req_run(&req, abort_conn_complete);
2252 if (err && err != -ENODATA) {
2253 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
/* hci_req_sync callback: refresh background LE scan under hdev lock. */
2260 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2262 hci_dev_lock(req->hdev);
2263 __hci_update_background_scan(req);
2264 hci_dev_unlock(req->hdev);
/* Work item: update the background scan; on error, fail any pending
 * outgoing LE connection so it does not hang in BT_CONNECT.
 */
2268 static void bg_scan_update(struct work_struct *work)
2270 struct hci_dev *hdev = container_of(work, struct hci_dev,
2272 struct hci_conn *conn;
2276 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2282 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2284 hci_le_conn_failed(conn, status);
2286 hci_dev_unlock(hdev);
/* hci_req_sync callback: queue the LE scan disable command. */
2289 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2291 hci_req_add_le_scan_disable(req);
/* hci_req_sync callback: flush the inquiry cache and start a BR/EDR
 * inquiry using the LIAC in limited-discovery mode, GIAC otherwise.
 * @opt carries the inquiry length.
 */
2295 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2298 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2299 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2300 struct hci_cp_inquiry cp;
2302 BT_DBG("%s", req->hdev->name);
2304 hci_dev_lock(req->hdev);
2305 hci_inquiry_cache_flush(req->hdev);
2306 hci_dev_unlock(req->hdev);
2308 memset(&cp, 0, sizeof(cp));
2310 if (req->hdev->discovery.limited)
2311 memcpy(&cp.lap, liac, sizeof(cp.lap));
2313 memcpy(&cp.lap, giac, sizeof(cp.lap));
2317 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Delayed work: stop the LE discovery scan when its duration elapses,
 * then either finish discovery or chain into BR/EDR inquiry for
 * interleaved discovery (when the controller cannot run both at once).
 */
2322 static void le_scan_disable_work(struct work_struct *work)
2324 struct hci_dev *hdev = container_of(work, struct hci_dev,
2325 le_scan_disable.work);
2328 BT_DBG("%s", hdev->name);
2330 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
/* The restart job is meaningless once the scan is being stopped. */
2333 cancel_delayed_work(&hdev->le_scan_restart);
2335 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2337 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2342 hdev->discovery.scan_start = 0;
2344 /* If we were running LE only scan, change discovery state. If
2345 * we were running both LE and BR/EDR inquiry simultaneously,
2346 * and BR/EDR inquiry is already finished, stop discovery,
2347 * otherwise BR/EDR inquiry will stop discovery when finished.
2348 * If we will resolve remote device name, do not change
2352 if (hdev->discovery.type == DISCOV_TYPE_LE)
2353 goto discov_stopped;
2355 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2358 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2359 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2360 hdev->discovery.state != DISCOVERY_RESOLVING)
2361 goto discov_stopped;
/* Interleaved discovery: LE phase done, run the BR/EDR inquiry leg. */
2366 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2367 HCI_CMD_TIMEOUT, &status);
2369 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2370 goto discov_stopped;
2377 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2378 hci_dev_unlock(hdev);
/* hci_req_sync callback: restart an ongoing LE scan by queuing a
 * disable followed by an enable (extended or legacy command set).
 * Used to reset the controller's duplicate filter.
 */
2381 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2383 struct hci_dev *hdev = req->hdev;
2385 /* If controller is not scanning we are done. */
2386 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2389 hci_req_add_le_scan_disable(req);
2391 if (use_ext_scan(hdev)) {
2392 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2394 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2395 ext_enable_cp.enable = LE_SCAN_ENABLE;
2396 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2398 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2399 sizeof(ext_enable_cp), &ext_enable_cp);
2401 struct hci_cp_le_set_scan_enable cp;
2403 memset(&cp, 0, sizeof(cp));
2404 cp.enable = LE_SCAN_ENABLE;
2405 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2406 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Delayed work: restart the LE scan (to clear strict duplicate
 * filtering) and re-arm the scan-disable work for the time remaining
 * of the original scan duration, handling jiffies wrap-around.
 */
2412 static void le_scan_restart_work(struct work_struct *work)
2414 struct hci_dev *hdev = container_of(work, struct hci_dev,
2415 le_scan_restart.work);
2416 unsigned long timeout, duration, scan_start, now;
2419 BT_DBG("%s", hdev->name);
2421 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2423 bt_dev_err(hdev, "failed to restart LE scan: status %d",
/* Re-arming only matters for strict-duplicate-filter controllers
 * that recorded a scan start time.
 */
2430 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2431 !hdev->discovery.scan_start)
2434 /* When the scan was started, hdev->le_scan_disable has been queued
2435 * after duration from scan_start. During scan restart this job
2436 * has been canceled, and we need to queue it again after proper
2437 * timeout, to make sure that scan does not run indefinitely.
2439 duration = hdev->discovery.scan_duration;
2440 scan_start = hdev->discovery.scan_start;
2442 if (now - scan_start <= duration) {
2445 if (now >= scan_start)
2446 elapsed = now - scan_start;
/* jiffies wrapped since scan_start was recorded. */
2448 elapsed = ULONG_MAX - scan_start + now;
2450 timeout = duration - elapsed;
2455 queue_delayed_work(hdev->req_workqueue,
2456 &hdev->le_scan_disable, timeout);
2459 hci_dev_unlock(hdev);
/* hci_req_sync callback: start an active LE discovery scan with the
 * interval given in @opt.  Disables advertising (unless a directed
 * connection attempt depends on it) and any running passive scan first,
 * then picks a privacy-appropriate own address and starts scanning.
 */
2462 static int active_scan(struct hci_request *req, unsigned long opt)
2464 uint16_t interval = opt;
2465 struct hci_dev *hdev = req->hdev;
2469 BT_DBG("%s", hdev->name);
2471 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2474 /* Don't let discovery abort an outgoing connection attempt
2475 * that's using directed advertising.
2477 if (hci_lookup_le_connect(hdev)) {
2478 hci_dev_unlock(hdev);
2482 cancel_adv_timeout(hdev);
2483 hci_dev_unlock(hdev);
2485 __hci_req_disable_advertising(req);
2488 /* If controller is scanning, it means the background scanning is
2489 * running. Thus, we should temporarily stop it in order to set the
2490 * discovery scanning parameters.
2492 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2493 hci_req_add_le_scan_disable(req);
2495 /* All active scans will be done with either a resolvable private
2496 * address (when privacy feature has been enabled) or non-resolvable
2499 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
/* On address-update failure fall back to the public address. */
2502 own_addr_type = ADDR_LE_DEV_PUBLIC;
2504 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
/* hci_req_sync callback for simultaneous discovery: start the LE active
 * scan, then the BR/EDR inquiry in the same request.
 */
2509 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2513 BT_DBG("%s", req->hdev->name);
2515 err = active_scan(req, opt);
2519 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
/* Kick off device discovery according to hdev->discovery.type:
 * BR/EDR inquiry only, LE scan only, or interleaved/simultaneous
 * operation.  Also schedules the LE scan timeout and records scan
 * start/duration for strict-duplicate-filter controllers.
 */
2522 static void start_discovery(struct hci_dev *hdev, u8 *status)
2524 unsigned long timeout;
2526 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2528 switch (hdev->discovery.type) {
2529 case DISCOV_TYPE_BREDR:
2530 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2531 hci_req_sync(hdev, bredr_inquiry,
2532 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2535 case DISCOV_TYPE_INTERLEAVED:
2536 /* When running simultaneous discovery, the LE scanning time
2537 * should occupy the whole discovery time sine BR/EDR inquiry
2538 * and LE scanning are scheduled by the controller.
2540 * For interleaving discovery in comparison, BR/EDR inquiry
2541 * and LE scanning are done sequentially with separate
2544 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2546 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2547 /* During simultaneous discovery, we double LE scan
2548 * interval. We must leave some time for the controller
2549 * to do BR/EDR inquiry.
2551 hci_req_sync(hdev, interleaved_discov,
2552 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
/* Sequential interleaving: LE phase first, inquiry follows from
 * le_scan_disable_work.
 */
2557 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2558 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2559 HCI_CMD_TIMEOUT, status);
2561 case DISCOV_TYPE_LE:
2562 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2563 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2564 HCI_CMD_TIMEOUT, status);
2567 *status = HCI_ERROR_UNSPECIFIED;
2574 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2576 /* When service discovery is used and the controller has a
2577 * strict duplicate filter, it is important to remember the
2578 * start and duration of the scan. This is required for
2579 * restarting scanning during the discovery phase.
2581 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2582 hdev->discovery.result_filtering) {
2583 hdev->discovery.scan_start = jiffies;
2584 hdev->discovery.scan_duration = timeout;
2587 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
/* Queue the commands that stop an ongoing discovery: cancel inquiry,
 * disable the LE scan, and cancel a pending remote-name request when
 * name resolution is in progress.  Returns true when any command was
 * queued (i.e. state will change asynchronously).
 */
2591 bool hci_req_stop_discovery(struct hci_request *req)
2593 struct hci_dev *hdev = req->hdev;
2594 struct discovery_state *d = &hdev->discovery;
2595 struct hci_cp_remote_name_req_cancel cp;
2596 struct inquiry_entry *e;
2599 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2601 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2602 if (test_bit(HCI_INQUIRY, &hdev->flags))
2603 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2605 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2606 cancel_delayed_work(&hdev->le_scan_disable);
2607 hci_req_add_le_scan_disable(req);
2612 /* Passive scanning */
2613 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2614 hci_req_add_le_scan_disable(req);
2619 /* No further actions needed for LE-only discovery */
2620 if (d->type == DISCOV_TYPE_LE)
2623 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2624 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2629 bacpy(&cp.bdaddr, &e->data.bdaddr);
2630 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
/* hci_req_sync callback: stop discovery under hdev lock. */
2638 static int stop_discovery(struct hci_request *req, unsigned long opt)
2640 hci_dev_lock(req->hdev);
2641 hci_req_stop_discovery(req);
2642 hci_dev_unlock(req->hdev);
/* Work item: drive the discovery state machine — start or stop
 * discovery as requested and report completion to mgmt.
 */
2647 static void discov_update(struct work_struct *work)
2649 struct hci_dev *hdev = container_of(work, struct hci_dev,
2653 switch (hdev->discovery.state) {
2654 case DISCOVERY_STARTING:
2655 start_discovery(hdev, &status);
2656 mgmt_start_discovery_complete(hdev, status);
/* On failure fall back to STOPPED, otherwise enter FINDING. */
2658 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2660 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2662 case DISCOVERY_STOPPING:
2663 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2664 mgmt_stop_discovery_complete(hdev, status);
2666 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2668 case DISCOVERY_STOPPED:
/* Delayed work fired when the discoverable timeout expires: clear the
 * discoverable flags, push the change to the controller and emit the
 * new settings to mgmt.
 */
2674 static void discov_off(struct work_struct *work)
2676 struct hci_dev *hdev = container_of(work, struct hci_dev,
2679 BT_DBG("%s", hdev->name);
2683 /* When discoverable timeout triggers, then just make sure
2684 * the limited discoverable flag is cleared. Even in the case
2685 * of a timeout triggered from general discoverable, it is
2686 * safe to unconditionally clear the flag.
2688 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2689 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2690 hdev->discov_timeout = 0;
2692 hci_dev_unlock(hdev);
2694 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2695 mgmt_new_settings(hdev);
/* hci_req_sync callback run on power-on: bring the controller's host
 * configuration (SSP, SC, LE host support), advertising state,
 * authentication and BR/EDR settings in line with the stack's flags.
 */
2698 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2700 struct hci_dev *hdev = req->hdev;
/* Enable SSP on the controller when the stack flag is ahead of it. */
2705 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2706 !lmp_host_ssp_capable(hdev)) {
2709 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2711 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2714 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2715 sizeof(support), &support);
/* Sync LE host support on dual-mode controllers. */
2719 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2720 lmp_bredr_capable(hdev)) {
2721 struct hci_cp_write_le_host_supported cp;
2726 /* Check first if we already have the right
2727 * host state (host features set)
2729 if (cp.le != lmp_host_le_capable(hdev) ||
2730 cp.simul != lmp_host_le_br_capable(hdev))
2731 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2735 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2736 /* Make sure the controller has a good default for
2737 * advertising data. This also applies to the case
2738 * where BR/EDR was toggled during the AUTO_OFF phase.
2740 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2741 list_empty(&hdev->adv_instances)) {
2744 if (ext_adv_capable(hdev)) {
2745 err = __hci_req_setup_ext_adv_instance(req,
2748 __hci_req_update_scan_rsp_data(req,
2752 __hci_req_update_adv_data(req, 0x00);
2753 __hci_req_update_scan_rsp_data(req, 0x00);
2756 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2757 if (!ext_adv_capable(hdev))
2758 __hci_req_enable_advertising(req);
2760 __hci_req_enable_ext_advertising(req);
/* Otherwise schedule the first registered instance. */
2762 } else if (!list_empty(&hdev->adv_instances)) {
2763 struct adv_info *adv_instance;
2765 adv_instance = list_first_entry(&hdev->adv_instances,
2766 struct adv_info, list);
2767 __hci_req_schedule_adv_instance(req,
2768 adv_instance->instance,
/* Sync link-level authentication with the stack flag. */
2773 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2774 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2775 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2776 sizeof(link_sec), &link_sec);
2778 if (lmp_bredr_capable(hdev)) {
2779 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2780 __hci_req_write_fast_connectable(req, true);
2782 __hci_req_write_fast_connectable(req, false);
2783 __hci_req_update_scan(req);
2784 __hci_req_update_class(req);
2785 __hci_req_update_name(req);
2786 __hci_req_update_eir(req);
2789 hci_dev_unlock(hdev);
2793 int __hci_req_hci_power_on(struct hci_dev *hdev)
2795 /* Register the available SMP channels (BR/EDR and LE) only when
2796 * successfully powering on the controller. This late
2797 * registration is required so that LE SMP can clearly decide if
2798 * the public address or static address is used.
2802 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2806 void hci_request_setup(struct hci_dev *hdev)
2808 INIT_WORK(&hdev->discov_update, discov_update);
2809 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2810 INIT_WORK(&hdev->scan_update, scan_update_work);
2811 INIT_WORK(&hdev->connectable_update, connectable_update_work);
2812 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2813 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2814 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2815 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2816 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2819 void hci_request_cancel_all(struct hci_dev *hdev)
2821 hci_req_sync_cancel(hdev, ENODEV);
2823 cancel_work_sync(&hdev->discov_update);
2824 cancel_work_sync(&hdev->bg_scan_update);
2825 cancel_work_sync(&hdev->scan_update);
2826 cancel_work_sync(&hdev->connectable_update);
2827 cancel_work_sync(&hdev->discoverable_update);
2828 cancel_delayed_work_sync(&hdev->discov_off);
2829 cancel_delayed_work_sync(&hdev->le_scan_disable);
2830 cancel_delayed_work_sync(&hdev->le_scan_restart);
2832 if (hdev->adv_instance_timeout) {
2833 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2834 hdev->adv_instance_timeout = 0;