2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2014 Intel Corporation
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
24 #include <linux/sched/signal.h>
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
31 #include "hci_request.h"
33 #define HCI_REQ_DONE 0
34 #define HCI_REQ_PEND 1
35 #define HCI_REQ_CANCELED 2
/* Initialise an HCI request: set up its private command queue.
 * NOTE(review): this extract is elided — braces and the expected
 * req->hdev assignment are missing; confirm against the full file.
 */
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
39 skb_queue_head_init(&req->cmd_q);
/* Drop all commands still queued on the request without running them. */
44 void hci_req_purge(struct hci_request *req)
46 skb_queue_purge(&req->cmd_q);
/* Return true while a synchronous request on this device is still
 * waiting for its completion event (req_status == HCI_REQ_PEND).
 */
49 bool hci_req_status_pend(struct hci_dev *hdev)
51 return hdev->req_status == HCI_REQ_PEND;
/* Hand the request's queued commands over to the controller: store the
 * completion callback (plain or skb-based) in the control block of the
 * last queued command, splice the whole queue onto hdev->cmd_q under
 * the queue lock, and kick cmd_work to start transmission.
 * NOTE(review): this extract is elided — the req->err check guarding
 * the purge, the return statements, 'skb'/'flags' declarations and
 * braces are missing here.
 */
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
57 struct hci_dev *hdev = req->hdev;
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
67 skb_queue_purge(&req->cmd_q);
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
/* Only the final command of the request carries the completion hook */
75 skb = skb_peek_tail(&req->cmd_q);
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
/* Splice atomically w.r.t. the command worker draining hdev->cmd_q */
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
87 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Run the request with a plain (status-only) completion callback. */
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
94 return req_run(req, complete, NULL);
/* Run the request with an skb-based completion callback (the callback
 * receives the command-complete event buffer).
 */
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
99 return req_run(req, NULL, complete);
/* Completion callback used by the synchronous request helpers: record
 * the result, keep a reference to the event skb, and wake the waiter
 * sleeping on req_wait_q.
 * NOTE(review): the 'struct sk_buff *skb' parameter line and braces
 * are elided in this extract.
 */
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
/* skb_get() takes a reference; the waiter consumes and frees it */
111 hdev->req_skb = skb_get(skb);
112 wake_up_interruptible(&hdev->req_wait_q);
/* Abort an in-flight synchronous request: store @err as the result,
 * mark the request canceled and wake the waiter.
 */
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
/* Send a single HCI command and sleep until the matching event arrives
 * or @timeout elapses. Returns the event skb on success, or an
 * ERR_PTR: -EINTR if interrupted, -ENODATA if no event skb was
 * captured, or a negative error translated from the HCI status.
 * NOTE(review): this extract is elided — 'err'/'skb' declarations,
 * the error check after hci_req_run_skb, the HCI_REQ_DONE/default
 * switch labels, timeout handling and braces are missing.
 */
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 const void *param, u8 event, u32 timeout)
130 struct hci_request req;
134 BT_DBG("%s", hdev->name);
136 hci_req_init(&req, hdev);
138 hci_req_add_ev(&req, opcode, plen, param, event);
140 hdev->req_status = HCI_REQ_PEND;
142 err = hci_req_run_skb(&req, hci_req_sync_complete);
146 err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 hdev->req_status != HCI_REQ_PEND, timeout);
149 if (err == -ERESTARTSYS)
150 return ERR_PTR(-EINTR);
152 switch (hdev->req_status) {
/* Controller reported an HCI status; map it to a kernel errno */
154 err = -bt_to_errno(hdev->req_result);
157 case HCI_REQ_CANCELED:
158 err = -hdev->req_result;
/* Reset the per-device request state for the next caller */
166 hdev->req_status = hdev->req_result = 0;
168 hdev->req_skb = NULL;
170 BT_DBG("%s end: err %d", hdev->name, err);
178 return ERR_PTR(-ENODATA);
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper: synchronous command completing on the default
 * Command Complete/Status event (event == 0).
 */
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 const void *param, u32 timeout)
187 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
189 EXPORT_SYMBOL(__hci_cmd_sync);
191 /* Execute request and wait for completion. */
/* Build a request via @func, run it, and sleep until it completes or
 * @timeout elapses. On an HCI-level failure the controller status is
 * reported through @hci_status (when non-NULL); otherwise
 * HCI_ERROR_UNSPECIFIED is used for local failures.
 * NOTE(review): this extract is elided — 'err' declaration, several
 * return paths, switch labels (HCI_REQ_DONE/default), NULL checks on
 * hci_status and braces are missing.
 */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
194 unsigned long opt, u32 timeout, u8 *hci_status)
196 struct hci_request req;
199 BT_DBG("%s start", hdev->name);
201 hci_req_init(&req, hdev);
203 hdev->req_status = HCI_REQ_PEND;
/* Let the caller-supplied builder queue its commands */
205 err = func(&req, opt);
208 *hci_status = HCI_ERROR_UNSPECIFIED;
212 err = hci_req_run_skb(&req, hci_req_sync_complete);
214 hdev->req_status = 0;
216 /* ENODATA means the HCI request command queue is empty.
217 * This can happen when a request with conditionals doesn't
218 * trigger any commands to be sent. This is normal behavior
219 * and should not trigger an error return.
221 if (err == -ENODATA) {
228 *hci_status = HCI_ERROR_UNSPECIFIED;
233 err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 hdev->req_status != HCI_REQ_PEND, timeout);
236 if (err == -ERESTARTSYS)
239 switch (hdev->req_status) {
241 err = -bt_to_errno(hdev->req_result);
243 *hci_status = hdev->req_result;
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
249 *hci_status = HCI_ERROR_UNSPECIFIED;
255 *hci_status = HCI_ERROR_UNSPECIFIED;
/* Drop any captured event skb and reset the request state */
259 kfree_skb(hdev->req_skb);
260 hdev->req_skb = NULL;
261 hdev->req_status = hdev->req_result = 0;
263 BT_DBG("%s end: err %d", hdev->name, err);
/* Serialized variant of __hci_req_sync: takes the request lock and
 * only runs the request while the device is up.
 * NOTE(review): 'ret' declaration, the else branch (device down) and
 * return are elided in this extract.
 */
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
270 unsigned long opt, u32 timeout, u8 *hci_status)
274 /* Serialize all requests */
275 hci_req_sync_lock(hdev);
276 /* check the state after obtaining the lock to protect the HCI_UP
277 * against any races from hci_dev_do_close when the controller
280 if (test_bit(HCI_UP, &hdev->flags))
281 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
284 hci_req_sync_unlock(hdev);
/* Allocate and fill an skb holding one HCI command packet: header
 * (opcode + parameter length) followed by @plen bytes of @param.
 * The packet type and opcode are cached in the skb control block.
 * NOTE(review): the 'const void *param' parameter line, the NULL
 * check after bt_skb_alloc, hdr->plen assignment, the 'if (plen)'
 * guard and the final 'return skb;' are elided in this extract.
 */
289 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
292 int len = HCI_COMMAND_HDR_SIZE + plen;
293 struct hci_command_hdr *hdr;
296 skb = bt_skb_alloc(len, GFP_ATOMIC);
300 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
/* Opcode goes on the wire little-endian per the HCI spec */
301 hdr->opcode = cpu_to_le16(opcode);
305 skb_put_data(skb, param, plen);
307 BT_DBG("skb len %d", skb->len);
309 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310 hci_skb_opcode(skb) = opcode;
315 /* Queue a command to an asynchronous HCI request */
/* Build the command skb and append it to the request's queue; the
 * first command of a request is tagged HCI_REQ_START, and @event
 * records which event is expected to complete this command.
 * NOTE(review): 'skb' declaration, the req->err check, the NULL-skb
 * error path setting req->err, and braces are elided in this extract.
 */
316 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
317 const void *param, u8 event)
319 struct hci_dev *hdev = req->hdev;
322 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
324 /* If an error occurred during request building, there is no point in
325 * queueing the HCI command. We can simply return.
330 skb = hci_prepare_cmd(hdev, opcode, plen, param);
332 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
338 if (skb_queue_empty(&req->cmd_q))
339 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
341 bt_cb(skb)->hci.req_event = event;
343 skb_queue_tail(&req->cmd_q, skb);
/* Queue a command with no special completion event (event == 0).
 * NOTE(review): the 'const void *param' parameter line is elided here.
 */
346 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
349 hci_req_add_ev(req, opcode, plen, param, 0);
/* Queue page-scan parameter updates to speed up (or restore default)
 * incoming BR/EDR connections: interlaced scanning with a 160 ms
 * interval when @enable, standard scanning with 1.28 s otherwise.
 * Commands are only queued when the values actually change.
 * NOTE(review): 'type' declaration, early returns, the if/else around
 * the enable branch and braces are elided in this extract.
 */
352 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
354 struct hci_dev *hdev = req->hdev;
355 struct hci_cp_write_page_scan_activity acp;
358 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
/* Interlaced page scan requires at least Bluetooth 1.2 */
361 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
365 type = PAGE_SCAN_TYPE_INTERLACED;
367 /* 160 msec page scan interval */
368 acp.interval = cpu_to_le16(0x0100);
370 type = PAGE_SCAN_TYPE_STANDARD; /* default */
372 /* default 1.28 sec page scan */
373 acp.interval = cpu_to_le16(0x0800);
376 acp.window = cpu_to_le16(0x0012);
378 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
379 __cpu_to_le16(hdev->page_scan_window) != acp.window)
380 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
383 if (hdev->page_scan_type != type)
384 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
387 /* This function controls the background scanning based on hdev->pend_le_conns
388 * list. If there are pending LE connection we start the background scanning,
389 * otherwise we stop it.
391 * This function requires the caller holds hdev->lock.
/* NOTE(review): returns after the early-exit conditions, the else
 * branch structure and braces are elided in this extract.
 */
393 static void __hci_update_background_scan(struct hci_request *req)
395 struct hci_dev *hdev = req->hdev;
/* Bail out while the device is down, initialising, being configured,
 * auto-powering-off or unregistering.
 */
397 if (!test_bit(HCI_UP, &hdev->flags) ||
398 test_bit(HCI_INIT, &hdev->flags) ||
399 hci_dev_test_flag(hdev, HCI_SETUP) ||
400 hci_dev_test_flag(hdev, HCI_CONFIG) ||
401 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
402 hci_dev_test_flag(hdev, HCI_UNREGISTER))
405 /* No point in doing scanning if LE support hasn't been enabled */
406 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
409 /* If discovery is active don't interfere with it */
410 if (hdev->discovery.state != DISCOVERY_STOPPED)
413 /* Reset RSSI and UUID filters when starting background scanning
414 * since these filters are meant for service discovery only.
416 * The Start Discovery and Start Service Discovery operations
417 * ensure to set proper values for RSSI threshold and UUID
418 * filter list. So it is safe to just reset them here.
420 hci_discovery_filter_clear(hdev);
422 if (list_empty(&hdev->pend_le_conns) &&
423 list_empty(&hdev->pend_le_reports)) {
424 /* If there is no pending LE connections or devices
425 * to be scanned for, we should stop the background
429 /* If controller is not scanning we are done. */
430 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
433 hci_req_add_le_scan_disable(req);
435 BT_DBG("%s stopping background scanning", hdev->name);
437 /* If there is at least one pending LE connection, we should
438 * keep the background scan running.
441 /* If controller is connecting, we should not start scanning
442 * since some controllers are not able to scan and connect at
445 if (hci_lookup_le_connect(hdev))
448 /* If controller is currently scanning, we stop it to ensure we
449 * don't miss any advertising (due to duplicates filter).
451 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
452 hci_req_add_le_scan_disable(req);
454 hci_req_add_le_passive_scan(req);
456 BT_DBG("%s starting background scanning", hdev->name);
/* Queue a Write Local Name command carrying hdev->dev_name. */
460 void __hci_req_update_name(struct hci_request *req)
462 struct hci_dev *hdev = req->hdev;
463 struct hci_cp_write_local_name cp;
465 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
467 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
470 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append the device's 16-bit service UUIDs as a single EIR field at
 * *data (capped to @len bytes); marks the field "SOME" instead of
 * "ALL" when truncation was needed. Returns the advanced write ptr.
 * NOTE(review): 'uuid16' declaration, the lazy creation of the field
 * header (uuids_start setup, length byte), continue/break statements
 * and the final return are elided in this extract.
 */
472 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
474 u8 *ptr = data, *uuids_start = NULL;
475 struct bt_uuid *uuid;
480 list_for_each_entry(uuid, &hdev->uuids, list) {
483 if (uuid->size != 16)
/* 16-bit UUIDs live in bytes 12..13 of the stored 128-bit form */
486 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
/* PnP Info service class is deliberately excluded from EIR */
490 if (uuid16 == PNP_INFO_SVCLASS_ID)
496 uuids_start[1] = EIR_UUID16_ALL;
500 /* Stop if not enough space to put next UUID */
501 if ((ptr - data) + sizeof(u16) > len) {
502 uuids_start[1] = EIR_UUID16_SOME;
506 *ptr++ = (uuid16 & 0x00ff);
507 *ptr++ = (uuid16 & 0xff00) >> 8;
508 uuids_start[0] += sizeof(uuid16);
/* Same as create_uuid16_list but for 32-bit UUIDs (EIR_UUID32_*).
 * NOTE(review): field-header creation, loop-control statements and
 * the final return are elided in this extract.
 */
514 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
516 u8 *ptr = data, *uuids_start = NULL;
517 struct bt_uuid *uuid;
522 list_for_each_entry(uuid, &hdev->uuids, list) {
523 if (uuid->size != 32)
529 uuids_start[1] = EIR_UUID32_ALL;
533 /* Stop if not enough space to put next UUID */
534 if ((ptr - data) + sizeof(u32) > len) {
535 uuids_start[1] = EIR_UUID32_SOME;
/* 32-bit UUID occupies bytes 12..15 of the stored 128-bit form */
539 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
541 uuids_start[0] += sizeof(u32);
/* Same as create_uuid16_list but for full 128-bit UUIDs
 * (EIR_UUID128_*); each entry consumes 16 bytes.
 * NOTE(review): field-header creation, loop-control statements and
 * the final return are elided in this extract.
 */
547 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
549 u8 *ptr = data, *uuids_start = NULL;
550 struct bt_uuid *uuid;
555 list_for_each_entry(uuid, &hdev->uuids, list) {
556 if (uuid->size != 128)
562 uuids_start[1] = EIR_UUID128_ALL;
566 /* Stop if not enough space to put next UUID */
567 if ((ptr - data) + 16 > len) {
568 uuids_start[1] = EIR_UUID128_SOME;
572 memcpy(ptr, uuid->uuid, 16);
574 uuids_start[0] += 16;
/* Build the Extended Inquiry Response payload at @data: local name
 * (complete or shortened), inquiry TX power, Device ID record and the
 * 16/32/128-bit service UUID lists.
 * NOTE(review): 'ptr'/'name_len' declarations, the name truncation
 * branch, pointer advances after the TX-power and Device ID fields,
 * field-length bytes and braces are elided in this extract.
 */
580 static void create_eir(struct hci_dev *hdev, u8 *data)
585 name_len = strlen(hdev->dev_name);
591 ptr[1] = EIR_NAME_SHORT;
593 ptr[1] = EIR_NAME_COMPLETE;
595 /* EIR Data length */
596 ptr[0] = name_len + 1;
598 memcpy(ptr + 2, hdev->dev_name, name_len);
600 ptr += (name_len + 2);
603 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
605 ptr[1] = EIR_TX_POWER;
606 ptr[2] = (u8) hdev->inq_tx_power;
611 if (hdev->devid_source > 0) {
613 ptr[1] = EIR_DEVICE_ID;
/* Device ID record: source, vendor, product, version (LE16 each) */
615 put_unaligned_le16(hdev->devid_source, ptr + 2);
616 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
617 put_unaligned_le16(hdev->devid_product, ptr + 6);
618 put_unaligned_le16(hdev->devid_version, ptr + 8);
/* Remaining space is shared by the three UUID list builders */
623 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
624 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
625 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Regenerate the EIR data and queue a Write EIR command, but only
 * when powered, EIR-capable, SSP-enabled, not service-caching, and
 * the generated data actually differs from the cached copy.
 * NOTE(review): the early 'return' statements after each guard are
 * elided in this extract.
 */
628 void __hci_req_update_eir(struct hci_request *req)
630 struct hci_dev *hdev = req->hdev;
631 struct hci_cp_write_eir cp;
633 if (!hdev_is_powered(hdev))
636 if (!lmp_ext_inq_capable(hdev))
639 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
642 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
645 memset(&cp, 0, sizeof(cp));
647 create_eir(hdev, cp.data);
/* Skip the command if nothing changed since the last write */
649 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
652 memcpy(hdev->eir, cp.data, sizeof(cp.data));
654 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Queue the command that disables LE scanning, using the extended
 * variant when the controller supports extended scanning.
 * NOTE(review): the '&cp' argument continuation after line 666 and
 * the else keyword/braces are elided in this extract.
 */
657 void hci_req_add_le_scan_disable(struct hci_request *req)
659 struct hci_dev *hdev = req->hdev;
661 if (use_ext_scan(hdev)) {
662 struct hci_cp_le_set_ext_scan_enable cp;
664 memset(&cp, 0, sizeof(cp));
665 cp.enable = LE_SCAN_DISABLE;
666 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
669 struct hci_cp_le_set_scan_enable cp;
671 memset(&cp, 0, sizeof(cp));
672 cp.enable = LE_SCAN_DISABLE;
673 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Queue an LE Add Device To White List command for @params' address.
 * NOTE(review): '¶ms' on line 683 is mojibake for '&params'
 * ('&para' collapsed to the pilcrow character) — must be restored
 * when the file is repaired.
 */
677 static void add_to_white_list(struct hci_request *req,
678 struct hci_conn_params *params)
680 struct hci_cp_le_add_to_white_list cp;
682 cp.bdaddr_type = params->addr_type;
683 bacpy(&cp.bdaddr, ¶ms->addr);
685 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
/* Reconcile the controller's LE white list with pend_le_conns and
 * pend_le_reports: remove stale entries, add missing ones, and return
 * the scan filter policy to use (white list vs. accept-all). Falls
 * back to accept-all when the list overflows or an entry resolves to
 * an IRK (RPAs cannot be matched by the white list).
 * NOTE(review): this extract is elided — 'continue'/'return'
 * statements, the '&cp' argument continuation after line 714, closing
 * braces and the final return of the filter policy are missing.
 * '¶ms' on lines 739/747/763/771 is mojibake for '&params'.
 */
688 static u8 update_white_list(struct hci_request *req)
690 struct hci_dev *hdev = req->hdev;
691 struct hci_conn_params *params;
692 struct bdaddr_list *b;
693 uint8_t white_list_entries = 0;
695 /* Go through the current white list programmed into the
696 * controller one by one and check if that address is still
697 * in the list of pending connections or list of devices to
698 * report. If not present in either list, then queue the
699 * command to remove it from the controller.
701 list_for_each_entry(b, &hdev->le_white_list, list) {
702 /* If the device is neither in pend_le_conns nor
703 * pend_le_reports then remove it from the whitelist.
705 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
706 &b->bdaddr, b->bdaddr_type) &&
707 !hci_pend_le_action_lookup(&hdev->pend_le_reports,
708 &b->bdaddr, b->bdaddr_type)) {
709 struct hci_cp_le_del_from_white_list cp;
711 cp.bdaddr_type = b->bdaddr_type;
712 bacpy(&cp.bdaddr, &b->bdaddr);
714 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
719 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
720 /* White list can not be used with RPAs */
724 white_list_entries++;
727 /* Since all no longer valid white list entries have been
728 * removed, walk through the list of pending connections
729 * and ensure that any new device gets programmed into
732 * If the list of the devices is larger than the list of
733 * available white list entries in the controller, then
734 * just abort and return filter policy value to not use the
737 list_for_each_entry(params, &hdev->pend_le_conns, action) {
738 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
739 ¶ms->addr, params->addr_type))
742 if (white_list_entries >= hdev->le_white_list_size) {
743 /* Select filter policy to accept all advertising */
747 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
748 params->addr_type)) {
749 /* White list can not be used with RPAs */
753 white_list_entries++;
754 add_to_white_list(req, params);
757 /* After adding all new pending connections, walk through
758 * the list of pending reports and also add these to the
759 * white list if there is still space.
761 list_for_each_entry(params, &hdev->pend_le_reports, action) {
762 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
763 ¶ms->addr, params->addr_type))
766 if (white_list_entries >= hdev->le_white_list_size) {
767 /* Select filter policy to accept all advertising */
771 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
772 params->addr_type)) {
773 /* White list can not be used with RPAs */
777 white_list_entries++;
778 add_to_white_list(req, params);
781 /* Select filter policy to use white list */
/* Scanning should use a resolvable private address iff LE privacy is
 * enabled on the device.
 */
785 static bool scan_use_rpa(struct hci_dev *hdev)
787 return hci_dev_test_flag(hdev, HCI_PRIVACY);
/* Queue the commands that program and enable LE scanning with the
 * given parameters. Uses the extended scan commands (per-PHY
 * parameter blocks for 1M and Coded PHY) when the controller supports
 * them, otherwise the legacy Set Scan Parameters / Enable pair.
 * NOTE(review): 'plen' declaration, the phy_params advance between
 * the 1M and Coded blocks, argument continuations after lines 838/857
 * and closing braces are elided in this extract. '¶m_cp' on line
 * 851 is mojibake for '&param_cp'.
 */
790 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
791 u16 window, u8 own_addr_type, u8 filter_policy)
793 struct hci_dev *hdev = req->hdev;
795 /* Use ext scanning if set ext scan param and ext scan enable is
798 if (use_ext_scan(hdev)) {
799 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
800 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
801 struct hci_cp_le_scan_phy_params *phy_params;
/* Room for the fixed header plus one phy block per supported PHY */
802 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
805 ext_param_cp = (void *)data;
806 phy_params = (void *)ext_param_cp->data;
808 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
809 ext_param_cp->own_addr_type = own_addr_type;
810 ext_param_cp->filter_policy = filter_policy;
812 plen = sizeof(*ext_param_cp);
814 if (scan_1m(hdev) || scan_2m(hdev)) {
815 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
817 memset(phy_params, 0, sizeof(*phy_params));
818 phy_params->type = type;
819 phy_params->interval = cpu_to_le16(interval);
820 phy_params->window = cpu_to_le16(window);
822 plen += sizeof(*phy_params);
826 if (scan_coded(hdev)) {
827 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
829 memset(phy_params, 0, sizeof(*phy_params));
830 phy_params->type = type;
831 phy_params->interval = cpu_to_le16(interval);
832 phy_params->window = cpu_to_le16(window);
834 plen += sizeof(*phy_params);
838 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
841 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
842 ext_enable_cp.enable = LE_SCAN_ENABLE;
843 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
845 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
846 sizeof(ext_enable_cp), &ext_enable_cp);
848 struct hci_cp_le_set_scan_param param_cp;
849 struct hci_cp_le_set_scan_enable enable_cp;
851 memset(¶m_cp, 0, sizeof(param_cp));
852 param_cp.type = type;
853 param_cp.interval = cpu_to_le16(interval);
854 param_cp.window = cpu_to_le16(window);
855 param_cp.own_address_type = own_addr_type;
856 param_cp.filter_policy = filter_policy;
857 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
860 memset(&enable_cp, 0, sizeof(enable_cp));
861 enable_cp.enable = LE_SCAN_ENABLE;
862 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
863 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Queue the commands for LE passive (background) scanning: pick the
 * own address type, reconcile the white list, derive the filter
 * policy (including Extended Scanner Filter Policies when privacy is
 * on) and start the scan with the device's passive-scan timing.
 * NOTE(review): 'own_addr_type'/'filter_policy' declarations, the
 * '&own_addr_type) < 0' continuation/early return after line 880 and
 * braces are elided in this extract.
 */
868 void hci_req_add_le_passive_scan(struct hci_request *req)
870 struct hci_dev *hdev = req->hdev;
874 /* Set require_privacy to false since no SCAN_REQ are sent
875 * during passive scanning. Not using a non-resolvable address
876 * here is important so that peer devices using direct
877 * advertising with our address will be correctly reported
880 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
884 /* Adding or removing entries from the white list must
885 * happen before enabling scanning. The controller does
886 * not allow white list modification while scanning.
888 filter_policy = update_white_list(req);
890 /* When the controller is using random resolvable addresses and
891 * with that having LE privacy enabled, then controllers with
892 * Extended Scanner Filter Policies support can now enable support
893 * for handling directed advertising.
895 * So instead of using filter polices 0x00 (no whitelist)
896 * and 0x01 (whitelist enabled) use the new filter policies
897 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
899 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
900 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
901 filter_policy |= 0x02;
903 hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
904 hdev->le_scan_window, own_addr_type, filter_policy);
/* Return the scan-response length of advertising instance @instance,
 * or 0 for instance 0.
 * NOTE(review): the early returns (instance 0, instance not found)
 * are elided in this extract.
 */
907 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
909 struct adv_info *adv_instance;
911 /* Ignore instance 0 */
912 if (instance == 0x00)
915 adv_instance = hci_find_adv_instance(hdev, instance);
919 /* TODO: Take into account the "appearance" and "local-name" flags here.
920 * These are currently being ignored as they are not supported.
922 return adv_instance->scan_rsp_len;
/* Like get_adv_instance_scan_rsp_len but for the currently active
 * advertising instance (hdev->cur_adv_instance).
 * NOTE(review): the early returns are elided in this extract.
 */
925 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
927 u8 instance = hdev->cur_adv_instance;
928 struct adv_info *adv_instance;
930 /* Ignore instance 0 */
931 if (instance == 0x00)
934 adv_instance = hci_find_adv_instance(hdev, instance);
938 /* TODO: Take into account the "appearance" and "local-name" flags here.
939 * These are currently being ignored as they are not supported.
941 return adv_instance->scan_rsp_len;
/* Queue the command that turns advertising off — extended variant
 * (disable all sets) when supported, legacy Set Advertising Enable
 * otherwise.
 * NOTE(review): the memset/enable initialisation in the extended
 * branch, the else branch with 'u8 enable = 0x00;' and braces are
 * elided in this extract.
 */
944 void __hci_req_disable_advertising(struct hci_request *req)
946 if (ext_adv_capable(req->hdev)) {
947 struct hci_cp_le_set_ext_adv_enable cp;
950 /* Disable all sets since we only support one set at the moment */
951 cp.num_of_sets = 0x00;
953 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
957 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Return the MGMT advertising flags for @instance. Instance 0 is
 * synthesised from the device's global ADVERTISING_CONNECTABLE /
 * DISCOVERABLE settings; other instances come from their adv_info.
 * NOTE(review): 'flags' declaration, the 'return flags;' for the
 * instance-0 branch, the NULL check after hci_find_adv_instance and
 * braces are elided in this extract.
 */
961 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
964 struct adv_info *adv_instance;
966 if (instance == 0x00) {
967 /* Instance 0 always manages the "Tx Power" and "Flags"
970 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
972 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
973 * corresponds to the "connectable" instance flag.
975 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
976 flags |= MGMT_ADV_FLAG_CONNECTABLE;
978 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
979 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
980 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
981 flags |= MGMT_ADV_FLAG_DISCOV;
986 adv_instance = hci_find_adv_instance(hdev, instance);
988 /* Return 0 when we got an invalid instance identifier. */
992 return adv_instance->flags;
/* Decide whether advertising should use a resolvable private address,
 * based on the privacy mode and the instance's discoverable flag.
 * NOTE(review): the 'return false'/'return true' statements after
 * each condition and the final return are elided in this extract.
 */
995 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
997 /* If privacy is not enabled don't use RPA */
998 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1001 /* If basic privacy mode is enabled use RPA */
1002 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1005 /* If limited privacy mode is enabled don't use RPA if we're
1006 * both discoverable and bondable.
1008 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1009 hci_dev_test_flag(hdev, HCI_BONDABLE))
1012 /* We're neither bondable nor discoverable in the limited
1013 * privacy mode, therefore use RPA.
/* Check the controller's supported-LE-states bitmap (hdev->le_states)
 * to decide whether advertising (connectable or not, per
 * @connectable) is permitted while LE connections exist in slave
 * and/or master role.
 * NOTE(review): the 'return true/false' statements after each
 * condition, the final return and braces are elided in this extract.
 */
1018 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1020 /* If there is no connection we are OK to advertise. */
1021 if (hci_conn_num(hdev, LE_LINK) == 0)
1024 /* Check le_states if there is any connection in slave role. */
1025 if (hdev->conn_hash.le_num_slave > 0) {
1026 /* Slave connection state and non connectable mode bit 20. */
1027 if (!connectable && !(hdev->le_states[2] & 0x10))
1030 /* Slave connection state and connectable mode bit 38
1031 * and scannable bit 21.
1033 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1034 !(hdev->le_states[2] & 0x20)))
1038 /* Check le_states if there is any connection in master role. */
1039 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1040 /* Master connection state and non connectable mode bit 18. */
1041 if (!connectable && !(hdev->le_states[2] & 0x02))
1044 /* Master connection state and connectable mode bit 35 and
1047 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1048 !(hdev->le_states[2] & 0x08)))
/* Queue the legacy commands to (re)enable advertising: choose the ADV
 * type (ADV_IND / ADV_SCAN_IND / ADV_NONCONN_IND), the own address
 * type, intervals and channel map, then Set Advertising Parameters
 * followed by Set Advertising Enable.
 * NOTE(review): 'flags'/'connectable' declarations, early returns,
 * the if/else structure around the type selection and braces are
 * elided in this extract.
 */
1055 void __hci_req_enable_advertising(struct hci_request *req)
1057 struct hci_dev *hdev = req->hdev;
1058 struct hci_cp_le_set_adv_param cp;
1059 u8 own_addr_type, enable = 0x01;
1061 u16 adv_min_interval, adv_max_interval;
1064 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1066 /* If the "connectable" instance flag was not set, then choose between
1067 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1069 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1070 mgmt_get_connectable(hdev);
1072 if (!is_advertising_allowed(hdev, connectable))
/* Restarting advertising requires disabling it first */
1075 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1076 __hci_req_disable_advertising(req);
1078 /* Clear the HCI_LE_ADV bit temporarily so that the
1079 * hci_update_random_address knows that it's safe to go ahead
1080 * and write a new random address. The flag will be set back on
1081 * as soon as the SET_ADV_ENABLE HCI command completes.
1083 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1085 /* Set require_privacy to true only when non-connectable
1086 * advertising is used. In that case it is fine to use a
1087 * non-resolvable private address.
1089 if (hci_update_random_address(req, !connectable,
1090 adv_use_rpa(hdev, flags),
1091 &own_addr_type) < 0)
1094 memset(&cp, 0, sizeof(cp));
1097 cp.type = LE_ADV_IND;
1099 adv_min_interval = hdev->le_adv_min_interval;
1100 adv_max_interval = hdev->le_adv_max_interval;
/* Non-connectable: scannable if a scan response exists */
1102 if (get_cur_adv_instance_scan_rsp_len(hdev))
1103 cp.type = LE_ADV_SCAN_IND;
1105 cp.type = LE_ADV_NONCONN_IND;
/* Fast advertising intervals apply outside general discoverable mode */
1107 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1108 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1109 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1110 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1112 adv_min_interval = hdev->le_adv_min_interval;
1113 adv_max_interval = hdev->le_adv_max_interval;
1117 cp.min_interval = cpu_to_le16(adv_min_interval);
1118 cp.max_interval = cpu_to_le16(adv_max_interval);
1119 cp.own_address_type = own_addr_type;
1120 cp.channel_map = hdev->le_adv_channel_map;
1122 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1124 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Append the local name to an AD buffer at @ptr (current length
 * @ad_len): complete name if it fits, the configured short name
 * otherwise, or a truncated copy of the full name as a last resort.
 * Returns the new AD length.
 * NOTE(review): 'short_len' declaration, the early 'return ad_len;',
 * guards around the short-name branch, the final length argument
 * after line 1157 and braces are elided in this extract.
 */
1127 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1130 size_t complete_len;
1132 /* no space left for name (+ NULL + type + len) */
1133 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1136 /* use complete name if present and fits */
1137 complete_len = strlen(hdev->dev_name);
1138 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1139 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1140 hdev->dev_name, complete_len + 1);
1142 /* use short name if present */
1143 short_len = strlen(hdev->short_name);
1145 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1146 hdev->short_name, short_len + 1);
1148 /* use shortened full name if present, we already know that name
1149 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1152 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1154 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1155 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1157 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
/* Append the Appearance AD field (little-endian u16) and return the
 * new AD length.
 */
1164 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1166 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
/* Build the default scan response: appearance (when set) followed by
 * the local name. Returns the total length written to @ptr.
 */
1169 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1171 u8 scan_rsp_len = 0;
1173 if (hdev->appearance) {
1174 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1177 return append_local_name(hdev, ptr, scan_rsp_len);
/* Build the scan response for advertising instance @instance:
 * optional appearance, the instance's own scan_rsp_data, and
 * optionally the local name, depending on the instance flags.
 * Returns the length written.
 * NOTE(review): the 'u8 *ptr' parameter line, 'instance_flags'
 * declaration, the NULL check after hci_find_adv_instance and braces
 * are elided in this extract.
 */
1180 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1183 struct adv_info *adv_instance;
1185 u8 scan_rsp_len = 0;
1187 adv_instance = hci_find_adv_instance(hdev, instance);
1191 instance_flags = adv_instance->flags;
1193 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1194 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1197 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1198 adv_instance->scan_rsp_len);
1200 scan_rsp_len += adv_instance->scan_rsp_len;
1202 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1203 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1205 return scan_rsp_len;
/* Regenerate the scan response data for @instance and queue the
 * matching Set (Ext) Scan Response Data command — skipped when LE is
 * disabled or the data is unchanged from the cached copy.
 * NOTE(review): 'len' declaration, early returns, the if/else around
 * instance vs. default data, cp.length/handle assignments, argument
 * continuation after line 1239 and braces are elided in this extract.
 */
1208 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1210 struct hci_dev *hdev = req->hdev;
1213 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1216 if (ext_adv_capable(hdev)) {
1217 struct hci_cp_le_set_ext_scan_rsp_data cp;
1219 memset(&cp, 0, sizeof(cp));
1222 len = create_instance_scan_rsp_data(hdev, instance,
1225 len = create_default_scan_rsp_data(hdev, cp.data);
/* Skip the command if the data did not change */
1227 if (hdev->scan_rsp_data_len == len &&
1228 !memcmp(cp.data, hdev->scan_rsp_data, len))
1231 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1232 hdev->scan_rsp_data_len = len;
1236 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1237 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1239 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1242 struct hci_cp_le_set_scan_rsp_data cp;
1244 memset(&cp, 0, sizeof(cp));
1247 len = create_instance_scan_rsp_data(hdev, instance,
1250 len = create_default_scan_rsp_data(hdev, cp.data);
1252 if (hdev->scan_rsp_data_len == len &&
1253 !memcmp(cp.data, hdev->scan_rsp_data, len))
1256 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1257 hdev->scan_rsp_data_len = len;
1261 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Build the advertising data for @instance at @ptr: the Flags AD
 * field (general/limited discoverable, BR/EDR-not-supported), the
 * instance's own adv_data, and optionally a TX power field. Returns
 * the total length.
 * NOTE(review): 'instance_flags'/'adv_tx_power' declarations, the
 * instance-0 handling, the flags-field emission (length/type bytes
 * and ptr advance), the extended-advertising tx_power selection
 * branches, closing braces and the final return are elided in this
 * extract.
 */
1265 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1267 struct adv_info *adv_instance = NULL;
1268 u8 ad_len = 0, flags = 0;
1271 /* Return 0 when the current instance identifier is invalid. */
1273 adv_instance = hci_find_adv_instance(hdev, instance);
1278 instance_flags = get_adv_instance_flags(hdev, instance);
1280 /* If instance already has the flags set skip adding it once
1283 if (adv_instance && eir_get_data(adv_instance->adv_data,
1284 adv_instance->adv_data_len, EIR_FLAGS,
1288 /* The Add Advertising command allows userspace to set both the general
1289 * and limited discoverable flags.
1291 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1292 flags |= LE_AD_GENERAL;
1294 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1295 flags |= LE_AD_LIMITED;
1297 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1298 flags |= LE_AD_NO_BREDR;
1300 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1301 /* If a discovery flag wasn't provided, simply use the global
1305 flags |= mgmt_get_adv_discov_flags(hdev);
1307 /* If flags would still be empty, then there is no need to
1308 * include the "Flags" AD field".
/* Copy the instance's raw advertising payload after the flags */
1322 memcpy(ptr, adv_instance->adv_data,
1323 adv_instance->adv_data_len);
1324 ad_len += adv_instance->adv_data_len;
1325 ptr += adv_instance->adv_data_len;
1328 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1331 if (ext_adv_capable(hdev)) {
1333 adv_tx_power = adv_instance->tx_power;
1335 adv_tx_power = hdev->adv_tx_power;
1337 adv_tx_power = hdev->adv_tx_power;
1340 /* Provide Tx Power only if we can provide a valid value for it */
1341 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1343 ptr[1] = EIR_TX_POWER;
1344 ptr[2] = (u8)adv_tx_power;
/* Regenerate the advertising data for @instance and queue the
 * matching Set (Ext) Advertising Data command — skipped when LE is
 * disabled or the data is unchanged from the cached copy.
 * NOTE(review): 'len' declaration, early returns, cp.length/handle
 * assignments and braces are elided in this extract.
 */
1354 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1356 struct hci_dev *hdev = req->hdev;
1359 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1362 if (ext_adv_capable(hdev)) {
1363 struct hci_cp_le_set_ext_adv_data cp;
1365 memset(&cp, 0, sizeof(cp));
1367 len = create_instance_adv_data(hdev, instance, cp.data);
1369 /* There's nothing to do if the data hasn't changed */
1370 if (hdev->adv_data_len == len &&
1371 memcmp(cp.data, hdev->adv_data, len) == 0)
1374 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1375 hdev->adv_data_len = len;
1379 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1380 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1382 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1384 struct hci_cp_le_set_adv_data cp;
1386 memset(&cp, 0, sizeof(cp));
1388 len = create_instance_adv_data(hdev, instance, cp.data);
1390 /* There's nothing to do if the data hasn't changed */
1391 if (hdev->adv_data_len == len &&
1392 memcmp(cp.data, hdev->adv_data, len) == 0)
1395 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1396 hdev->adv_data_len = len;
1400 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Convenience wrapper: build a one-shot request that updates the
 * advertising data for @instance and run it immediately.
 * Returns the result of hci_req_run().
 */
1404 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1406 struct hci_request req;
1408 hci_req_init(&req, hdev);
1409 __hci_req_update_adv_data(&req, instance);
1411 return hci_req_run(&req, NULL);
/* Request-completion callback for re-enabling advertising: only logs the
 * command status, no further action is taken.
 */
1414 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1416 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable advertising after it was implicitly stopped (e.g. by a
 * connection). Prefers rescheduling the current instance; otherwise falls
 * back to instance 0x00, via extended or legacy commands as supported.
 * Bails out when neither HCI_ADVERTISING is set nor any instance exists.
 * NOTE(review): listing is elided (returns/braces missing) — confirm
 * against the full file.
 */
1419 void hci_req_reenable_advertising(struct hci_dev *hdev)
1421 struct hci_request req;
1423 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1424 list_empty(&hdev->adv_instances))
1427 hci_req_init(&req, hdev);
1429 if (hdev->cur_adv_instance) {
1430 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1433 if (ext_adv_capable(hdev)) {
1434 __hci_req_start_ext_adv(&req, 0x00);
1436 __hci_req_update_adv_data(&req, 0x00);
1437 __hci_req_update_scan_rsp_data(&req, 0x00);
1438 __hci_req_enable_advertising(&req);
1442 hci_req_run(&req, adv_enable_complete);
/* Delayed-work handler fired when the current advertising instance's
 * timeout expires: clears the pending timeout, removes the instance, and
 * disables advertising if no instances remain.
 * NOTE(review): the matching hci_dev_lock() call is not visible in this
 * elided listing, but hci_dev_unlock() is — verify lock pairing upstream.
 */
1445 static void adv_timeout_expire(struct work_struct *work)
1447 struct hci_dev *hdev = container_of(work, struct hci_dev,
1448 adv_instance_expire.work);
1450 struct hci_request req;
1453 BT_DBG("%s", hdev->name);
1457 hdev->adv_instance_timeout = 0;
1459 instance = hdev->cur_adv_instance;
1460 if (instance == 0x00)
1463 hci_req_init(&req, hdev);
/* force=false: instance is deactivated, kept if lifetime remains. */
1465 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1467 if (list_empty(&hdev->adv_instances))
1468 __hci_req_disable_advertising(&req);
1470 hci_req_run(&req, NULL);
1473 hci_dev_unlock(hdev);
/* Select the own-address type and (when needed) a random address for
 * advertising:
 *  - privacy enabled + use_rpa: use an RPA, regenerating it when expired
 *    (per-instance or global, depending on @adv_instance);
 *  - require_privacy without RPA: generate a non-resolvable private
 *    address (NRPA);
 *  - otherwise: public address, rand_addr left as BDADDR_ANY.
 * Returns 0 on success, negative error from smp_generate_rpa() otherwise.
 * NOTE(review): listing is elided (variable declarations, returns, the
 * privacy-condition line, NRPA top-bit masking are missing) — verify
 * against the full file before relying on details.
 */
1476 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1477 bool use_rpa, struct adv_info *adv_instance,
1478 u8 *own_addr_type, bdaddr_t *rand_addr)
1482 bacpy(rand_addr, BDADDR_ANY);
1484 /* If privacy is enabled use a resolvable private address. If
1485 * current RPA has expired then generate a new one.
1490 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Per-instance RPA: reuse it while not expired and still current. */
1493 if (!adv_instance->rpa_expired &&
1494 !bacmp(&adv_instance->random_addr, &hdev->rpa))
1497 adv_instance->rpa_expired = false;
/* Global RPA: reuse while HCI_RPA_EXPIRED is clear and still set. */
1499 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1500 !bacmp(&hdev->random_addr, &hdev->rpa))
1504 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1506 BT_ERR("%s failed to generate new RPA", hdev->name);
1510 bacpy(rand_addr, &hdev->rpa);
/* Re-arm RPA expiry after rpa_timeout seconds. */
1512 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1514 queue_delayed_work(hdev->workqueue,
1515 &adv_instance->rpa_expired_cb, to);
1517 queue_delayed_work(hdev->workqueue,
1518 &hdev->rpa_expired, to);
1523 /* In case of required privacy without resolvable private address,
1524 * use an non-resolvable private address. This is useful for
1525 * non-connectable advertising.
1527 if (require_privacy) {
1531 /* The non-resolvable private address is generated
1532 * from random six bytes with the two most significant
1535 get_random_bytes(&nrpa, 6);
1538 /* The non-resolvable private address shall not be
1539 * equal to the public address.
1541 if (bacmp(&hdev->bdaddr, &nrpa))
1545 *own_addr_type = ADDR_LE_DEV_RANDOM;
1546 bacpy(rand_addr, &nrpa);
1551 /* No privacy so use a public address. */
1552 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Queue the LE Clear Advertising Sets command, removing all extended
 * advertising sets from the controller.
 */
1557 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1559 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
/* Build the LE Set Extended Advertising Parameters command for @instance:
 * choose event properties (connectable / scannable / non-connectable, and
 * legacy vs extended PDUs based on the SEC_MASK flags), PHYs, own-address
 * type, and queue a Set Advertising Set Random Address when a fresh random
 * address must be programmed. Returns 0 on success or a negative error
 * (advertising not allowed, random-address generation failure).
 * NOTE(review): listing is elided (declarations, returns, else lines,
 * closing braces are missing) — verify against the full file.
 */
1562 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1564 struct hci_cp_le_set_ext_adv_params cp;
1565 struct hci_dev *hdev = req->hdev;
1568 bdaddr_t random_addr;
1571 struct adv_info *adv_instance;
1573 /* In ext adv set param interval is 3 octets */
1574 const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1577 adv_instance = hci_find_adv_instance(hdev, instance);
1581 adv_instance = NULL;
1584 flags = get_adv_instance_flags(hdev, instance);
1586 /* If the "connectable" instance flag was not set, then choose between
1587 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1589 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1590 mgmt_get_connectable(hdev);
1592 if (!is_advertising_allowed(hdev, connectable))
1595 /* Set require_privacy to true only when non-connectable
1596 * advertising is used. In that case it is fine to use a
1597 * non-resolvable private address.
1599 err = hci_get_random_address(hdev, !connectable,
1600 adv_use_rpa(hdev, flags), adv_instance,
1601 &own_addr_type, &random_addr);
1605 memset(&cp, 0, sizeof(cp));
1607 memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1608 memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
/* Non-zero when a secondary-PHY (extended PDU) flag was requested. */
1610 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1614 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1616 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1617 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1619 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1621 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1624 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1626 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1629 cp.own_addr_type = own_addr_type;
1630 cp.channel_map = hdev->le_adv_channel_map;
1632 cp.handle = instance;
1634 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1635 cp.primary_phy = HCI_ADV_PHY_1M;
1636 cp.secondary_phy = HCI_ADV_PHY_2M;
1637 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1638 cp.primary_phy = HCI_ADV_PHY_CODED;
1639 cp.secondary_phy = HCI_ADV_PHY_CODED;
1641 /* In all other cases use 1M */
1642 cp.primary_phy = HCI_ADV_PHY_1M;
1643 cp.secondary_phy = HCI_ADV_PHY_1M;
1646 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1648 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1649 bacmp(&random_addr, BDADDR_ANY)) {
1650 struct hci_cp_le_set_adv_set_rand_addr cp;
1652 /* Check if random address need to be updated */
1654 if (!bacmp(&random_addr, &adv_instance->random_addr))
1657 if (!bacmp(&random_addr, &hdev->random_addr))
1661 memset(&cp, 0, sizeof(cp));
1664 bacpy(&cp.bdaddr, &random_addr);
1667 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
/* Queue LE Set Extended Advertising Enable for a single set (@instance),
 * passing the instance's duration (converted from seconds to the spec's
 * 10 ms units) so the controller itself times advertising out.
 * NOTE(review): listing is elided — the line assigning `cp` into `data`
 * and the enable-flag assignment are not visible; verify upstream.
 */
1674 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1676 struct hci_dev *hdev = req->hdev;
1677 struct hci_cp_le_set_ext_adv_enable *cp;
1678 struct hci_cp_ext_adv_set *adv_set;
/* Room for the enable header plus exactly one adv-set entry. */
1679 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1680 struct adv_info *adv_instance;
1683 adv_instance = hci_find_adv_instance(hdev, instance);
1687 adv_instance = NULL;
1691 adv_set = (void *) cp->data;
1693 memset(cp, 0, sizeof(*cp));
1696 cp->num_of_sets = 0x01;
1698 memset(adv_set, 0, sizeof(*adv_set));
1700 adv_set->handle = instance;
1702 /* Set duration per instance since controller is responsible for
1705 if (adv_instance && adv_instance->duration) {
1706 u16 duration = adv_instance->duration * MSEC_PER_SEC;
1708 /* Time = N * 10 ms */
1709 adv_set->duration = cpu_to_le16(duration / 10);
1712 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1713 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
/* Start extended advertising for @instance: disable any running
 * advertising first, set up the set's parameters, refresh scan response
 * data, then enable. Propagates setup errors; returns 0 otherwise.
 */
1719 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1721 struct hci_dev *hdev = req->hdev;
1724 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1725 __hci_req_disable_advertising(req);
1727 err = __hci_req_setup_ext_adv_instance(req, instance);
1731 __hci_req_update_scan_rsp_data(req, instance);
1732 __hci_req_enable_ext_advertising(req, instance);
/* Make @instance the current advertising instance and queue the commands
 * to start it. Computes the timeout from the instance's duration and
 * remaining lifetime, arms the legacy expiry work (controllers with
 * extended advertising time out on their own), and skips HCI traffic when
 * the same instance is already advertising and @force is false.
 * NOTE(review): listing is elided (error returns, `timeout` declaration,
 * `force` parameter line are missing) — verify against the full file.
 */
1737 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1740 struct hci_dev *hdev = req->hdev;
1741 struct adv_info *adv_instance = NULL;
1744 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1745 list_empty(&hdev->adv_instances))
1748 if (hdev->adv_instance_timeout)
1751 adv_instance = hci_find_adv_instance(hdev, instance);
1755 /* A zero timeout means unlimited advertising. As long as there is
1756 * only one instance, duration should be ignored. We still set a timeout
1757 * in case further instances are being added later on.
1759 * If the remaining lifetime of the instance is more than the duration
1760 * then the timeout corresponds to the duration, otherwise it will be
1761 * reduced to the remaining instance lifetime.
1763 if (adv_instance->timeout == 0 ||
1764 adv_instance->duration <= adv_instance->remaining_time)
1765 timeout = adv_instance->duration;
1767 timeout = adv_instance->remaining_time;
1769 /* The remaining time is being reduced unless the instance is being
1770 * advertised without time limit.
1772 if (adv_instance->timeout)
1773 adv_instance->remaining_time =
1774 adv_instance->remaining_time - timeout;
1776 /* Only use work for scheduling instances with legacy advertising */
1777 if (!ext_adv_capable(hdev)) {
1778 hdev->adv_instance_timeout = timeout;
1779 queue_delayed_work(hdev->req_workqueue,
1780 &hdev->adv_instance_expire,
1781 msecs_to_jiffies(timeout * 1000));
1784 /* If we're just re-scheduling the same instance again then do not
1785 * execute any HCI commands. This happens when a single instance is
1788 if (!force && hdev->cur_adv_instance == instance &&
1789 hci_dev_test_flag(hdev, HCI_LE_ADV))
1792 hdev->cur_adv_instance = instance;
1793 if (ext_adv_capable(hdev)) {
1794 __hci_req_start_ext_adv(req, instance);
1796 __hci_req_update_adv_data(req, instance);
1797 __hci_req_update_scan_rsp_data(req, instance);
1798 __hci_req_enable_advertising(req);
/* Cancel a pending advertising-instance expiry: clear the recorded
 * timeout and the delayed work, if one was armed.
 */
1804 static void cancel_adv_timeout(struct hci_dev *hdev)
1806 if (hdev->adv_instance_timeout) {
1807 hdev->adv_instance_timeout = 0;
1808 cancel_delayed_work(&hdev->adv_instance_expire);
1812 /* For a single instance:
1813 * - force == true: The instance will be removed even when its remaining
1814 * lifetime is not zero.
1815 * - force == false: the instance will be deactivated but kept stored unless
1816 * the remaining lifetime is zero.
1818 * For instance == 0x00:
1819 * - force == true: All instances will be removed regardless of their timeout
1821 * - force == false: Only instances that have a timeout will be removed.
1823 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1824 struct hci_request *req, u8 instance,
1827 struct adv_info *adv_instance, *n, *next_instance = NULL;
1831 /* Cancel any timeout concerning the removed instance(s). */
1832 if (!instance || hdev->cur_adv_instance == instance)
1833 cancel_adv_timeout(hdev);
1835 /* Get the next instance to advertise BEFORE we remove
1836 * the current one. This can be the same instance again
1837 * if there is only one instance.
1839 if (instance && hdev->cur_adv_instance == instance)
1840 next_instance = hci_get_next_instance(hdev, instance);
1842 if (instance == 0x00) {
1843 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
/* Without force, skip instances that advertise indefinitely. */
1845 if (!(force || adv_instance->timeout))
1848 rem_inst = adv_instance->instance;
1849 err = hci_remove_adv_instance(hdev, rem_inst);
1851 mgmt_advertising_removed(sk, hdev, rem_inst);
/* Single-instance path. */
1854 adv_instance = hci_find_adv_instance(hdev, instance);
1856 if (force || (adv_instance && adv_instance->timeout &&
1857 !adv_instance->remaining_time)) {
1858 /* Don't advertise a removed instance. */
1859 if (next_instance &&
1860 next_instance->instance == instance)
1861 next_instance = NULL;
1863 err = hci_remove_adv_instance(hdev, instance);
1865 mgmt_advertising_removed(sk, hdev, instance);
/* Only reschedule when a request is given and we are powered and not
 * using the global HCI_ADVERTISING setting.
 */
1869 if (!req || !hdev_is_powered(hdev) ||
1870 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1874 __hci_req_schedule_adv_instance(req, next_instance->instance,
/* Queue LE Set Random Address with @rpa, unless advertising or an LE
 * connection attempt is in progress — in that case defer by marking the
 * RPA expired so the next cycle retries the update.
 */
1878 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1880 struct hci_dev *hdev = req->hdev;
1882 /* If we're advertising or initiating an LE connection we can't
1883 * go ahead and change the random address at this time. This is
1884 * because the eventual initiator address used for the
1885 * subsequently created connection will be undefined (some
1886 * controllers use the new address and others the one we had
1887 * when the operation started).
1889 * In this kind of scenario skip the update and let the random
1890 * address be updated at the next cycle.
1892 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1893 hci_lookup_le_connect(hdev)) {
1894 BT_DBG("Deferring random address update");
1895 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1899 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Decide and program the device's own address for scanning/connecting:
 * RPA when privacy is on, NRPA when privacy is required without RPA,
 * static random address when forced/configured, public otherwise.
 * Sets *own_addr_type accordingly; returns 0 or a negative error from
 * RPA generation.
 * NOTE(review): listing is elided (privacy condition, declarations,
 * returns, NRPA top-bit masking missing) — verify against the full file.
 */
1902 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1903 bool use_rpa, u8 *own_addr_type)
1905 struct hci_dev *hdev = req->hdev;
1908 /* If privacy is enabled use a resolvable private address. If
1909 * current RPA has expired or there is something else than
1910 * the current RPA in use, then generate a new one.
1915 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Reuse the current RPA while it is unexpired and still programmed. */
1917 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1918 !bacmp(&hdev->random_addr, &hdev->rpa))
1921 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1923 bt_dev_err(hdev, "failed to generate new RPA");
1927 set_random_addr(req, &hdev->rpa);
/* Re-arm RPA expiry after rpa_timeout seconds. */
1929 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1930 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1935 /* In case of required privacy without resolvable private address,
1936 * use an non-resolvable private address. This is useful for active
1937 * scanning and non-connectable advertising.
1939 if (require_privacy) {
1943 /* The non-resolvable private address is generated
1944 * from random six bytes with the two most significant
1947 get_random_bytes(&nrpa, 6);
1950 /* The non-resolvable private address shall not be
1951 * equal to the public address.
1953 if (bacmp(&hdev->bdaddr, &nrpa))
1957 *own_addr_type = ADDR_LE_DEV_RANDOM;
1958 set_random_addr(req, &nrpa);
1962 /* If forcing static address is in use or there is no public
1963 * address use the static address as random address (but skip
1964 * the HCI command if the current random address is already the
1967 * In case BR/EDR has been disabled on a dual-mode controller
1968 * and a static address has been configured, then use that
1969 * address instead of the public BR/EDR address.
1971 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1972 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1973 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1974 bacmp(&hdev->static_addr, BDADDR_ANY))) {
1975 *own_addr_type = ADDR_LE_DEV_RANDOM;
1976 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1977 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1978 &hdev->static_addr);
1982 /* Neither privacy nor static address is being used so use a
1985 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Return true when any whitelist entry has an ACL connection that is not
 * in BT_CONNECTED/BT_CONFIG state — i.e. page scan should stay on so the
 * remote device can reconnect.
 */
1990 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1992 struct bdaddr_list *b;
1994 list_for_each_entry(b, &hdev->whitelist, list) {
1995 struct hci_conn *conn;
1997 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2001 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
/* Queue Write Scan Enable so page/inquiry scan reflect the current
 * connectable/discoverable settings; no-op when BR/EDR is off, the
 * device is unpowered or powering down, or the bits already match.
 * NOTE(review): listing is elided (`scan` declaration, SCAN_PAGE
 * assignment, returns missing) — verify against the full file.
 */
2008 void __hci_req_update_scan(struct hci_request *req)
2010 struct hci_dev *hdev = req->hdev;
2013 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2016 if (!hdev_is_powered(hdev))
2019 if (mgmt_powering_down(hdev))
2022 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2023 disconnected_whitelist_entries(hdev))
2026 scan = SCAN_DISABLED;
2028 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2029 scan |= SCAN_INQUIRY;
/* Skip the command when controller state already matches. */
2031 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2032 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2035 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* hci_req_sync callback: update scan settings under the dev lock. */
2038 static int update_scan(struct hci_request *req, unsigned long opt)
2040 hci_dev_lock(req->hdev);
2041 __hci_req_update_scan(req);
2042 hci_dev_unlock(req->hdev);
/* Workqueue handler: run the scan-settings update synchronously. */
2046 static void scan_update_work(struct work_struct *work)
2048 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2050 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
/* hci_req_sync callback for a connectable-setting change: refresh scan
 * state, advertising data/parameters, and background scanning.
 * NOTE(review): listing is elided (hci_dev_lock and else lines missing);
 * hci_dev_unlock() at the end implies a matching lock — verify upstream.
 */
2053 static int connectable_update(struct hci_request *req, unsigned long opt)
2055 struct hci_dev *hdev = req->hdev;
2059 __hci_req_update_scan(req);
2061 /* If BR/EDR is not enabled and we disable advertising as a
2062 * by-product of disabling connectable, we need to update the
2063 * advertising flags.
2065 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2066 __hci_req_update_adv_data(req, hdev->cur_adv_instance)
2068 /* Update the advertising parameters if necessary */
2069 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2070 !list_empty(&hdev->adv_instances)) {
2071 if (ext_adv_capable(hdev))
2072 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2074 __hci_req_enable_advertising(req);
2077 __hci_update_background_scan(req);
2079 hci_dev_unlock(hdev);
/* Workqueue handler: apply the connectable update synchronously and
 * report the resulting status to the management interface.
 */
2084 static void connectable_update_work(struct work_struct *work)
2086 struct hci_dev *hdev = container_of(work, struct hci_dev,
2087 connectable_update);
2090 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2091 mgmt_set_connectable_complete(hdev, status);
/* OR together the service-class hints of all registered UUIDs to form
 * the service-class byte of the Class of Device.
 */
2094 static u8 get_service_classes(struct hci_dev *hdev)
2096 struct bt_uuid *uuid;
2099 list_for_each_entry(uuid, &hdev->uuids, list)
2100 val |= uuid->svc_hint;
/* Queue Write Class of Device from minor/major class plus the derived
 * service classes; skipped while unpowered, BR/EDR-disabled, the service
 * cache is active, or when the value is unchanged.
 * NOTE(review): listing is elided (`cod` declaration, returns and the
 * limited-discoverable bit-set line missing) — verify upstream.
 */
2105 void __hci_req_update_class(struct hci_request *req)
2107 struct hci_dev *hdev = req->hdev;
2110 BT_DBG("%s", hdev->name);
2112 if (!hdev_is_powered(hdev))
2115 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2118 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2121 cod[0] = hdev->minor_class;
2122 cod[1] = hdev->major_class;
2123 cod[2] = get_service_classes(hdev);
2125 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2128 if (memcmp(cod, hdev->dev_class, 3) == 0)
2131 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Queue Write Current IAC LAP: LIAC+GIAC (0x9e8b00, 0x9e8b33) in limited
 * discoverable mode, GIAC alone in general discoverable mode. No-op when
 * not discoverable. LAP bytes are little-endian on the wire.
 */
2134 static void write_iac(struct hci_request *req)
2136 struct hci_dev *hdev = req->hdev;
2137 struct hci_cp_write_current_iac_lap cp;
2139 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2142 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2143 /* Limited discoverable mode */
2144 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2145 cp.iac_lap[0] = 0x00; /* LIAC */
2146 cp.iac_lap[1] = 0x8b;
2147 cp.iac_lap[2] = 0x9e;
2148 cp.iac_lap[3] = 0x33; /* GIAC */
2149 cp.iac_lap[4] = 0x8b;
2150 cp.iac_lap[5] = 0x9e;
2152 /* General discoverable mode */
2154 cp.iac_lap[0] = 0x33; /* GIAC */
2155 cp.iac_lap[1] = 0x8b;
2156 cp.iac_lap[2] = 0x9e;
/* Payload: num_iac byte plus 3 LAP bytes per IAC. */
2159 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2160 (cp.num_iac * 3) + 1, &cp);
/* hci_req_sync callback for a discoverable-setting change: refresh IAC,
 * scan enable and class for BR/EDR, and advertising data (plus the full
 * advertising restart in limited-privacy mode, where the local address
 * depends on discoverability).
 * NOTE(review): listing is elided (hci_dev_lock, write_iac call and else
 * lines missing); hci_dev_unlock() implies a matching lock — verify.
 */
2163 static int discoverable_update(struct hci_request *req, unsigned long opt)
2165 struct hci_dev *hdev = req->hdev;
2169 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2171 __hci_req_update_scan(req);
2172 __hci_req_update_class(req);
2175 /* Advertising instances don't use the global discoverable setting, so
2176 * only update AD if advertising was enabled using Set Advertising.
2178 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2179 __hci_req_update_adv_data(req, 0x00);
2181 /* Discoverable mode affects the local advertising
2182 * address in limited privacy mode.
2184 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2185 if (ext_adv_capable(hdev))
2186 __hci_req_start_ext_adv(req, 0x00);
2188 __hci_req_enable_advertising(req);
2192 hci_dev_unlock(hdev);
/* Workqueue handler: apply the discoverable update synchronously and
 * report the resulting status to the management interface.
 */
2197 static void discoverable_update_work(struct work_struct *work)
2199 struct hci_dev *hdev = container_of(work, struct hci_dev,
2200 discoverable_update);
2203 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2204 mgmt_set_discoverable_complete(hdev, status);
/* Queue the command that tears down @conn, chosen by connection state:
 * disconnect (or AMP physical-link disconnect) for established links,
 * create-connection cancel for outgoing attempts, and reject for incoming
 * ACL/SCO requests; otherwise just mark the connection closed.
 * NOTE(review): listing is elided (case labels, break statements, some
 * struct field assignments missing) — verify against the full file.
 */
2207 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2210 switch (conn->state) {
2213 if (conn->type == AMP_LINK) {
2214 struct hci_cp_disconn_phy_link cp;
2216 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2218 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2221 struct hci_cp_disconnect dc;
2223 dc.handle = cpu_to_le16(conn->handle);
2225 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2228 conn->state = BT_DISCONN;
2232 if (conn->type == LE_LINK) {
2233 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2235 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
/* Create Connection Cancel only exists since Bluetooth 1.2. */
2237 } else if (conn->type == ACL_LINK) {
2238 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2240 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2245 if (conn->type == ACL_LINK) {
2246 struct hci_cp_reject_conn_req rej;
2248 bacpy(&rej.bdaddr, &conn->dst);
2249 rej.reason = reason;
2251 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2253 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2254 struct hci_cp_reject_sync_conn_req rej;
2256 bacpy(&rej.bdaddr, &conn->dst);
2258 /* SCO rejection has its own limited set of
2259 * allowed error values (0x0D-0x0F) which isn't
2260 * compatible with most values passed to this
2261 * function. To be safe hard-code one of the
2262 * values that's suitable for SCO.
2264 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2266 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2271 conn->state = BT_CLOSED;
/* Request-completion callback for hci_abort_conn(): only logs failures. */
2276 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2279 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
/* Build and run a request that aborts @conn with @reason. -ENODATA from
 * hci_req_run() (empty request, nothing to send) is not treated as an
 * error worth logging.
 */
2282 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2284 struct hci_request req;
2287 hci_req_init(&req, conn->hdev);
2289 __hci_abort_conn(&req, conn, reason);
2291 err = hci_req_run(&req, abort_conn_complete);
2292 if (err && err != -ENODATA) {
2293 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
/* hci_req_sync callback: refresh background scanning under the dev lock. */
2300 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2302 hci_dev_lock(req->hdev);
2303 __hci_update_background_scan(req);
2304 hci_dev_unlock(req->hdev);
/* Workqueue handler: run the background-scan update; on failure, fail
 * any pending outgoing LE connection attempt with the reported status.
 * NOTE(review): listing is elided (error-path branch and hci_dev_lock
 * missing) — hci_dev_unlock() implies a matching lock; verify upstream.
 */
2308 static void bg_scan_update(struct work_struct *work)
2310 struct hci_dev *hdev = container_of(work, struct hci_dev,
2312 struct hci_conn *conn;
2316 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2322 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2324 hci_le_conn_failed(conn, status);
2326 hci_dev_unlock(hdev);
/* hci_req_sync callback: queue the LE scan disable command. */
2329 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2331 hci_req_add_le_scan_disable(req);
/* hci_req_sync callback: flush the inquiry cache and queue an Inquiry
 * using the LIAC (limited discovery) or GIAC LAP. @opt carries the
 * inquiry length (the assignment line is elided from this listing).
 */
2335 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2338 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2339 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2340 struct hci_cp_inquiry cp;
2342 BT_DBG("%s", req->hdev->name);
2344 hci_dev_lock(req->hdev);
2345 hci_inquiry_cache_flush(req->hdev);
2346 hci_dev_unlock(req->hdev);
2348 memset(&cp, 0, sizeof(cp));
2350 if (req->hdev->discovery.limited)
2351 memcpy(&cp.lap, liac, sizeof(cp.lap));
2353 memcpy(&cp.lap, giac, sizeof(cp.lap));
2357 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Delayed-work handler that stops a timed LE discovery scan. After
 * disabling the scan it decides how discovery proceeds: LE-only stops
 * immediately; interleaved discovery either lets the still-running
 * BR/EDR inquiry finish (simultaneous-capable controllers) or kicks off
 * the BR/EDR inquiry phase now. On any failure discovery is stopped.
 * NOTE(review): listing is elided (labels such as discov_stopped, status
 * checks and hci_dev_lock missing) — verify against the full file.
 */
2362 static void le_scan_disable_work(struct work_struct *work)
2364 struct hci_dev *hdev = container_of(work, struct hci_dev,
2365 le_scan_disable.work);
2368 BT_DBG("%s", hdev->name);
2370 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2373 cancel_delayed_work(&hdev->le_scan_restart);
2375 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2377 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2382 hdev->discovery.scan_start = 0;
2384 /* If we were running LE only scan, change discovery state. If
2385 * we were running both LE and BR/EDR inquiry simultaneously,
2386 * and BR/EDR inquiry is already finished, stop discovery,
2387 * otherwise BR/EDR inquiry will stop discovery when finished.
2388 * If we will resolve remote device name, do not change
2392 if (hdev->discovery.type == DISCOV_TYPE_LE)
2393 goto discov_stopped;
2395 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2398 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2399 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2400 hdev->discovery.state != DISCOVERY_RESOLVING)
2401 goto discov_stopped;
/* Interleaved discovery: run the BR/EDR inquiry phase now. */
2406 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2407 HCI_CMD_TIMEOUT, &status);
2409 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2410 goto discov_stopped;
2417 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2418 hci_dev_unlock(hdev);
/* hci_req_sync callback: restart an LE scan by queuing disable followed
 * by enable (extended or legacy scan-enable depending on controller
 * support) with duplicate filtering on. No-op if not scanning.
 */
2421 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2423 struct hci_dev *hdev = req->hdev;
2425 /* If controller is not scanning we are done. */
2426 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2429 hci_req_add_le_scan_disable(req);
2431 if (use_ext_scan(hdev)) {
2432 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2434 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2435 ext_enable_cp.enable = LE_SCAN_ENABLE;
2436 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2438 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2439 sizeof(ext_enable_cp), &ext_enable_cp);
2441 struct hci_cp_le_set_scan_enable cp;
2443 memset(&cp, 0, sizeof(cp));
2444 cp.enable = LE_SCAN_ENABLE;
2445 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2446 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Delayed-work handler used with strict-duplicate-filter controllers:
 * restart the LE scan, then re-queue le_scan_disable with the portion of
 * the original scan duration that has not yet elapsed, handling jiffies
 * wrap-around when computing elapsed time.
 * NOTE(review): listing is elided (`now = jiffies`, status check, else
 * branch and hci_dev_lock missing) — verify against the full file.
 */
2452 static void le_scan_restart_work(struct work_struct *work)
2454 struct hci_dev *hdev = container_of(work, struct hci_dev,
2455 le_scan_restart.work);
2456 unsigned long timeout, duration, scan_start, now;
2459 BT_DBG("%s", hdev->name);
2461 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2463 bt_dev_err(hdev, "failed to restart LE scan: status %d",
/* Only strict-duplicate-filter controllers need the re-queued timeout. */
2470 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2471 !hdev->discovery.scan_start)
2474 /* When the scan was started, hdev->le_scan_disable has been queued
2475 * after duration from scan_start. During scan restart this job
2476 * has been canceled, and we need to queue it again after proper
2477 * timeout, to make sure that scan does not run indefinitely.
2479 duration = hdev->discovery.scan_duration;
2480 scan_start = hdev->discovery.scan_start;
2482 if (now - scan_start <= duration) {
2485 if (now >= scan_start)
2486 elapsed = now - scan_start;
/* jiffies wrapped between scan_start and now. */
2488 elapsed = ULONG_MAX - scan_start + now;
2490 timeout = duration - elapsed;
2495 queue_delayed_work(hdev->req_workqueue,
2496 &hdev->le_scan_disable, timeout);
2499 hci_dev_unlock(hdev);
/* hci_req_sync callback that starts an active LE discovery scan with the
 * interval passed in @opt: stops advertising first (unless a directed-
 * advertising connection attempt is in flight), stops any background
 * scan, picks a private address, and starts the active scan.
 * NOTE(review): listing is elided (`own_addr_type`/`err` declarations and
 * error handling around hci_update_random_address missing) — verify.
 */
2502 static int active_scan(struct hci_request *req, unsigned long opt)
2504 uint16_t interval = opt;
2505 struct hci_dev *hdev = req->hdev;
2509 BT_DBG("%s", hdev->name);
2511 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2514 /* Don't let discovery abort an outgoing connection attempt
2515 * that's using directed advertising.
2517 if (hci_lookup_le_connect(hdev)) {
2518 hci_dev_unlock(hdev);
2522 cancel_adv_timeout(hdev);
2523 hci_dev_unlock(hdev);
2525 __hci_req_disable_advertising(req);
2528 /* If controller is scanning, it means the background scanning is
2529 * running. Thus, we should temporarily stop it in order to set the
2530 * discovery scanning parameters.
2532 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2533 hci_req_add_le_scan_disable(req);
2535 /* All active scans will be done with either a resolvable private
2536 * address (when privacy feature has been enabled) or non-resolvable
2539 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
/* Fall back to the public address if no random address is available. */
2542 own_addr_type = ADDR_LE_DEV_PUBLIC;
2544 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
/* hci_req_sync callback for simultaneous discovery: start the active LE
 * scan, then queue the BR/EDR inquiry in the same request.
 */
2549 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2553 BT_DBG("%s", req->hdev->name);
2555 err = active_scan(req, opt);
2559 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
/* Kick off discovery according to hdev->discovery.type: plain BR/EDR
 * inquiry, interleaved LE+BR/EDR (simultaneous when the controller
 * supports it, sequential otherwise), or LE-only. For LE phases, arm the
 * le_scan_disable delayed work with the chosen timeout and, for strict
 * duplicate-filter controllers doing filtered discovery, record the scan
 * start/duration so the scan can be restarted later.
 * NOTE(review): listing is elided (case returns/breaks, status checks
 * missing) — verify against the full file.
 */
2562 static void start_discovery(struct hci_dev *hdev, u8 *status)
2564 unsigned long timeout;
2566 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2568 switch (hdev->discovery.type) {
2569 case DISCOV_TYPE_BREDR:
2570 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2571 hci_req_sync(hdev, bredr_inquiry,
2572 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2575 case DISCOV_TYPE_INTERLEAVED:
2576 /* When running simultaneous discovery, the LE scanning time
2577 * should occupy the whole discovery time sine BR/EDR inquiry
2578 * and LE scanning are scheduled by the controller.
2580 * For interleaving discovery in comparison, BR/EDR inquiry
2581 * and LE scanning are done sequentially with separate
2584 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2586 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2587 /* During simultaneous discovery, we double LE scan
2588 * interval. We must leave some time for the controller
2589 * to do BR/EDR inquiry.
2591 hci_req_sync(hdev, interleaved_discov,
2592 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
/* Sequential interleaving: LE phase first, BR/EDR follows later. */
2597 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2598 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2599 HCI_CMD_TIMEOUT, status);
2601 case DISCOV_TYPE_LE:
2602 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2603 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2604 HCI_CMD_TIMEOUT, status);
2607 *status = HCI_ERROR_UNSPECIFIED;
2614 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2616 /* When service discovery is used and the controller has a
2617 * strict duplicate filter, it is important to remember the
2618 * start and duration of the scan. This is required for
2619 * restarting scanning during the discovery phase.
2621 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2622 hdev->discovery.result_filtering) {
2623 hdev->discovery.scan_start = jiffies;
2624 hdev->discovery.scan_duration = timeout;
2627 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
/* Queue the commands needed to stop discovery in its current state:
 * cancel inquiry and/or disable the LE scan while finding/stopping,
 * disable a passive scan otherwise, and cancel an in-flight remote name
 * request when resolving. Returns whether any command was queued
 * (the `ret` bookkeeping lines are elided from this listing).
 */
2631 bool hci_req_stop_discovery(struct hci_request *req)
2633 struct hci_dev *hdev = req->hdev;
2634 struct discovery_state *d = &hdev->discovery;
2635 struct hci_cp_remote_name_req_cancel cp;
2636 struct inquiry_entry *e;
2639 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2641 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2642 if (test_bit(HCI_INQUIRY, &hdev->flags))
2643 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2645 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2646 cancel_delayed_work(&hdev->le_scan_disable);
2647 hci_req_add_le_scan_disable(req);
2652 /* Passive scanning */
2653 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2654 hci_req_add_le_scan_disable(req);
2659 /* No further actions needed for LE-only discovery */
2660 if (d->type == DISCOV_TYPE_LE)
2663 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2664 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2669 bacpy(&cp.bdaddr, &e->data.bdaddr);
2670 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
/* hci_req_sync callback: stop discovery under the dev lock. */
2678 static int stop_discovery(struct hci_request *req, unsigned long opt)
2680 hci_dev_lock(req->hdev);
2681 hci_req_stop_discovery(req);
2682 hci_dev_unlock(req->hdev);
/* Workqueue handler driving discovery state transitions: start discovery
 * when STARTING (moving to FINDING on success, STOPPED on failure),
 * stop it when STOPPING, do nothing when already STOPPED.
 * NOTE(review): listing is elided (status declaration, the success check
 * between the start branches, break statements missing) — verify.
 */
2687 static void discov_update(struct work_struct *work)
2689 struct hci_dev *hdev = container_of(work, struct hci_dev,
2693 switch (hdev->discovery.state) {
2694 case DISCOVERY_STARTING:
2695 start_discovery(hdev, &status);
2696 mgmt_start_discovery_complete(hdev, status);
2698 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2700 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2702 case DISCOVERY_STOPPING:
2703 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2704 mgmt_stop_discovery_complete(hdev, status);
2706 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2708 case DISCOVERY_STOPPED:
/* Workqueue handler for the discoverable timeout: clear both
 * discoverable flags and the timeout, then apply the change via
 * discoverable_update and announce the new settings over mgmt.
 * NOTE(review): the matching hci_dev_lock() is not visible in this
 * elided listing — hci_dev_unlock() implies one; verify upstream.
 */
2714 static void discov_off(struct work_struct *work)
2716 struct hci_dev *hdev = container_of(work, struct hci_dev,
2719 BT_DBG("%s", hdev->name);
2723 /* When discoverable timeout triggers, then just make sure
2724 * the limited discoverable flag is cleared. Even in the case
2725 * of a timeout triggered from general discoverable, it is
2726 * safe to unconditionally clear the flag.
2728 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2729 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2730 hdev->discov_timeout = 0;
2732 hci_dev_unlock(hdev);
2734 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2735 mgmt_new_settings(hdev);
2738 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2740 struct hci_dev *hdev = req->hdev;
2745 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2746 !lmp_host_ssp_capable(hdev)) {
2749 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2751 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2754 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2755 sizeof(support), &support);
2759 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2760 lmp_bredr_capable(hdev)) {
2761 struct hci_cp_write_le_host_supported cp;
2766 /* Check first if we already have the right
2767 * host state (host features set)
2769 if (cp.le != lmp_host_le_capable(hdev) ||
2770 cp.simul != lmp_host_le_br_capable(hdev))
2771 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2775 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2776 /* Make sure the controller has a good default for
2777 * advertising data. This also applies to the case
2778 * where BR/EDR was toggled during the AUTO_OFF phase.
2780 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2781 list_empty(&hdev->adv_instances)) {
2784 if (ext_adv_capable(hdev)) {
2785 err = __hci_req_setup_ext_adv_instance(req,
2788 __hci_req_update_scan_rsp_data(req,
2792 __hci_req_update_adv_data(req, 0x00);
2793 __hci_req_update_scan_rsp_data(req, 0x00);
2796 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2797 if (!ext_adv_capable(hdev))
2798 __hci_req_enable_advertising(req);
2800 __hci_req_enable_ext_advertising(req,
2803 } else if (!list_empty(&hdev->adv_instances)) {
2804 struct adv_info *adv_instance;
2806 adv_instance = list_first_entry(&hdev->adv_instances,
2807 struct adv_info, list);
2808 __hci_req_schedule_adv_instance(req,
2809 adv_instance->instance,
2814 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2815 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2816 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2817 sizeof(link_sec), &link_sec);
2819 if (lmp_bredr_capable(hdev)) {
2820 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2821 __hci_req_write_fast_connectable(req, true);
2823 __hci_req_write_fast_connectable(req, false);
2824 __hci_req_update_scan(req);
2825 __hci_req_update_class(req);
2826 __hci_req_update_name(req);
2827 __hci_req_update_eir(req);
2830 hci_dev_unlock(hdev);
2834 int __hci_req_hci_power_on(struct hci_dev *hdev)
2836 /* Register the available SMP channels (BR/EDR and LE) only when
2837 * successfully powering on the controller. This late
2838 * registration is required so that LE SMP can clearly decide if
2839 * the public address or static address is used.
2843 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2847 void hci_request_setup(struct hci_dev *hdev)
2849 INIT_WORK(&hdev->discov_update, discov_update);
2850 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2851 INIT_WORK(&hdev->scan_update, scan_update_work);
2852 INIT_WORK(&hdev->connectable_update, connectable_update_work);
2853 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2854 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2855 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2856 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2857 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2860 void hci_request_cancel_all(struct hci_dev *hdev)
2862 hci_req_sync_cancel(hdev, ENODEV);
2864 cancel_work_sync(&hdev->discov_update);
2865 cancel_work_sync(&hdev->bg_scan_update);
2866 cancel_work_sync(&hdev->scan_update);
2867 cancel_work_sync(&hdev->connectable_update);
2868 cancel_work_sync(&hdev->discoverable_update);
2869 cancel_delayed_work_sync(&hdev->discov_off);
2870 cancel_delayed_work_sync(&hdev->le_scan_disable);
2871 cancel_delayed_work_sync(&hdev->le_scan_restart);
2873 if (hdev->adv_instance_timeout) {
2874 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2875 hdev->adv_instance_timeout = 0;