2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2014 Intel Corporation
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
24 #include <linux/sched/signal.h>
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
31 #include "hci_request.h"
33 #define HCI_REQ_DONE 0
34 #define HCI_REQ_PEND 1
35 #define HCI_REQ_CANCELED 2
/* Initialise a request object: start with an empty command queue.
 * (View is fragmentary — the assignment of req->hdev is not visible here.)
 */
37 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
39 skb_queue_head_init(&req->cmd_q);
/* Discard every command still queued on this request. */
44 void hci_req_purge(struct hci_request *req)
46 skb_queue_purge(&req->cmd_q);
/* Return true while a synchronous request on @hdev is still awaiting
 * completion (req_status == HCI_REQ_PEND).
 */
49 bool hci_req_status_pend(struct hci_dev *hdev)
51 return hdev->req_status == HCI_REQ_PEND;
/* Hand the request's queued commands over to the controller: attach the
 * completion callback (plain @complete or skb-based @complete_skb) to the
 * last command, splice the whole chain onto hdev->cmd_q under the queue
 * lock, and schedule cmd_work to start transmission.
 */
54 static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
57 struct hci_dev *hdev = req->hdev;
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
67 skb_queue_purge(&req->cmd_q);
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
/* The completion callback is stored on the LAST command so it fires only
 * once the whole chain has been processed.
 */
75 skb = skb_peek_tail(&req->cmd_q);
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
/* Move commands to the device queue atomically w.r.t. the cmd worker. */
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
87 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Run the request with a plain (status-only) completion callback. */
92 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
94 return req_run(req, complete, NULL);
/* Run the request with an skb-based completion callback, which also
 * receives the response packet.
 */
97 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
99 return req_run(req, NULL, complete);
/* Completion callback for the synchronous request helpers: record the
 * result, keep a reference on the response skb and wake up the thread
 * sleeping on req_wait_q.
 */
102 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
/* skb_get() — the waiter consumes (and frees) hdev->req_skb later. */
111 hdev->req_skb = skb_get(skb);
112 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request: store @err as the result, mark the
 * request canceled and wake the waiter.
 */
116 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
/* Send a single HCI command and wait (up to @timeout) for the response,
 * optionally matching a specific @event. Returns the response skb on
 * success, or ERR_PTR: -EINTR if interrupted, a bt_to_errno()-mapped error
 * on controller failure, -ENODATA when no response skb was captured.
 */
127 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 const void *param, u8 event, u32 timeout)
130 struct hci_request req;
134 BT_DBG("%s", hdev->name);
136 hci_req_init(&req, hdev);
138 hci_req_add_ev(&req, opcode, plen, param, event);
140 hdev->req_status = HCI_REQ_PEND;
142 err = hci_req_run_skb(&req, hci_req_sync_complete);
/* Sleep until hci_req_sync_complete()/hci_req_sync_cancel() flips
 * req_status, or the timeout expires.
 */
146 err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 hdev->req_status != HCI_REQ_PEND, timeout);
149 if (err == -ERESTARTSYS)
150 return ERR_PTR(-EINTR);
152 switch (hdev->req_status) {
154 err = -bt_to_errno(hdev->req_result);
157 case HCI_REQ_CANCELED:
158 err = -hdev->req_result;
/* Reset the per-device request state for the next caller. */
166 hdev->req_status = hdev->req_result = 0;
168 hdev->req_skb = NULL;
170 BT_DBG("%s end: err %d", hdev->name, err);
178 return ERR_PTR(-ENODATA);
182 EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper: synchronous command with no specific event (0 means
 * wait for the regular command-complete/status path).
 */
184 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 const void *param, u32 timeout)
187 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
189 EXPORT_SYMBOL(__hci_cmd_sync);
191 /* Execute request and wait for completion. */
/* @func builds the request; @hci_status (optional) receives the raw HCI
 * status byte, or HCI_ERROR_UNSPECIFIED when none is available.
 */
192 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
194 unsigned long opt, u32 timeout, u8 *hci_status)
196 struct hci_request req;
199 BT_DBG("%s start", hdev->name);
201 hci_req_init(&req, hdev);
203 hdev->req_status = HCI_REQ_PEND;
/* Let the caller-provided builder queue its commands. */
205 err = func(&req, opt);
208 *hci_status = HCI_ERROR_UNSPECIFIED;
212 err = hci_req_run_skb(&req, hci_req_sync_complete);
214 hdev->req_status = 0;
216 /* ENODATA means the HCI request command queue is empty.
217 * This can happen when a request with conditionals doesn't
218 * trigger any commands to be sent. This is normal behavior
219 * and should not trigger an error return.
221 if (err == -ENODATA) {
228 *hci_status = HCI_ERROR_UNSPECIFIED;
233 err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 hdev->req_status != HCI_REQ_PEND, timeout);
236 if (err == -ERESTARTSYS)
239 switch (hdev->req_status) {
241 err = -bt_to_errno(hdev->req_result);
243 *hci_status = hdev->req_result;
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
249 *hci_status = HCI_ERROR_UNSPECIFIED;
255 *hci_status = HCI_ERROR_UNSPECIFIED;
/* Drop any captured response skb and reset request state. */
259 kfree_skb(hdev->req_skb);
260 hdev->req_skb = NULL;
261 hdev->req_status = hdev->req_result = 0;
263 BT_DBG("%s end: err %d", hdev->name, err);
/* Public synchronous-request entry point: serialises callers via the
 * req_sync lock and only runs the request while the controller is up.
 */
268 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
270 unsigned long opt, u32 timeout, u8 *hci_status)
274 /* Serialize all requests */
275 hci_req_sync_lock(hdev);
276 /* check the state after obtaing the lock to protect the HCI_UP
277 * against any races from hci_dev_do_close when the controller
280 if (test_bit(HCI_UP, &hdev->flags))
281 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
284 hci_req_sync_unlock(hdev);
/* Allocate and fill an skb for one HCI command: command header (opcode in
 * little-endian) followed by @plen bytes of @param. The packet type and
 * opcode are also recorded in the skb control block.
 */
289 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
292 int len = HCI_COMMAND_HDR_SIZE + plen;
293 struct hci_command_hdr *hdr;
/* GFP_ATOMIC: may be called from non-sleeping context. */
296 skb = bt_skb_alloc(len, GFP_ATOMIC);
300 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
301 hdr->opcode = cpu_to_le16(opcode);
305 skb_put_data(skb, param, plen);
307 BT_DBG("skb len %d", skb->len);
309 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310 hci_skb_opcode(skb) = opcode;
315 /* Queue a command to an asynchronous HCI request */
/* Builds the command skb, flags the first command of a request with
 * HCI_REQ_START, records the event the caller expects in response, and
 * appends the command to the request queue.
 */
316 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
317 const void *param, u8 event)
319 struct hci_dev *hdev = req->hdev;
322 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
324 /* If an error occurred during request building, there is no point in
325 * queueing the HCI command. We can simply return.
330 skb = hci_prepare_cmd(hdev, opcode, plen, param);
332 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
/* First command in the chain starts the request. */
338 if (skb_queue_empty(&req->cmd_q))
339 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
341 bt_cb(skb)->hci.req_event = event;
343 skb_queue_tail(&req->cmd_q, skb);
/* Queue a command without an expected event (event = 0). */
346 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
349 hci_req_add_ev(req, opcode, plen, param, 0);
/* Queue page-scan activity/type updates. When @enable, use interlaced
 * scanning with a short (160 ms) interval for faster connections;
 * otherwise fall back to the device defaults. Commands are queued only if
 * the values actually change.
 */
352 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
354 struct hci_dev *hdev = req->hdev;
355 struct hci_cp_write_page_scan_activity acp;
/* Page scan is BR/EDR-only and interlaced scan needs >= Bluetooth 1.2. */
358 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
361 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
365 type = PAGE_SCAN_TYPE_INTERLACED;
367 /* 160 msec page scan interval */
368 acp.interval = cpu_to_le16(0x0100);
370 type = hdev->def_page_scan_type;
371 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
374 acp.window = cpu_to_le16(hdev->def_page_scan_window);
/* Only write if interval/window differ from the current settings. */
376 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
377 __cpu_to_le16(hdev->page_scan_window) != acp.window)
378 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
381 if (hdev->page_scan_type != type)
382 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* Kick off interleave scanning: reset the state machine to the no-filter
 * phase and schedule the interleave_scan delayed work immediately.
 */
385 static void start_interleave_scan(struct hci_dev *hdev)
387 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
388 queue_delayed_work(hdev->req_workqueue,
389 &hdev->interleave_scan, 0);
/* True while the interleave-scan state machine is active. */
392 static bool is_interleave_scanning(struct hci_dev *hdev)
394 return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
/* Synchronously cancel the interleave-scan work and reset its state. */
397 static void cancel_interleave_scan(struct hci_dev *hdev)
399 bt_dev_dbg(hdev, "cancelling interleave scan");
401 cancel_delayed_work_sync(&hdev->interleave_scan);
403 hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
406 /* Return true if interleave_scan wasn't started until exiting this function,
407 * otherwise, return false
/* Decide whether interleave scanning (alternating allowlist / unfiltered
 * scan) should run, based on active ADV monitors plus pending LE
 * connections or reports, and start/cancel it accordingly.
 */
409 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
411 /* If there is at least one ADV monitors and one pending LE connection
412 * or one device to be scanned for, we should alternate between
413 * allowlist scan and one without any filters to save power.
415 bool use_interleaving = hci_is_adv_monitoring(hdev) &&
416 !(list_empty(&hdev->pend_le_conns) &&
417 list_empty(&hdev->pend_le_reports));
418 bool is_interleaving = is_interleave_scanning(hdev);
420 if (use_interleaving && !is_interleaving) {
421 start_interleave_scan(hdev);
422 bt_dev_dbg(hdev, "starting interleave scan");
426 if (!use_interleaving && is_interleaving)
427 cancel_interleave_scan(hdev);
432 /* This function controls the background scanning based on hdev->pend_le_conns
433 * list. If there are pending LE connection we start the background scanning,
434 * otherwise we stop it.
436 * This function requires the caller holds hdev->lock.
438 static void __hci_update_background_scan(struct hci_request *req)
440 struct hci_dev *hdev = req->hdev;
/* Bail out while the controller is down, initialising, being configured
 * or being unregistered.
 */
442 if (!test_bit(HCI_UP, &hdev->flags) ||
443 test_bit(HCI_INIT, &hdev->flags) ||
444 hci_dev_test_flag(hdev, HCI_SETUP) ||
445 hci_dev_test_flag(hdev, HCI_CONFIG) ||
446 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
447 hci_dev_test_flag(hdev, HCI_UNREGISTER))
450 /* No point in doing scanning if LE support hasn't been enabled */
451 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
454 /* If discovery is active don't interfere with it */
455 if (hdev->discovery.state != DISCOVERY_STOPPED)
458 /* Reset RSSI and UUID filters when starting background scanning
459 * since these filters are meant for service discovery only.
461 * The Start Discovery and Start Service Discovery operations
462 * ensure to set proper values for RSSI threshold and UUID
463 * filter list. So it is safe to just reset them here.
465 hci_discovery_filter_clear(hdev);
467 BT_DBG("%s ADV monitoring is %s", hdev->name,
468 hci_is_adv_monitoring(hdev) ? "on" : "off");
470 if (list_empty(&hdev->pend_le_conns) &&
471 list_empty(&hdev->pend_le_reports) &&
472 !hci_is_adv_monitoring(hdev)) {
473 /* If there is no pending LE connections or devices
474 * to be scanned for or no ADV monitors, we should stop the
475 * background scanning.
478 /* If controller is not scanning we are done. */
479 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
482 hci_req_add_le_scan_disable(req, false);
484 BT_DBG("%s stopping background scanning", hdev->name);
486 /* If there is at least one pending LE connection, we should
487 * keep the background scan running.
490 /* If controller is connecting, we should not start scanning
491 * since some controllers are not able to scan and connect at
494 if (hci_lookup_le_connect(hdev))
497 /* If controller is currently scanning, we stop it to ensure we
498 * don't miss any advertising (due to duplicates filter).
500 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
501 hci_req_add_le_scan_disable(req, false);
503 hci_req_add_le_passive_scan(req);
504 bt_dev_dbg(hdev, "starting background scanning");
/* Queue a Write Local Name command carrying hdev->dev_name. */
508 void __hci_req_update_name(struct hci_request *req)
510 struct hci_dev *hdev = req->hdev;
511 struct hci_cp_write_local_name cp;
513 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
515 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
518 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing all registered 16-bit service UUIDs to
 * @data (capacity @len). The PnP Information service class is skipped.
 * The field type is downgraded from UUID16_ALL to UUID16_SOME when space
 * runs out. Returns the advanced write pointer.
 */
520 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
522 u8 *ptr = data, *uuids_start = NULL;
523 struct bt_uuid *uuid;
528 list_for_each_entry(uuid, &hdev->uuids, list) {
531 if (uuid->size != 16)
/* The 16-bit UUID lives in bytes 12..13 of the 128-bit form. */
534 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
538 if (uuid16 == PNP_INFO_SVCLASS_ID)
544 uuids_start[1] = EIR_UUID16_ALL;
548 /* Stop if not enough space to put next UUID */
549 if ((ptr - data) + sizeof(u16) > len) {
550 uuids_start[1] = EIR_UUID16_SOME;
/* Little-endian encode and grow the EIR length byte. */
554 *ptr++ = (uuid16 & 0x00ff);
555 *ptr++ = (uuid16 & 0xff00) >> 8;
556 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing all registered 32-bit service UUIDs to
 * @data (capacity @len); downgrades to UUID32_SOME on overflow. Returns
 * the advanced write pointer.
 */
562 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
564 u8 *ptr = data, *uuids_start = NULL;
565 struct bt_uuid *uuid;
570 list_for_each_entry(uuid, &hdev->uuids, list) {
571 if (uuid->size != 32)
577 uuids_start[1] = EIR_UUID32_ALL;
581 /* Stop if not enough space to put next UUID */
582 if ((ptr - data) + sizeof(u32) > len) {
583 uuids_start[1] = EIR_UUID32_SOME;
/* The 32-bit UUID is taken from offset 12 of the 128-bit form. */
587 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
589 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing all registered 128-bit service UUIDs to
 * @data (capacity @len); downgrades to UUID128_SOME on overflow. Returns
 * the advanced write pointer.
 */
595 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
597 u8 *ptr = data, *uuids_start = NULL;
598 struct bt_uuid *uuid;
603 list_for_each_entry(uuid, &hdev->uuids, list) {
604 if (uuid->size != 128)
610 uuids_start[1] = EIR_UUID128_ALL;
614 /* Stop if not enough space to put next UUID */
615 if ((ptr - data) + 16 > len) {
616 uuids_start[1] = EIR_UUID128_SOME;
620 memcpy(ptr, uuid->uuid, 16);
622 uuids_start[0] += 16;
/* Build the Extended Inquiry Response payload in @data: local name
 * (shortened or complete), inquiry TX power, Device ID record and the
 * three UUID lists, each bounded by the remaining HCI_MAX_EIR_LENGTH.
 */
628 static void create_eir(struct hci_dev *hdev, u8 *data)
633 name_len = strlen(hdev->dev_name);
639 ptr[1] = EIR_NAME_SHORT;
641 ptr[1] = EIR_NAME_COMPLETE;
643 /* EIR Data length */
644 ptr[0] = name_len + 1;
646 memcpy(ptr + 2, hdev->dev_name, name_len);
648 ptr += (name_len + 2);
/* Only advertise TX power when a valid value is known. */
651 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
653 ptr[1] = EIR_TX_POWER;
654 ptr[2] = (u8) hdev->inq_tx_power;
/* Device ID record: source, vendor, product, version (LE 16-bit each). */
659 if (hdev->devid_source > 0) {
661 ptr[1] = EIR_DEVICE_ID;
663 put_unaligned_le16(hdev->devid_source, ptr + 2);
664 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
665 put_unaligned_le16(hdev->devid_product, ptr + 6);
666 put_unaligned_le16(hdev->devid_version, ptr + 8);
671 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
672 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
673 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Regenerate the EIR data and queue a Write EIR command, but only when
 * the device is powered, supports extended inquiry, has SSP enabled, is
 * not service-caching, and the data actually changed.
 */
676 void __hci_req_update_eir(struct hci_request *req)
678 struct hci_dev *hdev = req->hdev;
679 struct hci_cp_write_eir cp;
681 if (!hdev_is_powered(hdev))
684 if (!lmp_ext_inq_capable(hdev))
687 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
690 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
693 memset(&cp, 0, sizeof(cp));
695 create_eir(hdev, cp.data);
/* Skip the command when nothing changed since the last write. */
697 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
700 memcpy(hdev->eir, cp.data, sizeof(cp.data));
702 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Queue the command(s) to stop LE scanning, using the extended scan
 * variant when supported. Skipped entirely while scanning is paused for
 * suspend. Also disables controller address resolution when LL privacy is
 * in use, unless @rpa_le_conn indicates an RPA-based connection needs it.
 */
705 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
707 struct hci_dev *hdev = req->hdev;
709 if (hdev->scanning_paused) {
710 bt_dev_dbg(hdev, "Scanning is paused for suspend");
714 if (use_ext_scan(hdev)) {
715 struct hci_cp_le_set_ext_scan_enable cp;
717 memset(&cp, 0, sizeof(cp));
718 cp.enable = LE_SCAN_DISABLE;
719 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
722 struct hci_cp_le_set_scan_enable cp;
724 memset(&cp, 0, sizeof(cp));
725 cp.enable = LE_SCAN_DISABLE;
726 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
729 /* Disable address resolution */
730 if (use_ll_privacy(hdev) &&
731 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
732 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
735 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
/* Queue removal of @bdaddr from the controller accept list; when LL
 * privacy is enabled and an IRK exists for the address, also remove it
 * from the resolving list.
 */
739 static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
742 struct hci_cp_le_del_from_accept_list cp;
744 cp.bdaddr_type = bdaddr_type;
745 bacpy(&cp.bdaddr, bdaddr);
747 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
749 hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
751 if (use_ll_privacy(req->hdev) &&
752 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
755 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
/* Inner cp shadows the outer one for the resolving-list command. */
757 struct hci_cp_le_del_from_resolv_list cp;
759 cp.bdaddr_type = bdaddr_type;
760 bacpy(&cp.bdaddr, bdaddr);
762 hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
768 /* Adds connection to accept list if needed. On error, returns -1. */
/* Skips addresses already present; fails when the controller list is
 * full, when the peer uses an RPA that cannot be matched (no LL privacy),
 * or when suspending and the device is not wake-capable. On success also
 * programs the resolving list when LL privacy is enabled and an IRK is
 * known.
 */
769 static int add_to_accept_list(struct hci_request *req,
770 struct hci_conn_params *params, u8 *num_entries,
773 struct hci_cp_le_add_to_accept_list cp;
774 struct hci_dev *hdev = req->hdev;
776 /* Already in accept list */
777 if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
781 /* Select filter policy to accept all advertising */
782 if (*num_entries >= hdev->le_accept_list_size)
785 /* Accept list can not be used with RPAs */
787 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
788 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
792 /* During suspend, only wakeable devices can be in accept list */
793 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
794 params->current_flags))
798 cp.bdaddr_type = params->addr_type;
799 bacpy(&cp.bdaddr, &params->addr);
801 bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
803 hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
805 if (use_ll_privacy(hdev) &&
806 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
809 irk = hci_find_irk_by_addr(hdev, &params->addr,
812 struct hci_cp_le_add_to_resolv_list cp;
814 cp.bdaddr_type = params->addr_type;
815 bacpy(&cp.bdaddr, &params->addr);
816 memcpy(cp.peer_irk, irk->val, 16);
/* Local IRK only when privacy is on; all-zero otherwise. */
818 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
819 memcpy(cp.local_irk, hdev->irk, 16);
821 memset(cp.local_irk, 0, 16);
823 hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
/* Synchronise the controller accept list with the pending LE connection
 * and report lists, then return the scan filter policy to use (accept
 * list, or unfiltered when the list cannot hold every relevant device or
 * interleave scanning requires it).
 */
831 static u8 update_accept_list(struct hci_request *req)
833 struct hci_dev *hdev = req->hdev;
834 struct hci_conn_params *params;
835 struct bdaddr_list *b;
837 bool pend_conn, pend_report;
838 /* We allow usage of accept list even with RPAs in suspend. In the worst
839 * case, we won't be able to wake from devices that use the privacy1.2
840 * features. Additionally, once we support privacy1.2 and IRK
841 * offloading, we can update this to also check for those conditions.
843 bool allow_rpa = hdev->suspended;
845 if (use_ll_privacy(hdev) &&
846 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
849 /* Go through the current accept list programmed into the
850 * controller one by one and check if that address is still
851 * in the list of pending connections or list of devices to
852 * report. If not present in either list, then queue the
853 * command to remove it from the controller.
855 list_for_each_entry(b, &hdev->le_accept_list, list) {
856 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
859 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
863 /* If the device is not likely to connect or report,
864 * remove it from the accept list.
866 if (!pend_conn && !pend_report) {
867 del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
871 /* Accept list can not be used with RPAs */
873 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
874 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
881 /* Since all no longer valid accept list entries have been
882 * removed, walk through the list of pending connections
883 * and ensure that any new device gets programmed into
886 * If the list of the devices is larger than the list of
887 * available accept list entries in the controller, then
888 * just abort and return filer policy value to not use the
891 list_for_each_entry(params, &hdev->pend_le_conns, action) {
892 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
896 /* After adding all new pending connections, walk through
897 * the list of pending reports and also add these to the
898 * accept list if there is still space. Abort if space runs out.
900 list_for_each_entry(params, &hdev->pend_le_reports, action) {
901 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
905 /* Use the allowlist unless the following conditions are all true:
906 * - We are not currently suspending
907 * - There are 1 or more ADV monitors registered
908 * - Interleaved scanning is not currently using the allowlist
910 * Once the controller offloading of advertisement monitor is in place,
911 * the above condition should include the support of MSFT extension
914 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
915 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
918 /* Select filter policy to use accept list */
/* Scanning uses a resolvable private address whenever privacy is on. */
922 static bool scan_use_rpa(struct hci_dev *hdev)
924 return hci_dev_test_flag(hdev, HCI_PRIVACY);
/* Queue the commands that configure and enable LE scanning with the given
 * parameters, choosing the extended (per-PHY: 1M and Coded) or legacy
 * command set depending on controller support. Skipped while scanning is
 * paused for suspend. Enables controller address resolution first when LL
 * privacy applies.
 */
927 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
928 u16 window, u8 own_addr_type, u8 filter_policy,
931 struct hci_dev *hdev = req->hdev;
933 if (hdev->scanning_paused) {
934 bt_dev_dbg(hdev, "Scanning is paused for suspend");
938 if (use_ll_privacy(hdev) &&
939 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
943 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
946 /* Use ext scanning if set ext scan param and ext scan enable is
949 if (use_ext_scan(hdev)) {
950 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
951 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
952 struct hci_cp_le_scan_phy_params *phy_params;
/* Room for the base params plus up to two per-PHY entries. */
953 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
956 ext_param_cp = (void *)data;
957 phy_params = (void *)ext_param_cp->data;
959 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
960 ext_param_cp->own_addr_type = own_addr_type;
961 ext_param_cp->filter_policy = filter_policy;
963 plen = sizeof(*ext_param_cp);
965 if (scan_1m(hdev) || scan_2m(hdev)) {
966 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
968 memset(phy_params, 0, sizeof(*phy_params));
969 phy_params->type = type;
970 phy_params->interval = cpu_to_le16(interval);
971 phy_params->window = cpu_to_le16(window);
973 plen += sizeof(*phy_params);
977 if (scan_coded(hdev)) {
978 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
980 memset(phy_params, 0, sizeof(*phy_params));
981 phy_params->type = type;
982 phy_params->interval = cpu_to_le16(interval);
983 phy_params->window = cpu_to_le16(window);
985 plen += sizeof(*phy_params);
989 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
992 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
993 ext_enable_cp.enable = LE_SCAN_ENABLE;
994 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
996 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
997 sizeof(ext_enable_cp), &ext_enable_cp);
/* Legacy (pre-extended) scan parameter + enable commands. */
999 struct hci_cp_le_set_scan_param param_cp;
1000 struct hci_cp_le_set_scan_enable enable_cp;
1002 memset(&param_cp, 0, sizeof(param_cp));
1003 param_cp.type = type;
1004 param_cp.interval = cpu_to_le16(interval);
1005 param_cp.window = cpu_to_le16(window);
1006 param_cp.own_address_type = own_addr_type;
1007 param_cp.filter_policy = filter_policy;
1008 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1011 memset(&enable_cp, 0, sizeof(enable_cp));
1012 enable_cp.enable = LE_SCAN_ENABLE;
1013 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1014 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1019 /* Returns true if an le connection is in the scanning state */
/* Walks the connection hash (RCU list iteration) looking for an LE link
 * in BT_CONNECT state with HCI_CONN_SCANNING set.
 */
1020 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1022 struct hci_conn_hash *h = &hdev->conn_hash;
1027 list_for_each_entry_rcu(c, &h->list, list) {
1028 if (c->type == LE_LINK && c->state == BT_CONNECT &&
1029 test_bit(HCI_CONN_SCANNING, &c->flags)) {
1040 /* Ensure to call hci_req_add_le_scan_disable() first to disable the
1041 * controller based address resolution to be able to reconfigure
/* Configure and queue LE passive scanning: pick the own address type,
 * update the accept list (which decides the filter policy), then choose
 * scan window/interval based on the current state (suspend, connecting,
 * ADV monitoring, or defaults).
 */
1044 void hci_req_add_le_passive_scan(struct hci_request *req)
1046 struct hci_dev *hdev = req->hdev;
1049 u16 window, interval;
1050 /* Background scanning should run with address resolution */
1051 bool addr_resolv = true;
1053 if (hdev->scanning_paused) {
1054 bt_dev_dbg(hdev, "Scanning is paused for suspend");
1058 /* Set require_privacy to false since no SCAN_REQ are send
1059 * during passive scanning. Not using an non-resolvable address
1060 * here is important so that peer devices using direct
1061 * advertising with our address will be correctly reported
1062 * by the controller.
1064 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
/* Interleave scan takes over when it decides to run. */
1068 if (__hci_update_interleaved_scan(hdev))
1071 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
1072 /* Adding or removing entries from the accept list must
1073 * happen before enabling scanning. The controller does
1074 * not allow accept list modification while scanning.
1076 filter_policy = update_accept_list(req);
1078 /* When the controller is using random resolvable addresses and
1079 * with that having LE privacy enabled, then controllers with
1080 * Extended Scanner Filter Policies support can now enable support
1081 * for handling directed advertising.
1083 * So instead of using filter polices 0x00 (no accept list)
1084 * and 0x01 (accept list enabled) use the new filter policies
1085 * 0x02 (no accept list) and 0x03 (accept list enabled).
1087 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
1088 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
1089 filter_policy |= 0x02;
/* Window/interval priority: suspend > connecting > ADV monitor > default. */
1091 if (hdev->suspended) {
1092 window = hdev->le_scan_window_suspend;
1093 interval = hdev->le_scan_int_suspend;
1094 } else if (hci_is_le_conn_scanning(hdev)) {
1095 window = hdev->le_scan_window_connect;
1096 interval = hdev->le_scan_int_connect;
1097 } else if (hci_is_adv_monitoring(hdev)) {
1098 window = hdev->le_scan_window_adv_monitor;
1099 interval = hdev->le_scan_int_adv_monitor;
1101 window = hdev->le_scan_window;
1102 interval = hdev->le_scan_interval;
1105 bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
1107 hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
1108 own_addr_type, filter_policy, addr_resolv);
/* Return the scan response length for an advertising @instance. Instance
 * 0 and instances flagged with appearance/local-name get a non-trivial
 * length (exact value not visible in this fragment); otherwise the
 * instance's stored scan_rsp_len is returned.
 */
1111 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
1113 struct adv_info *adv_instance;
1115 /* Instance 0x00 always set local name */
1116 if (instance == 0x00)
1119 adv_instance = hci_find_adv_instance(hdev, instance);
1123 if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
1124 adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1127 return adv_instance->scan_rsp_len;
/* Queue a Set Event Filter command that clears all filters, then refresh
 * the page scan state that the filter change may have affected.
 */
1130 static void hci_req_clear_event_filter(struct hci_request *req)
1132 struct hci_cp_set_event_filter f;
1134 memset(&f, 0, sizeof(f));
1135 f.flt_type = HCI_FLT_CLEAR_ALL;
1136 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
1138 /* Update page scan state (since we may have modified it when setting
1139 * the event filter).
1141 __hci_req_update_scan(req);
/* Program connection-setup event filters (auto-accept) for every device
 * in the accept list flagged as wake-capable, after clearing any existing
 * filters, and queue a scan-enable update.
 */
1144 static void hci_req_set_event_filter(struct hci_request *req)
1146 struct bdaddr_list_with_flags *b;
1147 struct hci_cp_set_event_filter f;
1148 struct hci_dev *hdev = req->hdev;
1149 u8 scan = SCAN_DISABLED;
1151 /* Always clear event filter when starting */
1152 hci_req_clear_event_filter(req);
1154 list_for_each_entry(b, &hdev->accept_list, list) {
/* Skip devices not allowed to wake the host. */
1155 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
1159 memset(&f, 0, sizeof(f));
1160 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
1161 f.flt_type = HCI_FLT_CONN_SETUP;
1162 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
1163 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
1165 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
1166 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
1170 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Reconfigure LE passive scanning for suspend: disable any active scan
 * first, re-enable with the current parameters, and flag the suspend task
 * so the notifier waits for the command response.
 */
1173 static void hci_req_config_le_suspend_scan(struct hci_request *req)
1175 /* Before changing params disable scan if enabled */
1176 if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
1177 hci_req_add_le_scan_disable(req, false);
1179 /* Configure params and enable scanning */
1180 hci_req_add_le_passive_scan(req);
1182 /* Block suspend notifier on response */
1183 set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
/* Stop the advertising-instance expiry timer, if one is armed. */
1186 static void cancel_adv_timeout(struct hci_dev *hdev)
1188 if (hdev->adv_instance_timeout) {
1189 hdev->adv_instance_timeout = 0;
1190 cancel_delayed_work(&hdev->adv_instance_expire);
1194 /* This function requires the caller holds hdev->lock */
/* Pause all advertising for suspend: disable any active advertisement and,
 * when software rotation is used (no extended advertising), stop the
 * rotation timer as well.
 */
1195 static void hci_suspend_adv_instances(struct hci_request *req)
1197 bt_dev_dbg(req->hdev, "Suspending advertising instances");
1199 /* Call to disable any advertisements active on the controller.
1200 * This will succeed even if no advertisements are configured.
1202 __hci_req_disable_advertising(req);
1204 /* If we are using software rotation, pause the loop */
1205 if (!ext_adv_capable(req->hdev))
1206 cancel_adv_timeout(req->hdev);
1209 /* This function requires the caller holds hdev->lock */
/* Resume advertising after suspend: re-enable every tracked instance on
 * extended-advertising controllers, or restart the software rotation loop
 * from the current instance otherwise.
 */
1210 static void hci_resume_adv_instances(struct hci_request *req)
1212 struct adv_info *adv;
1214 bt_dev_dbg(req->hdev, "Resuming advertising instances");
1216 if (ext_adv_capable(req->hdev)) {
1217 /* Call for each tracked instance to be re-enabled */
1218 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
1219 __hci_req_enable_ext_advertising(req,
1224 /* Schedule for most recent instance to be restarted and begin
1225 * the software rotation loop
1227 __hci_req_schedule_adv_instance(req,
1228 req->hdev->cur_adv_instance,
/* Completion callback for suspend-related requests: clear the pending
 * scan enable/disable task bit and wake the suspend waiter.
 */
1233 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1235 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1237 if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1238 test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1239 wake_up(&hdev->suspend_wait_q);
1243 /* Call with hci_dev_lock */
/* Transition the controller between suspend states:
 *  - BT_SUSPEND_DISCONNECT: pause discovery and advertising, disable page
 *    scan and LE scan, then soft-disconnect all connections.
 *  - BT_SUSPEND_CONFIGURE_WAKE: program wake event filters and low-duty
 *    passive scanning.
 *  - otherwise (resume): clear filters, restore scanning, advertising and
 *    discovery to their pre-suspend state.
 * Each branch queues its commands and runs them with suspend_req_complete.
 */
1244 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1247 struct hci_conn *conn;
1248 struct hci_request req;
1250 int disconnect_counter;
1252 if (next == hdev->suspend_state) {
1253 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1257 hdev->suspend_state = next;
1258 hci_req_init(&req, hdev);
1260 if (next == BT_SUSPEND_DISCONNECT) {
1261 /* Mark device as suspended */
1262 hdev->suspended = true;
1264 /* Pause discovery if not already stopped */
1265 old_state = hdev->discovery.state;
1266 if (old_state != DISCOVERY_STOPPED) {
1267 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1268 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1269 queue_work(hdev->req_workqueue, &hdev->discov_update);
1272 hdev->discovery_paused = true;
1273 hdev->discovery_old_state = old_state;
1275 /* Stop directed advertising */
1276 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1278 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1279 cancel_delayed_work(&hdev->discov_off);
1280 queue_delayed_work(hdev->req_workqueue,
1281 &hdev->discov_off, 0);
1284 /* Pause other advertisements */
1285 if (hdev->adv_instance_cnt)
1286 hci_suspend_adv_instances(&req);
1288 hdev->advertising_paused = true;
1289 hdev->advertising_old_state = old_state;
1290 /* Disable page scan */
1291 page_scan = SCAN_DISABLED;
1292 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
1294 /* Disable LE passive scan if enabled */
1295 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1296 hci_req_add_le_scan_disable(&req, false);
1298 /* Mark task needing completion */
1299 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1301 /* Prevent disconnects from causing scanning to be re-enabled */
1302 hdev->scanning_paused = true;
1304 /* Run commands before disconnecting */
1305 hci_req_run(&req, suspend_req_complete);
1307 disconnect_counter = 0;
1308 /* Soft disconnect everything (power off) */
1309 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1310 hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1311 disconnect_counter++;
1314 if (disconnect_counter > 0) {
1316 "Had %d disconnects. Will wait on them",
1317 disconnect_counter);
1318 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1320 } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1321 /* Unpause to take care of updating scanning params */
1322 hdev->scanning_paused = false;
1323 /* Enable event filter for paired devices */
1324 hci_req_set_event_filter(&req);
1325 /* Enable passive scan at lower duty cycle */
1326 hci_req_config_le_suspend_scan(&req);
1327 /* Pause scan changes again. */
1328 hdev->scanning_paused = true;
1329 hci_req_run(&req, suspend_req_complete);
1331 hdev->suspended = false;
1332 hdev->scanning_paused = false;
1334 hci_req_clear_event_filter(&req);
1335 /* Reset passive/background scanning to normal */
1336 hci_req_config_le_suspend_scan(&req);
1338 /* Unpause directed advertising */
1339 hdev->advertising_paused = false;
1340 if (hdev->advertising_old_state) {
1341 set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1342 hdev->suspend_tasks);
1343 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1344 queue_work(hdev->req_workqueue,
1345 &hdev->discoverable_update);
1346 hdev->advertising_old_state = 0;
1349 /* Resume other advertisements */
1350 if (hdev->adv_instance_cnt)
1351 hci_resume_adv_instances(&req);
1353 /* Unpause discovery */
1354 hdev->discovery_paused = false;
1355 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1356 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1357 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1358 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1359 queue_work(hdev->req_workqueue, &hdev->discov_update);
1362 hci_req_run(&req, suspend_req_complete);
1365 hdev->suspend_state = next;
/* Notifier can proceed regardless of which branch ran. */
1368 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1369 wake_up(&hdev->suspend_wait_q);
/* Return the scan-response data length for the currently selected
 * advertising instance. Instance 0x00 always advertises the local name;
 * other instances report the scan_rsp_len stored in their adv_info entry.
 * NOTE(review): this excerpt elides lines (the instance-0 early return
 * and the NULL check after the lookup are not visible) — confirm against
 * the full file.
 */
1372 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
1374 u8 instance = hdev->cur_adv_instance;
1375 struct adv_info *adv_instance;
1377 /* Instance 0x00 always set local name */
1378 if (instance == 0x00)
1381 adv_instance = hci_find_adv_instance(hdev, instance);
1385 /* TODO: Take into account the "appearance" and "local-name" flags here.
1386 * These are currently being ignored as they are not supported.
1388 return adv_instance->scan_rsp_len;
/* Queue the HCI command that turns advertising off: disable extended
 * advertising set 0x00 when the controller is ext-adv capable, otherwise
 * fall back to the legacy LE Set Advertising Enable command.
 */
1391 void __hci_req_disable_advertising(struct hci_request *req)
1393 if (ext_adv_capable(req->hdev)) {
1394 __hci_req_disable_ext_adv_instance(req, 0x00);
1399 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Compute the MGMT advertising flags for @instance.
 *
 * Instance 0x00 is special: its flags are derived from the global
 * HCI_ADVERTISING_CONNECTABLE / HCI_(LIMITED_)DISCOVERABLE dev flags and
 * it always manages "Tx Power" and "Flags". Any other instance returns
 * the flags stored with its adv_info entry (0 for an unknown instance,
 * per the comment below — the early return is elided in this excerpt).
 */
1403 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1406 struct adv_info *adv_instance;
1408 if (instance == 0x00) {
1409 /* Instance 0 always manages the "Tx Power" and "Flags"
1412 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1414 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1415 * corresponds to the "connectable" instance flag.
1417 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1418 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1420 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1421 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1422 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1423 flags |= MGMT_ADV_FLAG_DISCOV;
1428 adv_instance = hci_find_adv_instance(hdev, instance);
1430 /* Return 0 when we got an invalid instance identifier. */
1434 return adv_instance->flags;
/* Decide whether advertising should use a Resolvable Private Address.
 * No RPA without HCI_PRIVACY; with basic privacy, always use an RPA;
 * with limited privacy, skip the RPA only while both discoverable and
 * bondable (the interleaved return statements are elided here).
 */
1437 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1439 /* If privacy is not enabled don't use RPA */
1440 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1443 /* If basic privacy mode is enabled use RPA */
1444 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1447 /* If limited privacy mode is enabled don't use RPA if we're
1448 * both discoverable and bondable.
1450 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1451 hci_dev_test_flag(hdev, HCI_BONDABLE))
1454 /* We're neither bondable nor discoverable in the limited
1455 * privacy mode, therefore use RPA.
/* Check the controller's supported-LE-states bitmask (hdev->le_states)
 * to decide whether (non-)connectable advertising may be started while
 * existing LE connections are up. With no LE links advertising is always
 * allowed; otherwise the relevant combined-state bits are tested for
 * slave-role and master-role connections separately. Bit numbers follow
 * the Core Spec LE states table (see inline comments).
 */
1460 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1462 /* If there is no connection we are OK to advertise. */
1463 if (hci_conn_num(hdev, LE_LINK) == 0)
1466 /* Check le_states if there is any connection in slave role. */
1467 if (hdev->conn_hash.le_num_slave > 0) {
1468 /* Slave connection state and non connectable mode bit 20. */
1469 if (!connectable && !(hdev->le_states[2] & 0x10))
1472 /* Slave connection state and connectable mode bit 38
1473 * and scannable bit 21.
1475 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1476 !(hdev->le_states[2] & 0x20)))
1480 /* Check le_states if there is any connection in master role. */
1481 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1482 /* Master connection state and non connectable mode bit 18. */
1483 if (!connectable && !(hdev->le_states[2] & 0x02))
1486 /* Master connection state and connectable mode bit 35 and
1489 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1490 !(hdev->le_states[2] & 0x08)))
/* Build the legacy-advertising enable sequence on @req:
 * 1. derive connectable-ness from the instance flags / global setting
 *    and bail out if the controller's LE states forbid advertising now;
 * 2. disable any running advertising and clear HCI_LE_ADV so a new
 *    random address may be written;
 * 3. pick the ADV type (IND / SCAN_IND / NONCONN_IND) and the interval
 *    (fast discoverable interval vs. the configured default);
 * 4. queue LE Set Adv Param followed by LE Set Adv Enable (enable=0x01).
 */
1497 void __hci_req_enable_advertising(struct hci_request *req)
1499 struct hci_dev *hdev = req->hdev;
1500 struct hci_cp_le_set_adv_param cp;
1501 u8 own_addr_type, enable = 0x01;
1503 u16 adv_min_interval, adv_max_interval;
1506 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1508 /* If the "connectable" instance flag was not set, then choose between
1509 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1511 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1512 mgmt_get_connectable(hdev);
1514 if (!is_advertising_allowed(hdev, connectable))
1517 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1518 __hci_req_disable_advertising(req)
1520 /* Clear the HCI_LE_ADV bit temporarily so that the
1521 * hci_update_random_address knows that it's safe to go ahead
1522 * and write a new random address. The flag will be set back on
1523 * as soon as the SET_ADV_ENABLE HCI command completes.
1525 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1527 /* Set require_privacy to true only when non-connectable
1528 * advertising is used. In that case it is fine to use a
1529 * non-resolvable private address.
1531 if (hci_update_random_address(req, !connectable,
1532 adv_use_rpa(hdev, flags),
1533 &own_addr_type) < 0)
1536 memset(&cp, 0, sizeof(cp));
1539 cp.type = LE_ADV_IND;
1541 adv_min_interval = hdev->le_adv_min_interval;
1542 adv_max_interval = hdev->le_adv_max_interval;
1544 if (get_cur_adv_instance_scan_rsp_len(hdev))
1545 cp.type = LE_ADV_SCAN_IND;
1547 cp.type = LE_ADV_NONCONN_IND;
1549 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1550 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1551 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1552 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1554 adv_min_interval = hdev->le_adv_min_interval;
1555 adv_max_interval = hdev->le_adv_max_interval;
1559 cp.min_interval = cpu_to_le16(adv_min_interval);
1560 cp.max_interval = cpu_to_le16(adv_max_interval);
1561 cp.own_address_type = own_addr_type;
1562 cp.channel_map = hdev->le_adv_channel_map;
1564 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1566 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Append the device name to an EIR/AD buffer at @ptr, given @ad_len bytes
 * already used. Preference order: complete name if it fits within
 * HCI_MAX_SHORT_NAME_LENGTH, then the configured short name, then a
 * truncated copy of the full name. Returns the new total AD length
 * (via eir_append_data). Bails out early if fewer than
 * HCI_MAX_SHORT_NAME_LENGTH + 3 bytes remain (len + type + NUL).
 */
1569 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1572 size_t complete_len;
1574 /* no space left for name (+ NULL + type + len) */
1575 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1578 /* use complete name if present and fits */
1579 complete_len = strlen(hdev->dev_name);
1580 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1581 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1582 hdev->dev_name, complete_len + 1);
1584 /* use short name if present */
1585 short_len = strlen(hdev->short_name);
1587 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1588 hdev->short_name, short_len + 1);
1590 /* use shortened full name if present, we already know that name
1591 * is longer then HCI_MAX_SHORT_NAME_LENGTH
1594 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1596 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1597 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1599 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
/* Append the 16-bit "Appearance" AD field (hdev->appearance) to the
 * buffer at @ptr and return the updated AD length.
 */
1606 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1608 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
/* Build the default (instance 0x00) scan response payload into @ptr:
 * appearance (if configured) followed by the local name. Returns the
 * total length written.
 */
1611 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1613 u8 scan_rsp_len = 0;
1615 if (hdev->appearance) {
1616 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1619 return append_local_name(hdev, ptr, scan_rsp_len);
/* Build the scan response payload for a specific advertising instance:
 * optional appearance (MGMT_ADV_FLAG_APPEARANCE), the instance's stored
 * scan_rsp_data, and optionally the local name
 * (MGMT_ADV_FLAG_LOCAL_NAME). Returns the total length written.
 * NOTE(review): the NULL check after hci_find_adv_instance() is elided
 * in this excerpt.
 */
1622 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1625 struct adv_info *adv_instance;
1627 u8 scan_rsp_len = 0;
1629 adv_instance = hci_find_adv_instance(hdev, instance);
1633 instance_flags = adv_instance->flags;
1635 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1636 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1639 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1640 adv_instance->scan_rsp_len);
1642 scan_rsp_len += adv_instance->scan_rsp_len;
1644 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1645 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1647 return scan_rsp_len;
/* Queue the HCI command that updates the controller's scan response data
 * for @instance (0x00 = default data). Uses the extended
 * (LE Set Ext Scan Rsp Data) or legacy (LE Set Scan Rsp Data) command
 * depending on controller capability. In both paths the freshly built
 * payload is compared against the cached copy in hdev->scan_rsp_data and
 * the command is skipped when nothing changed.
 */
1650 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1652 struct hci_dev *hdev = req->hdev;
1655 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1658 if (ext_adv_capable(hdev)) {
1660 struct hci_cp_le_set_ext_scan_rsp_data cp;
1661 u8 data[HCI_MAX_EXT_AD_LENGTH];
1664 memset(&pdu, 0, sizeof(pdu));
1667 len = create_instance_scan_rsp_data(hdev, instance,
1670 len = create_default_scan_rsp_data(hdev, pdu.data);
1672 if (hdev->scan_rsp_data_len == len &&
1673 !memcmp(pdu.data, hdev->scan_rsp_data, len))
1676 memcpy(hdev->scan_rsp_data, pdu.data, len);
1677 hdev->scan_rsp_data_len = len;
1679 pdu.cp.handle = instance;
1680 pdu.cp.length = len;
1681 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1682 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1684 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1685 sizeof(pdu.cp) + len, &pdu.cp);
1687 struct hci_cp_le_set_scan_rsp_data cp;
1689 memset(&cp, 0, sizeof(cp));
1692 len = create_instance_scan_rsp_data(hdev, instance,
1695 len = create_default_scan_rsp_data(hdev, cp.data);
1697 if (hdev->scan_rsp_data_len == len &&
1698 !memcmp(cp.data, hdev->scan_rsp_data, len))
1701 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1702 hdev->scan_rsp_data_len = len;
1706 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Build the advertising (AD) payload for @instance into @ptr and return
 * its length: optional "Flags" field (general/limited discoverable,
 * NO_BREDR), the instance's stored adv_data, and an optional Tx Power
 * field when MGMT_ADV_FLAG_TX_POWER is set and a valid power value is
 * available. Several lines (flags field emission, length bookkeeping,
 * final return) are elided in this excerpt — verify against the full
 * file before relying on exact layout.
 */
1710 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1712 struct adv_info *adv_instance = NULL;
1713 u8 ad_len = 0, flags = 0;
1716 /* Return 0 when the current instance identifier is invalid. */
1718 adv_instance = hci_find_adv_instance(hdev, instance);
1723 instance_flags = get_adv_instance_flags(hdev, instance);
1725 /* If instance already has the flags set skip adding it once
1728 if (adv_instance && eir_get_data(adv_instance->adv_data,
1729 adv_instance->adv_data_len, EIR_FLAGS,
1733 /* The Add Advertising command allows userspace to set both the general
1734 * and limited discoverable flags.
1736 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1737 flags |= LE_AD_GENERAL;
1739 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1740 flags |= LE_AD_LIMITED;
1742 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1743 flags |= LE_AD_NO_BREDR;
1745 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1746 /* If a discovery flag wasn't provided, simply use the global
1750 flags |= mgmt_get_adv_discov_flags(hdev);
1752 /* If flags would still be empty, then there is no need to
1753 * include the "Flags" AD field".
1767 memcpy(ptr, adv_instance->adv_data,
1768 adv_instance->adv_data_len);
1769 ad_len += adv_instance->adv_data_len;
1770 ptr += adv_instance->adv_data_len;
1773 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1776 if (ext_adv_capable(hdev)) {
1778 adv_tx_power = adv_instance->tx_power;
1780 adv_tx_power = hdev->adv_tx_power;
1782 adv_tx_power = hdev->adv_tx_power;
1785 /* Provide Tx Power only if we can provide a valid value for it */
1786 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1788 ptr[1] = EIR_TX_POWER;
1789 ptr[2] = (u8)adv_tx_power;
/* Queue the HCI command that updates the controller's advertising data
 * for @instance, mirroring __hci_req_update_scan_rsp_data(): build the
 * payload, compare against the cached hdev->adv_data and skip the
 * command when unchanged, otherwise update the cache and queue the
 * extended or legacy Set Adv Data command as appropriate.
 */
1799 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1801 struct hci_dev *hdev = req->hdev;
1804 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1807 if (ext_adv_capable(hdev)) {
1809 struct hci_cp_le_set_ext_adv_data cp;
1810 u8 data[HCI_MAX_EXT_AD_LENGTH];
1813 memset(&pdu, 0, sizeof(pdu));
1815 len = create_instance_adv_data(hdev, instance, pdu.data);
1817 /* There's nothing to do if the data hasn't changed */
1818 if (hdev->adv_data_len == len &&
1819 memcmp(pdu.data, hdev->adv_data, len) == 0)
1822 memcpy(hdev->adv_data, pdu.data, len);
1823 hdev->adv_data_len = len;
1825 pdu.cp.length = len;
1826 pdu.cp.handle = instance;
1827 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1828 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1830 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1831 sizeof(pdu.cp) + len, &pdu.cp);
1833 struct hci_cp_le_set_adv_data cp;
1835 memset(&cp, 0, sizeof(cp));
1837 len = create_instance_adv_data(hdev, instance, cp.data);
1839 /* There's nothing to do if the data hasn't changed */
1840 if (hdev->adv_data_len == len &&
1841 memcmp(cp.data, hdev->adv_data, len) == 0)
1844 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1845 hdev->adv_data_len = len;
1849 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Convenience wrapper: build a standalone request that refreshes the
 * advertising data for @instance and run it immediately (no completion
 * callback). Returns the hci_req_run() result.
 */
1853 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1855 struct hci_request req;
1857 hci_req_init(&req, hdev);
1858 __hci_req_update_adv_data(&req, instance);
1860 return hci_req_run(&req, NULL);
/* Completion callback for the address-resolution toggle request;
 * only logs the command status.
 */
1863 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1866 BT_DBG("%s status %u", hdev->name, status);
/* Turn off controller-based LL address resolution. Does nothing unless
 * LL privacy is in use or HCI_LL_RPA_RESOLUTION is set; otherwise queues
 * LE Set Address Resolution Enable (the local "enable" value declaration
 * is elided in this excerpt) and runs the request.
 */
1869 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1871 struct hci_request req;
1874 if (!use_ll_privacy(hdev) &&
1875 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1878 hci_req_init(&req, hdev);
1880 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1882 hci_req_run(&req, enable_addr_resolution_complete);
/* Completion callback for re-enable-advertising requests; logs only. */
1885 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1887 BT_DBG("%s status %u", hdev->name, status);
1890 void hci_req_reenable_advertising(struct hci_dev *hdev)
1892 struct hci_request req;
1894 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1895 list_empty(&hdev->adv_instances))
1898 hci_req_init(&req, hdev);
1900 if (hdev->cur_adv_instance) {
1901 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1904 if (ext_adv_capable(hdev)) {
1905 __hci_req_start_ext_adv(&req, 0x00);
1907 __hci_req_update_adv_data(&req, 0x00);
1908 __hci_req_update_scan_rsp_data(&req, 0x00);
1909 __hci_req_enable_advertising(&req);
1913 hci_req_run(&req, adv_enable_complete);
/* Delayed-work handler fired when the current advertising instance's
 * timeout expires: clears the timeout, removes the instance (which may
 * schedule the next one via the passed request), disables advertising
 * entirely if no instances remain, and runs the request. Runs under
 * hci_dev lock (the lock call is elided in this excerpt; the unlock
 * is visible below).
 */
1916 static void adv_timeout_expire(struct work_struct *work)
1918 struct hci_dev *hdev = container_of(work, struct hci_dev,
1919 adv_instance_expire.work);
1921 struct hci_request req;
1924 BT_DBG("%s", hdev->name);
1928 hdev->adv_instance_timeout = 0;
1930 instance = hdev->cur_adv_instance;
1931 if (instance == 0x00)
1934 hci_req_init(&req, hdev);
1936 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1938 if (list_empty(&hdev->adv_instances))
1939 __hci_req_disable_advertising(&req);
1941 hci_req_run(&req, NULL);
1944 hci_dev_unlock(hdev);
/* Request builder for one step of interleaved scanning: restart passive
 * scanning (disabling it first if active) and advance the interleave
 * state machine, alternating between allowlist-filtered and unfiltered
 * scanning. INTERLEAVE_SCAN_NONE here is unexpected and logged as an
 * error. Runs with the hci_dev lock held (lock acquisition elided in
 * this excerpt; unlock visible below).
 */
1947 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1950 struct hci_dev *hdev = req->hdev;
1955 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1956 hci_req_add_le_scan_disable(req, false);
1957 hci_req_add_le_passive_scan(req);
1959 switch (hdev->interleave_scan_state) {
1960 case INTERLEAVE_SCAN_ALLOWLIST:
1961 bt_dev_dbg(hdev, "next state: allowlist");
1962 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1964 case INTERLEAVE_SCAN_NO_FILTER:
1965 bt_dev_dbg(hdev, "next state: no filter");
1966 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1968 case INTERLEAVE_SCAN_NONE:
1969 BT_ERR("unexpected error");
1973 hci_dev_unlock(hdev);
/* Delayed-work handler driving interleaved scanning: pick the dwell
 * time for the current phase (allowlist vs. no-filter duration),
 * synchronously run one interleave step, and re-queue itself for the
 * next phase as long as interleave scanning is still active.
 */
1978 static void interleave_scan_work(struct work_struct *work)
1980 struct hci_dev *hdev = container_of(work, struct hci_dev,
1981 interleave_scan.work);
1983 unsigned long timeout;
1985 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1986 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1987 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1988 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1990 bt_dev_err(hdev, "unexpected error");
1994 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1995 HCI_CMD_TIMEOUT, &status);
1997 /* Don't continue interleaving if it was canceled */
1998 if (is_interleave_scanning(hdev))
1999 queue_delayed_work(hdev->req_workqueue,
2000 &hdev->interleave_scan, timeout);
/* Resolve which own-address type and random address an (extended)
 * advertising instance should use, without queueing HCI commands.
 *
 * @require_privacy: non-connectable advertising — an NRPA is acceptable.
 * @use_rpa:         privacy is on; use a resolvable private address.
 * @adv_instance:    per-instance RPA state, or NULL for the default set.
 * @own_addr_type:   out — ADDR_LE_DEV_{PUBLIC,RANDOM,RANDOM_RESOLVED}.
 * @rand_addr:       out — address to program, BDADDR_ANY if none needed.
 *
 * RPA path: reuse the current RPA when it has not expired, otherwise
 * generate a fresh one via smp_generate_rpa() and arm the rpa_timeout
 * expiry work (per-instance or global). NRPA path: random 6 bytes that
 * must differ from the public address (the retry/loop structure around
 * that check is elided in this excerpt). Otherwise fall back to the
 * public address.
 */
2003 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
2004 bool use_rpa, struct adv_info *adv_instance,
2005 u8 *own_addr_type, bdaddr_t *rand_addr)
2009 bacpy(rand_addr, BDADDR_ANY);
2011 /* If privacy is enabled use a resolvable private address. If
2012 * current RPA has expired then generate a new one.
2017 /* If Controller supports LL Privacy use own address type is
2020 if (use_ll_privacy(hdev))
2021 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2023 *own_addr_type = ADDR_LE_DEV_RANDOM;
2026 if (!adv_instance->rpa_expired &&
2027 !bacmp(&adv_instance->random_addr, &hdev->rpa))
2030 adv_instance->rpa_expired = false;
2032 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2033 !bacmp(&hdev->random_addr, &hdev->rpa))
2037 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2039 bt_dev_err(hdev, "failed to generate new RPA");
2043 bacpy(rand_addr, &hdev->rpa);
2045 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2047 queue_delayed_work(hdev->workqueue,
2048 &adv_instance->rpa_expired_cb, to);
2050 queue_delayed_work(hdev->workqueue,
2051 &hdev->rpa_expired, to);
2056 /* In case of required privacy without resolvable private address,
2057 * use an non-resolvable private address. This is useful for
2058 * non-connectable advertising.
2060 if (require_privacy) {
2064 /* The non-resolvable private address is generated
2065 * from random six bytes with the two most significant
2068 get_random_bytes(&nrpa, 6);
2071 /* The non-resolvable private address shall not be
2072 * equal to the public address.
2074 if (bacmp(&hdev->bdaddr, &nrpa))
2078 *own_addr_type = ADDR_LE_DEV_RANDOM;
2079 bacpy(rand_addr, &nrpa);
2084 /* No privacy so use a public address. */
2085 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Queue LE Clear Advertising Sets, removing all extended advertising
 * sets from the controller.
 */
2090 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
2092 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
/* Configure an extended advertising set for @instance:
 * check that advertising is currently allowed, pick the own address
 * type and (possibly) a random address via hci_get_random_address(),
 * fill in LE Set Extended Advertising Parameters (intervals, event
 * properties chosen from connectable/scannable/secondary-PHY flags,
 * PHYs, channel map, handle) and queue it, then queue LE Set Adv Set
 * Random Address when a random address must be programmed and differs
 * from what the set already uses. Several lines (error returns,
 * variable declarations, the final hci_req_add arguments) are elided
 * in this excerpt.
 */
2095 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
2097 struct hci_cp_le_set_ext_adv_params cp;
2098 struct hci_dev *hdev = req->hdev;
2101 bdaddr_t random_addr;
2104 struct adv_info *adv_instance;
2108 adv_instance = hci_find_adv_instance(hdev, instance);
2112 adv_instance = NULL;
2115 flags = get_adv_instance_flags(hdev, instance);
2117 /* If the "connectable" instance flag was not set, then choose between
2118 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
2120 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
2121 mgmt_get_connectable(hdev);
2123 if (!is_advertising_allowed(hdev, connectable))
2126 /* Set require_privacy to true only when non-connectable
2127 * advertising is used. In that case it is fine to use a
2128 * non-resolvable private address.
2130 err = hci_get_random_address(hdev, !connectable,
2131 adv_use_rpa(hdev, flags), adv_instance,
2132 &own_addr_type, &random_addr);
2136 memset(&cp, 0, sizeof(cp));
2138 /* In ext adv set param interval is 3 octets */
2139 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
2140 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
2142 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
2146 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
2148 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
2149 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
2151 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
2153 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
2156 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
2158 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
2161 cp.own_addr_type = own_addr_type;
2162 cp.channel_map = hdev->le_adv_channel_map;
2164 cp.handle = instance;
2166 if (flags & MGMT_ADV_FLAG_SEC_2M) {
2167 cp.primary_phy = HCI_ADV_PHY_1M;
2168 cp.secondary_phy = HCI_ADV_PHY_2M;
2169 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
2170 cp.primary_phy = HCI_ADV_PHY_CODED;
2171 cp.secondary_phy = HCI_ADV_PHY_CODED;
2173 /* In all other cases use 1M */
2174 cp.primary_phy = HCI_ADV_PHY_1M;
2175 cp.secondary_phy = HCI_ADV_PHY_1M;
2178 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
2180 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
2181 bacmp(&random_addr, BDADDR_ANY)) {
2182 struct hci_cp_le_set_adv_set_rand_addr cp;
2184 /* Check if random address need to be updated */
2186 if (!bacmp(&random_addr, &adv_instance->random_addr))
2189 if (!bacmp(&random_addr, &hdev->random_addr))
2193 memset(&cp, 0, sizeof(cp));
2195 cp.handle = instance;
2196 bacpy(&cp.bdaddr, &random_addr);
2199 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
/* Queue LE Set Extended Advertising Enable for a single set (@instance).
 * The command buffer holds the header (cp) plus one hci_cp_ext_adv_set
 * record; when the instance has a timeout the per-set duration is filled
 * in (units of 10 ms) so the controller stops it autonomously.
 * NOTE(review): the line assigning "cp = (void *)data" is elided in this
 * excerpt.
 */
2206 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
2208 struct hci_dev *hdev = req->hdev;
2209 struct hci_cp_le_set_ext_adv_enable *cp;
2210 struct hci_cp_ext_adv_set *adv_set;
2211 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2212 struct adv_info *adv_instance;
2215 adv_instance = hci_find_adv_instance(hdev, instance);
2219 adv_instance = NULL;
2223 adv_set = (void *) cp->data;
2225 memset(cp, 0, sizeof(*cp));
2228 cp->num_of_sets = 0x01;
2230 memset(adv_set, 0, sizeof(*adv_set));
2232 adv_set->handle = instance;
2234 /* Set duration per instance since controller is responsible for
2237 if (adv_instance && adv_instance->timeout) {
2238 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
2240 /* Time = N * 10 ms */
2241 adv_set->duration = cpu_to_le16(duration / 10);
2244 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
2245 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
/* Queue LE Set Extended Advertising Enable with enable=0 to stop
 * @instance. Instance 0x00 means "disable all sets" (num_of_sets = 0,
 * no per-set record); a non-zero instance must exist or the call fails.
 */
2251 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2253 struct hci_dev *hdev = req->hdev;
2254 struct hci_cp_le_set_ext_adv_enable *cp;
2255 struct hci_cp_ext_adv_set *adv_set;
2256 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2259 /* If request specifies an instance that doesn't exist, fail */
2260 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2263 memset(data, 0, sizeof(data));
2266 adv_set = (void *)cp->data;
2268 /* Instance 0x00 indicates all advertising instances will be disabled */
2269 cp->num_of_sets = !!instance;
2272 adv_set->handle = instance;
2274 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2275 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
/* Queue LE Remove Advertising Set for @instance, after verifying that a
 * non-zero instance actually exists.
 */
2280 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2282 struct hci_dev *hdev = req->hdev;
2284 /* If request specifies an instance that doesn't exist, fail */
2285 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2288 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
/* (Re)start extended advertising for @instance: disable the set first
 * when the controller already knows about it (instance not pending),
 * set up its parameters, then refresh scan-response data and enable it.
 * Returns an error from setup; otherwise proceeds to enable.
 */
2293 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2295 struct hci_dev *hdev = req->hdev;
2296 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2299 /* If instance isn't pending, the chip knows about it, and it's safe to
2302 if (adv_instance && !adv_instance->pending)
2303 __hci_req_disable_ext_adv_instance(req, instance);
2305 err = __hci_req_setup_ext_adv_instance(req, instance);
2309 __hci_req_update_scan_rsp_data(req, instance);
2310 __hci_req_enable_ext_advertising(req, instance);
/* Make @instance the current advertising instance and start it.
 *
 * Skipped when software advertising (HCI_ADVERTISING) is active, no
 * instances exist, or a timeout is already pending. Computes the slot
 * duration from the instance's duration/remaining_time (0 timeout =
 * unlimited), decrements remaining_time for time-limited instances,
 * and — for legacy (non ext-adv) controllers — arms the
 * adv_instance_expire delayed work to rotate instances. Re-scheduling
 * the already-active instance without @force is a no-op.
 */
2315 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2318 struct hci_dev *hdev = req->hdev;
2319 struct adv_info *adv_instance = NULL;
2322 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2323 list_empty(&hdev->adv_instances))
2326 if (hdev->adv_instance_timeout)
2329 adv_instance = hci_find_adv_instance(hdev, instance);
2333 /* A zero timeout means unlimited advertising. As long as there is
2334 * only one instance, duration should be ignored. We still set a timeout
2335 * in case further instances are being added later on.
2337 * If the remaining lifetime of the instance is more than the duration
2338 * then the timeout corresponds to the duration, otherwise it will be
2339 * reduced to the remaining instance lifetime.
2341 if (adv_instance->timeout == 0 ||
2342 adv_instance->duration <= adv_instance->remaining_time)
2343 timeout = adv_instance->duration;
2345 timeout = adv_instance->remaining_time;
2347 /* The remaining time is being reduced unless the instance is being
2348 * advertised without time limit.
2350 if (adv_instance->timeout)
2351 adv_instance->remaining_time =
2352 adv_instance->remaining_time - timeout;
2354 /* Only use work for scheduling instances with legacy advertising */
2355 if (!ext_adv_capable(hdev)) {
2356 hdev->adv_instance_timeout = timeout;
2357 queue_delayed_work(hdev->req_workqueue,
2358 &hdev->adv_instance_expire,
2359 msecs_to_jiffies(timeout * 1000));
2362 /* If we're just re-scheduling the same instance again then do not
2363 * execute any HCI commands. This happens when a single instance is
2366 if (!force && hdev->cur_adv_instance == instance &&
2367 hci_dev_test_flag(hdev, HCI_LE_ADV))
2370 hdev->cur_adv_instance = instance;
2371 if (ext_adv_capable(hdev)) {
2372 __hci_req_start_ext_adv(req, instance);
2374 __hci_req_update_adv_data(req, instance);
2375 __hci_req_update_scan_rsp_data(req, instance);
2376 __hci_req_enable_advertising(req);
2382 /* For a single instance:
2383 * - force == true: The instance will be removed even when its remaining
2384 * lifetime is not zero.
2385 * - force == false: the instance will be deactivated but kept stored unless
2386 * the remaining lifetime is zero.
2388 * For instance == 0x00:
2389 * - force == true: All instances will be removed regardless of their timeout
2391 * - force == false: Only instances that have a timeout will be removed.
 *
 * @sk may be NULL (no mgmt "advertising removed" originator); @req may be
 * NULL, in which case no follow-up instance is scheduled. The next
 * instance to advertise is captured BEFORE removal so rotation continues
 * seamlessly when the current instance is the one being cleared.
2393 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2394 struct hci_request *req, u8 instance,
2397 struct adv_info *adv_instance, *n, *next_instance = NULL;
2401 /* Cancel any timeout concerning the removed instance(s). */
2402 if (!instance || hdev->cur_adv_instance == instance)
2403 cancel_adv_timeout(hdev);
2405 /* Get the next instance to advertise BEFORE we remove
2406 * the current one. This can be the same instance again
2407 * if there is only one instance.
2409 if (instance && hdev->cur_adv_instance == instance)
2410 next_instance = hci_get_next_instance(hdev, instance);
2412 if (instance == 0x00) {
2413 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2415 if (!(force || adv_instance->timeout))
2418 rem_inst = adv_instance->instance;
2419 err = hci_remove_adv_instance(hdev, rem_inst);
2421 mgmt_advertising_removed(sk, hdev, rem_inst);
2424 adv_instance = hci_find_adv_instance(hdev, instance);
2426 if (force || (adv_instance && adv_instance->timeout &&
2427 !adv_instance->remaining_time)) {
2428 /* Don't advertise a removed instance. */
2429 if (next_instance &&
2430 next_instance->instance == instance)
2431 next_instance = NULL;
2433 err = hci_remove_adv_instance(hdev, instance);
2435 mgmt_advertising_removed(sk, hdev, instance);
2439 if (!req || !hdev_is_powered(hdev) ||
2440 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2443 if (next_instance && !ext_adv_capable(hdev))
2444 __hci_req_schedule_adv_instance(req, next_instance->instance,
/* Queue LE Set Random Address for @rpa, unless advertising or an LE
 * connect attempt is in flight — in that case the update is deferred by
 * flagging HCI_RPA_EXPIRED so the next cycle retries (see the original
 * comment below for the controller-behavior rationale).
 */
2448 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2450 struct hci_dev *hdev = req->hdev;
2452 /* If we're advertising or initiating an LE connection we can't
2453 * go ahead and change the random address at this time. This is
2454 * because the eventual initiator address used for the
2455 * subsequently created connection will be undefined (some
2456 * controllers use the new address and others the one we had
2457 * when the operation started).
2459 * In this kind of scenario skip the update and let the random
2460 * address be updated at the next cycle.
2462 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2463 hci_lookup_le_connect(hdev)) {
2464 BT_DBG("Deferring random address update");
2465 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2469 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Select and program the own address for scanning/advertising, the
 * global (non-per-set) counterpart to hci_get_random_address().
 *
 * Priority: RPA when @use_rpa (regenerated via SMP when expired, with
 * the rpa_expired work re-armed); an NRPA when @require_privacy without
 * RPA; the static random address when forced, when no public address
 * exists, or when BR/EDR is disabled and a static address is set;
 * otherwise the public address. Writes the chosen type to
 * @own_addr_type and queues Set Random Address via set_random_addr()
 * where needed. Returns 0 on success or a negative error (some error
 * returns are elided in this excerpt).
 */
2472 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2473 bool use_rpa, u8 *own_addr_type)
2475 struct hci_dev *hdev = req->hdev;
2478 /* If privacy is enabled use a resolvable private address. If
2479 * current RPA has expired or there is something else than
2480 * the current RPA in use, then generate a new one.
2485 /* If Controller supports LL Privacy use own address type is
2488 if (use_ll_privacy(hdev))
2489 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2491 *own_addr_type = ADDR_LE_DEV_RANDOM;
2493 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2494 !bacmp(&hdev->random_addr, &hdev->rpa))
2497 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2499 bt_dev_err(hdev, "failed to generate new RPA");
2503 set_random_addr(req, &hdev->rpa);
2505 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2506 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2511 /* In case of required privacy without resolvable private address,
2512 * use an non-resolvable private address. This is useful for active
2513 * scanning and non-connectable advertising.
2515 if (require_privacy) {
2519 /* The non-resolvable private address is generated
2520 * from random six bytes with the two most significant
2523 get_random_bytes(&nrpa, 6);
2526 /* The non-resolvable private address shall not be
2527 * equal to the public address.
2529 if (bacmp(&hdev->bdaddr, &nrpa))
2533 *own_addr_type = ADDR_LE_DEV_RANDOM;
2534 set_random_addr(req, &nrpa);
2538 /* If forcing static address is in use or there is no public
2539 * address use the static address as random address (but skip
2540 * the HCI command if the current random address is already the
2543 * In case BR/EDR has been disabled on a dual-mode controller
2544 * and a static address has been configured, then use that
2545 * address instead of the public BR/EDR address.
2547 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2548 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2549 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2550 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2551 *own_addr_type = ADDR_LE_DEV_RANDOM;
2552 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2553 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2554 &hdev->static_addr);
2558 /* Neither privacy nor static address is being used so use a
2561 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Return true when the BR/EDR accept list contains at least one entry
 * with no live ACL connection (i.e. a disconnected paired device we
 * should keep page scanning for). The per-entry return lines are elided
 * in this excerpt.
 */
2566 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2568 struct bdaddr_list *b;
2570 list_for_each_entry(b, &hdev->accept_list, list) {
2571 struct hci_conn *conn;
2573 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2577 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
/* Recompute the BR/EDR scan-enable setting and queue Write Scan Enable
 * if it changed: page scan when connectable or a disconnected
 * accept-list entry exists, inquiry scan when discoverable. Skipped
 * when BR/EDR is disabled, the adapter is unpowered or powering down,
 * or scanning is paused (suspend).
 */
2584 void __hci_req_update_scan(struct hci_request *req)
2586 struct hci_dev *hdev = req->hdev;
2589 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2592 if (!hdev_is_powered(hdev))
2595 if (mgmt_powering_down(hdev))
2598 if (hdev->scanning_paused)
2601 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2602 disconnected_accept_list_entries(hdev))
2605 scan = SCAN_DISABLED;
2607 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2608 scan |= SCAN_INQUIRY;
2610 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2611 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2614 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* hci_req_sync callback: refresh the BR/EDR scan setting under the
 * hci_dev lock.
 */
2617 static int update_scan(struct hci_request *req, unsigned long opt)
2619 hci_dev_lock(req->hdev);
2620 __hci_req_update_scan(req);
2621 hci_dev_unlock(req->hdev);
/* Workqueue handler for hdev->scan_update: synchronously run the
 * scan-state update request; completion status is ignored (NULL).
 */
2625 static void scan_update_work(struct work_struct *work)
2627 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2629 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
/* hci_req_sync() callback run after the connectable setting changed:
 * refresh BR/EDR scan state, advertising data/parameters and background
 * scanning. The matching hci_dev_lock() call is not visible in this view.
 */
2632 static int connectable_update(struct hci_request *req, unsigned long opt)
2634 struct hci_dev *hdev = req->hdev;
2638 __hci_req_update_scan(req);
2640 /* If BR/EDR is not enabled and we disable advertising as a
2641 * by-product of disabling connectable, we need to update the
2642 * advertising flags.
2644 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2645 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2647 /* Update the advertising parameters if necessary */
2648 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2649 !list_empty(&hdev->adv_instances)) {
/* Extended advertising capable controllers take a different path
 * than legacy advertising ones.
 */
2650 if (ext_adv_capable(hdev))
2651 __hci_req_start_ext_adv(req, hdev->cur_adv_instance)
2653 __hci_req_enable_advertising(req);
2656 __hci_update_background_scan(req);
2658 hci_dev_unlock(hdev);
/* Workqueue handler for hdev->connectable_update: run the connectable
 * update synchronously and report the resulting status to mgmt.
 */
2663 static void connectable_update_work(struct work_struct *work)
2665 struct hci_dev *hdev = container_of(work, struct hci_dev,
2666 connectable_update);
2669 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2670 mgmt_set_connectable_complete(hdev, status);
/* OR together the service-class hints of all registered UUIDs; the result
 * becomes the service-class octet of the Class of Device.
 */
2673 static u8 get_service_classes(struct hci_dev *hdev)
2675 struct bt_uuid *uuid;
2678 list_for_each_entry(uuid, &hdev->uuids, list)
2679 val |= uuid->svc_hint;
/* Queue a Write Class of Device command built from minor/major class and
 * the aggregated service classes. Skipped (early-return lines not visible)
 * when powered off, BR/EDR disabled, the service cache is active, or the
 * class is already up to date.
 */
2684 void __hci_req_update_class(struct hci_request *req)
2686 struct hci_dev *hdev = req->hdev;
2689 BT_DBG("%s", hdev->name);
2691 if (!hdev_is_powered(hdev))
2694 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2697 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
/* CoD layout: cod[0]=minor, cod[1]=major, cod[2]=service classes. */
2700 cod[0] = hdev->minor_class;
2701 cod[1] = hdev->major_class;
2702 cod[2] = get_service_classes(hdev);
/* The limited-discoverable adjustment applied here is not visible in
 * this view — presumably sets the Limited Discoverable CoD bit.
 */
2704 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2707 if (memcmp(cod, hdev->dev_class, 3) == 0)
2710 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Queue a Write Current IAC LAP command selecting the inquiry access
 * codes: LIAC (0x9e8b00) + GIAC (0x9e8b33) for limited discoverable mode,
 * GIAC alone for general discoverable mode. No-op when not discoverable.
 */
2713 static void write_iac(struct hci_request *req)
2715 struct hci_dev *hdev = req->hdev;
2716 struct hci_cp_write_current_iac_lap cp;
2718 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2721 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2722 /* Limited discoverable mode */
/* Advertise at most two IACs, capped by what the controller supports. */
2723 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2724 cp.iac_lap[0] = 0x00; /* LIAC */
2725 cp.iac_lap[1] = 0x8b;
2726 cp.iac_lap[2] = 0x9e;
2727 cp.iac_lap[3] = 0x33; /* GIAC */
2728 cp.iac_lap[4] = 0x8b;
2729 cp.iac_lap[5] = 0x9e;
2731 /* General discoverable mode */
2733 cp.iac_lap[0] = 0x33; /* GIAC */
2734 cp.iac_lap[1] = 0x8b;
2735 cp.iac_lap[2] = 0x9e;
/* Parameter length: one num_iac byte plus 3 bytes per LAP. */
2738 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2739 (cp.num_iac * 3) + 1, &cp);
/* hci_req_sync() callback run after the discoverable setting changed:
 * refresh IAC, scan state and device class for BR/EDR, and advertising
 * data/parameters for LE. The write_iac() call and hci_dev_lock() pairing
 * are partly outside this view.
 */
2742 static int discoverable_update(struct hci_request *req, unsigned long opt)
2744 struct hci_dev *hdev = req->hdev;
2748 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2750 __hci_req_update_scan(req);
2751 __hci_req_update_class(req);
2754 /* Advertising instances don't use the global discoverable setting, so
2755 * only update AD if advertising was enabled using Set Advertising.
2757 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2758 __hci_req_update_adv_data(req, 0x00);
2760 /* Discoverable mode affects the local advertising
2761 * address in limited privacy mode.
2763 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2764 if (ext_adv_capable(hdev))
2765 __hci_req_start_ext_adv(req, 0x00);
2767 __hci_req_enable_advertising(req);
2771 hci_dev_unlock(hdev);
/* Workqueue handler for hdev->discoverable_update: run the discoverable
 * update synchronously and report the resulting status to mgmt.
 */
2776 static void discoverable_update_work(struct work_struct *work)
2778 struct hci_dev *hdev = container_of(work, struct hci_dev,
2779 discoverable_update);
2782 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2783 mgmt_set_discoverable_complete(hdev, status);
/* Queue the HCI command(s) needed to abort a connection, chosen by the
 * connection's current state (established, connecting, or incoming).
 * NOTE(review): the switch's case labels and several break statements are
 * missing from this view; comments describe only the visible branches.
 */
2786 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2789 switch (conn->state) {
/* Established connection: AMP links use Disconnect Physical Link,
 * everything else a plain Disconnect.
 */
2792 if (conn->type == AMP_LINK) {
2793 struct hci_cp_disconn_phy_link cp;
2795 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2797 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2800 struct hci_cp_disconnect dc;
2802 dc.handle = cpu_to_le16(conn->handle);
2804 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2807 conn->state = BT_DISCONN;
/* Outgoing connection attempt: cancel it. LE connections that are
 * still only being scanned for have no Create Connection to cancel.
 */
2811 if (conn->type == LE_LINK) {
2812 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2814 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
/* Create Connection Cancel only exists from Bluetooth 1.2 on. */
2816 } else if (conn->type == ACL_LINK) {
2817 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2819 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming connection request: reject it with the caller's reason
 * for ACL, or a fixed SCO-compatible reason for (e)SCO.
 */
2824 if (conn->type == ACL_LINK) {
2825 struct hci_cp_reject_conn_req rej;
2827 bacpy(&rej.bdaddr, &conn->dst);
2828 rej.reason = reason;
2830 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2832 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2833 struct hci_cp_reject_sync_conn_req rej;
2835 bacpy(&rej.bdaddr, &conn->dst);
2837 /* SCO rejection has its own limited set of
2838 * allowed error values (0x0D-0x0F) which isn't
2839 * compatible with most values passed to this
2840 * function. To be safe hard-code one of the
2841 * values that's suitable for SCO.
2843 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2845 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2850 conn->state = BT_CLOSED;
/* Completion callback for hci_abort_conn(): only logs the failure status
 * (the guard that checks for a non-zero status is not visible here).
 */
2855 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2858 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
/* Public entry point: build and run a request that aborts @conn with
 * @reason. -ENODATA (empty request, e.g. nothing to cancel) is treated
 * as success and not logged.
 */
2861 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2863 struct hci_request req;
2866 hci_req_init(&req, conn->hdev);
2868 __hci_abort_conn(&req, conn, reason);
2870 err = hci_req_run(&req, abort_conn_complete);
2871 if (err && err != -ENODATA) {
2872 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
/* hci_req_sync() callback: refresh background (passive) LE scanning under
 * hdev lock.
 */
2879 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2881 hci_dev_lock(req->hdev);
2882 __hci_update_background_scan(req);
2883 hci_dev_unlock(req->hdev);
/* Workqueue handler for hdev->bg_scan_update: run the background-scan
 * update; on failure, fail any LE connection still in BT_CONNECT state
 * (error-check lines between the sync call and the lookup are not
 * visible here).
 */
2887 static void bg_scan_update(struct work_struct *work)
2889 struct hci_dev *hdev = container_of(work, struct hci_dev,
2891 struct hci_conn *conn;
2895 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2901 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2903 hci_le_conn_failed(conn, status);
2905 hci_dev_unlock(hdev);
/* hci_req_sync() callback: queue the command(s) that disable LE scanning
 * (rpa_le_conn = false).
 */
2908 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2910 hci_req_add_le_scan_disable(req, false);
/* hci_req_sync() callback: flush the inquiry cache and queue an Inquiry
 * command using the LIAC for limited discovery, GIAC otherwise. The opt
 * argument presumably supplies the inquiry length — the line assigning
 * cp.length is not visible here.
 */
2914 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2917 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2918 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2919 struct hci_cp_inquiry cp;
2921 BT_DBG("%s", req->hdev->name);
/* Stale results must not leak into the new discovery session. */
2923 hci_dev_lock(req->hdev);
2924 hci_inquiry_cache_flush(req->hdev);
2925 hci_dev_unlock(req->hdev);
2927 memset(&cp, 0, sizeof(cp));
2929 if (req->hdev->discovery.limited)
2930 memcpy(&cp.lap, liac, sizeof(cp.lap));
2932 memcpy(&cp.lap, giac, sizeof(cp.lap));
2936 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Delayed-work handler for hdev->le_scan_disable: stop LE scanning when
 * the discovery timeout fires, then either finish discovery or continue
 * with a BR/EDR inquiry phase for interleaved discovery.
 * NOTE(review): several guard/label lines are missing from this view.
 */
2941 static void le_scan_disable_work(struct work_struct *work)
2943 struct hci_dev *hdev = container_of(work, struct hci_dev,
2944 le_scan_disable.work);
2947 BT_DBG("%s", hdev->name);
2949 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
/* A pending restart would immediately re-enable the scan we are
 * about to stop.
 */
2952 cancel_delayed_work(&hdev->le_scan_restart);
2954 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2956 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2961 hdev->discovery.scan_start = 0;
2963 /* If we were running LE only scan, change discovery state. If
2964 * we were running both LE and BR/EDR inquiry simultaneously,
2965 * and BR/EDR inquiry is already finished, stop discovery,
2966 * otherwise BR/EDR inquiry will stop discovery when finished.
2967 * If we will resolve remote device name, do not change
2971 if (hdev->discovery.type == DISCOV_TYPE_LE)
2972 goto discov_stopped;
2974 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2977 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2978 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2979 hdev->discovery.state != DISCOVERY_RESOLVING)
2980 goto discov_stopped;
/* Non-simultaneous interleaved discovery: run the BR/EDR inquiry
 * phase now that the LE phase is done.
 */
2985 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2986 HCI_CMD_TIMEOUT, &status);
2988 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2989 goto discov_stopped;
2996 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2997 hci_dev_unlock(hdev);
/* hci_req_sync() callback: restart an ongoing LE scan by disabling and
 * immediately re-enabling it (used to flush the controller's duplicate
 * filter). Uses the extended scan command set when available. No-op when
 * not scanning or when scanning is paused for suspend.
 */
3000 static int le_scan_restart(struct hci_request *req, unsigned long opt)
3002 struct hci_dev *hdev = req->hdev;
3004 /* If controller is not scanning we are done. */
3005 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3008 if (hdev->scanning_paused) {
3009 bt_dev_dbg(hdev, "Scanning is paused for suspend");
3013 hci_req_add_le_scan_disable(req, false);
3015 if (use_ext_scan(hdev)) {
3016 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
3018 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
3019 ext_enable_cp.enable = LE_SCAN_ENABLE;
3020 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3022 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
3023 sizeof(ext_enable_cp), &ext_enable_cp);
3025 struct hci_cp_le_set_scan_enable cp;
3027 memset(&cp, 0, sizeof(cp));
3028 cp.enable = LE_SCAN_ENABLE;
3029 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3030 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Delayed-work handler for hdev->le_scan_restart: restart the LE scan and
 * re-queue the le_scan_disable work with the remaining portion of the
 * original scan duration (jiffies arithmetic handles one wraparound).
 */
3036 static void le_scan_restart_work(struct work_struct *work)
3038 struct hci_dev *hdev = container_of(work, struct hci_dev,
3039 le_scan_restart.work);
3040 unsigned long timeout, duration, scan_start, now;
3043 BT_DBG("%s", hdev->name);
3045 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
3047 bt_dev_err(hdev, "failed to restart LE scan: status %d",
/* The timing bookkeeping below only matters for controllers with a
 * strict duplicate filter and an active, timed discovery scan.
 */
3054 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3055 !hdev->discovery.scan_start)
3058 /* When the scan was started, hdev->le_scan_disable has been queued
3059 * after duration from scan_start. During scan restart this job
3060 * has been canceled, and we need to queue it again after proper
3061 * timeout, to make sure that scan does not run indefinitely.
3063 duration = hdev->discovery.scan_duration;
3064 scan_start = hdev->discovery.scan_start;
3066 if (now - scan_start <= duration) {
3069 if (now >= scan_start)
3070 elapsed = now - scan_start;
/* jiffies wrapped since scan_start; compute elapsed across the
 * wraparound.
 */
3072 elapsed = ULONG_MAX - scan_start + now;
3074 timeout = duration - elapsed;
3079 queue_delayed_work(hdev->req_workqueue,
3080 &hdev->le_scan_disable, timeout);
3083 hci_dev_unlock(hdev);
/* hci_req_sync() callback: start an active LE scan for discovery with the
 * given interval (passed via opt). Temporarily stops any background scan,
 * then sets a suitable own-address type (RPA or NRPA) before enabling
 * the scan. Falls back to the public address if the random-address
 * update fails.
 */
3086 static int active_scan(struct hci_request *req, unsigned long opt)
3088 uint16_t interval = opt;
3089 struct hci_dev *hdev = req->hdev;
3091 /* Accept list is not used for discovery */
3092 u8 filter_policy = 0x00;
3093 /* Discovery doesn't require controller address resolution */
3094 bool addr_resolv = false;
3097 BT_DBG("%s", hdev->name);
3099 /* If controller is scanning, it means the background scanning is
3100 * running. Thus, we should temporarily stop it in order to set the
3101 * discovery scanning parameters.
3103 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
3104 hci_req_add_le_scan_disable(req, false);
3106 /* All active scans will be done with either a resolvable private
3107 * address (when privacy feature has been enabled) or non-resolvable
3110 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
3113 own_addr_type = ADDR_LE_DEV_PUBLIC;
3115 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
3116 hdev->le_scan_window_discovery, own_addr_type,
3117 filter_policy, addr_resolv);
/* hci_req_sync() callback for simultaneous discovery: start the active LE
 * scan, then (if it succeeded) queue the BR/EDR inquiry in the same
 * request.
 */
3121 static int interleaved_discov(struct hci_request *req, unsigned long opt)
3125 BT_DBG("%s", req->hdev->name);
3127 err = active_scan(req, opt);
3131 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
/* Kick off device discovery according to hdev->discovery.type (BR/EDR
 * only, interleaved BR/EDR+LE, or LE only) and schedule the LE scan
 * disable timeout. NOTE(review): break statements, an error-check after
 * the sync calls, and the *status default path are missing from this view.
 */
3134 static void start_discovery(struct hci_dev *hdev, u8 *status)
3136 unsigned long timeout;
3138 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
3140 switch (hdev->discovery.type) {
3141 case DISCOV_TYPE_BREDR:
/* Skip the inquiry if one is already in progress. */
3142 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
3143 hci_req_sync(hdev, bredr_inquiry,
3144 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
3147 case DISCOV_TYPE_INTERLEAVED:
3148 /* When running simultaneous discovery, the LE scanning time
3149 * should occupy the whole discovery time sine BR/EDR inquiry
3150 * and LE scanning are scheduled by the controller.
3152 * For interleaving discovery in comparison, BR/EDR inquiry
3153 * and LE scanning are done sequentially with separate
3156 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3158 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3159 /* During simultaneous discovery, we double LE scan
3160 * interval. We must leave some time for the controller
3161 * to do BR/EDR inquiry.
3163 hci_req_sync(hdev, interleaved_discov,
3164 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
/* Non-simultaneous interleaving: LE phase first, with its own
 * (shorter) interleaved timeout.
 */
3169 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3170 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3171 HCI_CMD_TIMEOUT, status);
3173 case DISCOV_TYPE_LE:
3174 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3175 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
3176 HCI_CMD_TIMEOUT, status);
3179 *status = HCI_ERROR_UNSPECIFIED;
3186 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
3188 /* When service discovery is used and the controller has a
3189 * strict duplicate filter, it is important to remember the
3190 * start and duration of the scan. This is required for
3191 * restarting scanning during the discovery phase.
3193 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
3194 hdev->discovery.result_filtering) {
3195 hdev->discovery.scan_start = jiffies;
3196 hdev->discovery.scan_duration = timeout;
3199 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
/* Queue the commands needed to stop an ongoing discovery: cancel inquiry
 * and/or LE scanning while finding, disable a lone passive scan otherwise,
 * and cancel a pending remote-name request while resolving. Returns
 * whether anything was queued (return statements not visible here).
 */
3203 bool hci_req_stop_discovery(struct hci_request *req)
3205 struct hci_dev *hdev = req->hdev;
3206 struct discovery_state *d = &hdev->discovery;
3207 struct hci_cp_remote_name_req_cancel cp;
3208 struct inquiry_entry *e;
3211 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
3213 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
3214 if (test_bit(HCI_INQUIRY, &hdev->flags))
3215 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3217 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
/* The disable timer must not fire after we stop the scan here. */
3218 cancel_delayed_work(&hdev->le_scan_disable);
3219 hci_req_add_le_scan_disable(req, false);
3224 /* Passive scanning */
3225 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
3226 hci_req_add_le_scan_disable(req, false);
3231 /* No further actions needed for LE-only discovery */
3232 if (d->type == DISCOV_TYPE_LE)
3235 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
3236 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3241 bacpy(&cp.bdaddr, &e->data.bdaddr);
3242 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
/* hci_req_sync() callback: stop discovery under hdev lock. */
3250 static int stop_discovery(struct hci_request *req, unsigned long opt)
3252 hci_dev_lock(req->hdev);
3253 hci_req_stop_discovery(req);
3254 hci_dev_unlock(req->hdev);
/* Workqueue handler for hdev->discov_update: drive the discovery state
 * machine — start discovery when STARTING (moving to FINDING on success,
 * STOPPED on failure), stop it when STOPPING.
 * NOTE(review): the status-check branch lines between start and the two
 * set_state calls are missing from this view.
 */
3259 static void discov_update(struct work_struct *work)
3261 struct hci_dev *hdev = container_of(work, struct hci_dev,
3265 switch (hdev->discovery.state) {
3266 case DISCOVERY_STARTING:
3267 start_discovery(hdev, &status);
3268 mgmt_start_discovery_complete(hdev, status);
3270 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3272 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3274 case DISCOVERY_STOPPING:
3275 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3276 mgmt_stop_discovery_complete(hdev, status);
3278 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3280 case DISCOVERY_STOPPED:
/* Delayed-work handler for hdev->discov_off: the discoverable timeout has
 * expired, so clear both discoverable flags, push the change to the
 * controller and notify mgmt of the new settings.
 */
3286 static void discov_off(struct work_struct *work)
3288 struct hci_dev *hdev = container_of(work, struct hci_dev,
3291 BT_DBG("%s", hdev->name);
3295 /* When discoverable timeout triggers, then just make sure
3296 * the limited discoverable flag is cleared. Even in the case
3297 * of a timeout triggered from general discoverable, it is
3298 * safe to unconditionally clear the flag.
3300 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3301 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3302 hdev->discov_timeout = 0;
3304 hci_dev_unlock(hdev);
/* Status is ignored: there is nothing useful to do on failure here. */
3306 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3307 mgmt_new_settings(hdev);
/* hci_req_sync() callback run when the controller is powered on: bring
 * the controller configuration (SSP, Secure Connections, LE host support,
 * advertising, link security, BR/EDR scan/class/name/EIR) in line with
 * the host's settings. NOTE(review): many lines — local declarations,
 * early returns, closing braces — are missing from this view; comments
 * describe only the visible logic.
 */
3310 static int powered_update_hci(struct hci_request *req, unsigned long opt)
3312 struct hci_dev *hdev = req->hdev;
/* Host wants SSP but the controller's host-SSP bit is off: enable it. */
3317 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3318 !lmp_host_ssp_capable(hdev)) {
3321 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3323 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3326 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3327 sizeof(support), &support);
/* Dual-mode controller with LE enabled: sync the LE host-support bits. */
3331 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3332 lmp_bredr_capable(hdev)) {
3333 struct hci_cp_write_le_host_supported cp;
3338 /* Check first if we already have the right
3339 * host state (host features set)
3341 if (cp.le != lmp_host_le_capable(hdev) ||
3342 cp.simul != lmp_host_le_br_capable(hdev))
3343 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3347 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3348 /* Make sure the controller has a good default for
3349 * advertising data. This also applies to the case
3350 * where BR/EDR was toggled during the AUTO_OFF phase.
3352 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3353 list_empty(&hdev->adv_instances)) {
3356 if (ext_adv_capable(hdev)) {
3357 err = __hci_req_setup_ext_adv_instance(req,
3360 __hci_req_update_scan_rsp_data(req,
3364 __hci_req_update_adv_data(req, 0x00);
3365 __hci_req_update_scan_rsp_data(req, 0x00);
3368 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3369 if (!ext_adv_capable(hdev))
3370 __hci_req_enable_advertising(req);
3372 __hci_req_enable_ext_advertising(req,
/* Advertising instances configured but Set Advertising off:
 * (re)schedule the first instance instead.
 */
3375 } else if (!list_empty(&hdev->adv_instances)) {
3376 struct adv_info *adv_instance;
3378 adv_instance = list_first_entry(&hdev->adv_instances,
3379 struct adv_info, list);
3380 __hci_req_schedule_adv_instance(req,
3381 adv_instance->instance,
/* Sync the controller's authentication-enable with the host's
 * link-security setting.
 */
3386 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3387 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3388 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3389 sizeof(link_sec), &link_sec);
3391 if (lmp_bredr_capable(hdev)) {
3392 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3393 __hci_req_write_fast_connectable(req, true);
3395 __hci_req_write_fast_connectable(req, false);
3396 __hci_req_update_scan(req);
3397 __hci_req_update_class(req);
3398 __hci_req_update_name(req);
3399 __hci_req_update_eir(req);
3402 hci_dev_unlock(hdev);
/* Synchronously apply the powered-on configuration to the controller.
 * The SMP channel registration mentioned below happens in lines not
 * visible in this view, before the __hci_req_sync() call.
 */
3406 int __hci_req_hci_power_on(struct hci_dev *hdev)
3408 /* Register the available SMP channels (BR/EDR and LE) only when
3409 * successfully powering on the controller. This late
3410 * registration is required so that LE SMP can clearly decide if
3411 * the public address or static address is used.
3415 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
/* Initialize all request-related work items and delayed work items on a
 * newly allocated hci_dev; counterpart of hci_request_cancel_all().
 */
3419 void hci_request_setup(struct hci_dev *hdev)
3421 INIT_WORK(&hdev->discov_update, discov_update);
3422 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3423 INIT_WORK(&hdev->scan_update, scan_update_work);
3424 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3425 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3426 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3427 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3428 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3429 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3430 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
3433 void hci_request_cancel_all(struct hci_dev *hdev)
3435 hci_req_sync_cancel(hdev, ENODEV);
3437 cancel_work_sync(&hdev->discov_update);
3438 cancel_work_sync(&hdev->bg_scan_update);
3439 cancel_work_sync(&hdev->scan_update);
3440 cancel_work_sync(&hdev->connectable_update);
3441 cancel_work_sync(&hdev->discoverable_update);
3442 cancel_delayed_work_sync(&hdev->discov_off);
3443 cancel_delayed_work_sync(&hdev->le_scan_disable);
3444 cancel_delayed_work_sync(&hdev->le_scan_restart);
3446 if (hdev->adv_instance_timeout) {
3447 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3448 hdev->adv_instance_timeout = 0;
3451 cancel_interleave_scan(hdev);