/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2
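/* An hci_request collects HCI commands into a queue and submits them to
 * the controller as one unit. For synchronous requests, hdev->req_status
 * tracks the lifecycle: HCI_REQ_PEND while waiting for the controller,
 * HCI_REQ_DONE once the completion handler has run, and HCI_REQ_CANCELED
 * when the request was aborted.
 */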
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}
bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
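/* hci_req_run() and hci_req_run_skb() only differ in the type of
 * completion callback that gets attached to the last queued command.
 */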
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb) {
			kfree_skb(hdev->req_skb);
			hdev->req_skb = skb_get(skb);
		}
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
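/* Send a single HCI command and block until the matching command complete
 * (or the requested event) arrives, returning the resulting skb. An
 * ERR_PTR is returned on failure or when no response data was received.
 */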
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
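/* Note: __hci_req_sync() expects the caller to serialize access, e.g. by
 * holding hdev->req_lock the way hci_req_sync() below does.
 */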
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
			hdev->req_status != HCI_REQ_PEND, timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect the HCI_UP
	 * check against any races from hci_dev_do_close when the controller
	 * gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
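/* Fast connectable mode shortens the page scan interval and switches to
 * interlaced page scan so that incoming connections are picked up faster,
 * at the cost of increased power consumption.
 */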
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}
static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}
static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}
/* Return true if an interleave scan was started by this function,
 * otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* If there is at least one ADV monitor and one pending LE connection
	 * or one device to be scanned for, we should alternate between
	 * allowlist scan and one without any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports));
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	BT_DBG("%s ADV monitoring is %s", hdev->name,
	       hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices
		 * to be scanned for and no ADV monitors, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req, false);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req, false);

		hci_req_add_le_passive_scan(req);
		bt_dev_dbg(hdev, "starting background scanning");
	}
}
void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
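/* The following helpers build the service UUID lists of the Extended
 * Inquiry Response. Each list starts with a length byte and an EIR type
 * (EIR_UUIDxx_ALL, downgraded to EIR_UUIDxx_SOME when the buffer runs
 * out of space), followed by the UUID values themselves.
 */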
#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
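/* Assemble the complete EIR payload: local name, TX power, device ID and
 * the 16/32/128-bit service UUID lists, in that order.
 */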
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else {
			ptr[1] = EIR_NAME_COMPLETE;
		}

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
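/* Queue the commands to stop LE scanning. When LL privacy is in use,
 * controller based address resolution is also disabled, unless the scan
 * is being stopped to create an LE connection using an RPA (rpa_le_conn).
 */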
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}
static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev) &&
	    hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}
/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}
static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of accept list even with RPAs in suspend. In the worst
	 * case, we won't be able to wake from devices that use the privacy1.2
	 * features. Additionally, once we support privacy1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered
	 * - Interleaved scanning is not currently using the allowlist
	 *
	 * Once the controller offloading of advertisement monitor is in place,
	 * the above condition should include the support of MSFT extension
	 * support.
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}
static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}
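/* Program the scan parameters and enable scanning. Extended scanning is
 * used when the controller supports it, with one set of PHY parameters
 * per supported PHY (1M/2M and Coded). addr_resolv re-enables controller
 * based address resolution on LL privacy capable controllers.
 */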
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}
/* Returns true if an le connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}
/* Ensure to call hci_req_add_le_scan_disable() first to disable the
 * controller based address resolution to be able to reconfigure
 * resolving list.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (__hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, addr_resolv);
}
static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return 1;

	return adv_instance->scan_rsp_len;
}
static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	memset(&f, 0, sizeof(f));
	f.flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

	/* Update page scan state (since we may have modified it when setting
	 * the event filter).
	 */
	__hci_req_update_scan(req);
}
static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->accept_list, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
	/* Before changing params disable scan if enabled */
	if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);

	/* Configure params and enable scanning */
	hci_req_add_le_passive_scan(req);

	/* Block suspend notifier on response */
	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
/* This function requires the caller holds hdev->lock */
static void hci_suspend_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Suspending advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}
/* This function requires the caller holds hdev->lock */
static void hci_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}
	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}
static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}
}
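/* Suspend handling walks through up to three states: BT_SUSPEND_DISCONNECT
 * pauses discovery and advertising, disables scanning and soft-disconnects
 * all links; BT_SUSPEND_CONFIGURE_WAKE programs event filters and a low
 * duty cycle passive scan for wakeable devices; any other state restores
 * normal operation.
 */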
/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop directed advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		/* Pause other advertisements */
		if (hdev->adv_instance_cnt)
			hci_suspend_adv_instances(&req);

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;
		/* Disable page scan */
		page_scan = SCAN_DISABLED;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(&req, false);

		/* Mark task needing completion */
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		hci_req_config_le_suspend_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		hci_req_clear_event_filter(&req);
		/* Reset passive/background scanning to normal */
		hci_req_config_le_suspend_scan(&req);

		/* Unpause directed advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Resume other advertisements */
		if (hdev->adv_instance_cnt)
			hci_resume_adv_instances(&req);

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}
static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}
void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}
static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}
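/* Check the controller's supported LE states (hdev->le_states) to decide
 * whether advertising may be enabled while connections in master and/or
 * slave role exist. The bit numbers in the comments below refer to the
 * LE supported states as defined in the Bluetooth Core Specification.
 */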
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (connectable) {
		cp.type = LE_ADV_IND;

		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	} else {
		if (get_cur_adv_instance_scan_rsp_len(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		} else {
			adv_min_interval = hdev->le_adv_min_interval;
			adv_max_interval = hdev->le_adv_max_interval;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}
static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_scan_rsp_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    pdu.data);
		else
			len = create_default_scan_rsp_data(hdev, pdu.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, pdu.data, len);
		hdev->scan_rsp_data_len = len;

		pdu.cp.handle = instance;
		pdu.cp.length = len;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* If the instance already has the flags set, skip adding them
	 * again
	 */
	if (adv_instance && eir_get_data(adv_instance->adv_data,
					 adv_instance->adv_data_len, EIR_FLAGS,
					 NULL))
		goto skip_flags;

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

skip_flags:
	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}
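/* Write the advertising data for the given instance to the controller,
 * but only when it differs from what was last programmed. The extended
 * and legacy advertising code paths use different HCI commands.
 */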
void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_adv_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		len = create_instance_adv_data(hdev, instance, pdu.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(pdu.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, pdu.data, len);
		hdev->adv_data_len = len;

		pdu.cp.length = len;
		pdu.cp.handle = instance;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}
int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}
static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
					    u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 enable = 0x00;

	if (!use_ll_privacy(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return;

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);

	hci_req_run(&req, enable_addr_resolution_complete);
}
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}
static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}
static int hci_req_add_le_interleaved_scan(struct hci_request *req,
					   unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	int ret = 0;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);
	hci_req_add_le_passive_scan(req);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		BT_ERR("unexpected error");
		ret = -1;
	}

	hci_dev_unlock(hdev);

	return ret;
}
static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	u8 status;
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
		     HCI_CMD_TIMEOUT, &status);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}
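/* Pick the own address type (and, if needed, a random address) for
 * advertising: a resolvable private address when privacy is enabled, a
 * freshly generated non-resolvable private address when privacy is
 * required but no RPA can be used, and the public address otherwise.
 */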
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		/* If Controller supports LL Privacy, use own address type
		 * 0x03
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}
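/* Program the extended advertising parameters for one advertising set,
 * mapping the instance flags to event properties, PHY selection and own
 * address type, and updating the per-set random address when required.
 */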
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_dev *hdev = req->hdev;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv_instance;
	bool secondary_adv;

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv_instance,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	/* In ext adv set param interval is 3 octets */
	hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
	hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = 127;
	cp.handle = instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		struct hci_cp_le_set_adv_set_rand_addr cp;

		/* Check if random address needs to be updated */
		if (adv_instance) {
			if (!bacmp(&random_addr, &adv_instance->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		memset(&cp, 0, sizeof(cp));

		cp.handle = instance;
		bacpy(&cp.bdaddr, &random_addr);

		hci_req_add(req,
			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
			    sizeof(cp), &cp);
	}

	return 0;
}
int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
	struct adv_info *adv_instance;

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	cp = (void *) data;
	adv_set = (void *) cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(adv_set, 0, sizeof(*adv_set));

	adv_set->handle = instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv_instance && adv_instance->timeout) {
		u16 duration = adv_instance->timeout * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		adv_set->duration = cpu_to_le16(duration / 10);
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
		    data);

	return 0;
}
int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
	u8 req_size;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	memset(data, 0, sizeof(data));

	cp = (void *)data;
	adv_set = (void *)cp->data;

	/* Instance 0x00 indicates all advertising instances will be disabled */
	cp->num_of_sets = !!instance;
	cp->enable = 0x00;

	adv_set->handle = instance;

	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);

	return 0;
}
int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);

	return 0;
}
int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
	int err;

	/* If instance isn't pending, the chip knows about it, and it's safe to
	 * disable
	 */
	if (adv_instance && !adv_instance->pending)
		__hci_req_disable_ext_adv_instance(req, instance);

	err = __hci_req_setup_ext_adv_instance(req, instance);
	if (err < 0)
		return err;

	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_ext_advertising(req, instance);

	return 0;
}
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;
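	/* Worked example: with a non-zero timeout, duration 2 s and
	 * remaining_time 5 s, the instance is scheduled in passes of
	 * 2 s, 2 s and finally 1 s, at which point its remaining
	 * lifetime reaches zero.
	 */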
	/* Only use work for scheduling instances with legacy advertising */
	if (!ext_adv_capable(hdev)) {
		hdev->adv_instance_timeout = timeout;
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->adv_instance_expire,
				   msecs_to_jiffies(timeout * 1000));
	}

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	if (ext_adv_capable(hdev)) {
		__hci_req_start_ext_adv(req, instance);
	} else {
		__hci_req_update_adv_data(req, instance);
		__hci_req_update_scan_rsp_data(req, instance);
		__hci_req_enable_advertising(req);
	}

	return 0;
}
/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance && !ext_adv_capable(hdev))
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
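/* Own address selection falls through in order of preference: a
 * resolvable private address when use_rpa is set, a freshly generated
 * non-resolvable private address when privacy is required without an
 * RPA, the configured static random address, and finally the public
 * address.
 */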
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If the
	 * current RPA has expired or something other than the current RPA
	 * is in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		/* If the controller supports LL Privacy, use own address
		 * type 0x03 (random address resolved by the controller).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
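/* Check whether the BR/EDR accept list contains a device that is not
 * currently connected: if so, page scan must stay enabled so that such
 * a device can reconnect even while the adapter itself is not
 * connectable.
 */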
static bool disconnected_accept_list_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->accept_list, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}
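/* Recompute the BR/EDR scan mode and queue a Write Scan Enable command
 * only if it differs from the controller state: SCAN_PAGE (0x02) keeps
 * the adapter connectable, SCAN_INQUIRY (0x01) makes it discoverable,
 * and the HCI_PSCAN/HCI_ISCAN flags mirror what is currently enabled.
 */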
void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hdev->scanning_paused)
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_accept_list_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
		else
			__hci_req_enable_advertising(req);
	}

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}
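/* The three Class of Device bytes are minor class, major class and the
 * service class bits collected from the registered UUIDs; the limited
 * discoverable bit (0x20) lives in the major class byte.
 */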
void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
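/* Inquiry Access Codes are reserved LAPs: the General IAC is 0x9e8b33
 * and the Limited IAC is 0x9e8b00, stored below in little-endian byte
 * order. The command length is one byte for Num_Current_IAC plus three
 * bytes per LAP.
 */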
static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}
static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				__hci_req_start_ext_adv(req, 0x00);
			else
				__hci_req_enable_advertising(req);
		}
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}
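/* hci_abort_conn() is the stand-alone wrapper around the helper above:
 * it builds and runs a one-off request. An -ENODATA result just means
 * no command had to be queued for the current connection state, so it
 * is not treated as a failure.
 */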
static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}
static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req, false);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */
	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}
static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	hci_req_add_le_scan_disable(req, false);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
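	/* The computation below is wrap-safe: if jiffies overflowed
	 * between scan_start and now, the elapsed time is accumulated
	 * across the wrap point before being subtracted from the total
	 * duration.
	 */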
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	/* Accept list is not used for discovery */
	u8 filter_policy = 0x00;
	/* Discovery doesn't require controller address resolution */
	bool addr_resolv = false;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
			   hdev->le_scan_window_discovery, own_addr_type,
			   filter_policy, addr_resolv);
	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     hdev->le_scan_int_discovery * 2,
				     HCI_CMD_TIMEOUT, status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}
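/* Tear down whatever discovery is in progress: inquiry, interleaved or
 * LE-only scanning, plus any pending remote name resolution. Returns
 * true if at least one HCI command was queued on the request, i.e. the
 * caller has something to run.
 */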
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req, false);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req, false);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}
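/* Bring the controller back in sync with the host settings after power
 * on: SSP and Secure Connections host support, LE host support,
 * default advertising data or the first stored instance, link security
 * and the BR/EDR scan/class/name/EIR state.
 */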
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	cancel_interleave_scan(hdev);
}