2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2014 Intel Corporation
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
24 #include <net/bluetooth/bluetooth.h>
25 #include <net/bluetooth/hci_core.h>
26 #include <net/bluetooth/mgmt.h>
29 #include "hci_request.h"
31 #define HCI_REQ_DONE 0
32 #define HCI_REQ_PEND 1
33 #define HCI_REQ_CANCELED 2
35 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
37 skb_queue_head_init(&req->cmd_q);
42 static int req_run(struct hci_request *req, hci_req_complete_t complete,
43 hci_req_complete_skb_t complete_skb)
45 struct hci_dev *hdev = req->hdev;
49 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
51 /* If an error occurred during request building, remove all HCI
52 * commands queued on the HCI request queue.
55 skb_queue_purge(&req->cmd_q);
59 /* Do not allow empty requests */
60 if (skb_queue_empty(&req->cmd_q))
63 skb = skb_peek_tail(&req->cmd_q);
65 bt_cb(skb)->hci.req_complete = complete;
66 } else if (complete_skb) {
67 bt_cb(skb)->hci.req_complete_skb = complete_skb;
68 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
71 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
72 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
73 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
75 queue_work(hdev->workqueue, &hdev->cmd_work);
80 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
82 return req_run(req, complete, NULL);
85 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
87 return req_run(req, NULL, complete);
90 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
93 BT_DBG("%s result 0x%2.2x", hdev->name, result);
95 if (hdev->req_status == HCI_REQ_PEND) {
96 hdev->req_result = result;
97 hdev->req_status = HCI_REQ_DONE;
99 hdev->req_skb = skb_get(skb);
100 wake_up_interruptible(&hdev->req_wait_q);
104 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
106 BT_DBG("%s err 0x%2.2x", hdev->name, err);
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = err;
110 hdev->req_status = HCI_REQ_CANCELED;
111 wake_up_interruptible(&hdev->req_wait_q);
115 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
116 const void *param, u8 event, u32 timeout)
118 DECLARE_WAITQUEUE(wait, current);
119 struct hci_request req;
123 BT_DBG("%s", hdev->name);
125 hci_req_init(&req, hdev);
127 hci_req_add_ev(&req, opcode, plen, param, event);
129 hdev->req_status = HCI_REQ_PEND;
131 add_wait_queue(&hdev->req_wait_q, &wait);
132 set_current_state(TASK_INTERRUPTIBLE);
134 err = hci_req_run_skb(&req, hci_req_sync_complete);
136 remove_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_RUNNING);
141 schedule_timeout(timeout);
143 remove_wait_queue(&hdev->req_wait_q, &wait);
145 if (signal_pending(current))
146 return ERR_PTR(-EINTR);
148 switch (hdev->req_status) {
150 err = -bt_to_errno(hdev->req_result);
153 case HCI_REQ_CANCELED:
154 err = -hdev->req_result;
162 hdev->req_status = hdev->req_result = 0;
164 hdev->req_skb = NULL;
166 BT_DBG("%s end: err %d", hdev->name, err);
174 return ERR_PTR(-ENODATA);
178 EXPORT_SYMBOL(__hci_cmd_sync_ev);
180 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
181 const void *param, u32 timeout)
183 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
185 EXPORT_SYMBOL(__hci_cmd_sync);
187 /* Execute request and wait for completion. */
188 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
190 unsigned long opt, u32 timeout, u8 *hci_status)
192 struct hci_request req;
193 DECLARE_WAITQUEUE(wait, current);
196 BT_DBG("%s start", hdev->name);
198 hci_req_init(&req, hdev);
200 hdev->req_status = HCI_REQ_PEND;
202 err = func(&req, opt);
205 *hci_status = HCI_ERROR_UNSPECIFIED;
209 add_wait_queue(&hdev->req_wait_q, &wait);
210 set_current_state(TASK_INTERRUPTIBLE);
212 err = hci_req_run_skb(&req, hci_req_sync_complete);
214 hdev->req_status = 0;
216 remove_wait_queue(&hdev->req_wait_q, &wait);
217 set_current_state(TASK_RUNNING);
219 /* ENODATA means the HCI request command queue is empty.
220 * This can happen when a request with conditionals doesn't
221 * trigger any commands to be sent. This is normal behavior
222 * and should not trigger an error return.
224 if (err == -ENODATA) {
231 *hci_status = HCI_ERROR_UNSPECIFIED;
236 schedule_timeout(timeout);
238 remove_wait_queue(&hdev->req_wait_q, &wait);
240 if (signal_pending(current))
243 switch (hdev->req_status) {
245 err = -bt_to_errno(hdev->req_result);
247 *hci_status = hdev->req_result;
250 case HCI_REQ_CANCELED:
251 err = -hdev->req_result;
253 *hci_status = HCI_ERROR_UNSPECIFIED;
259 *hci_status = HCI_ERROR_UNSPECIFIED;
263 kfree_skb(hdev->req_skb);
264 hdev->req_skb = NULL;
265 hdev->req_status = hdev->req_result = 0;
267 BT_DBG("%s end: err %d", hdev->name, err);
272 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
274 unsigned long opt, u32 timeout, u8 *hci_status)
278 /* Serialize all requests */
279 hci_req_sync_lock(hdev);
280 /* check the state after obtaing the lock to protect the HCI_UP
281 * against any races from hci_dev_do_close when the controller
284 if (test_bit(HCI_UP, &hdev->flags))
285 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
288 hci_req_sync_unlock(hdev);
293 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
296 int len = HCI_COMMAND_HDR_SIZE + plen;
297 struct hci_command_hdr *hdr;
300 skb = bt_skb_alloc(len, GFP_ATOMIC);
304 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
305 hdr->opcode = cpu_to_le16(opcode);
309 memcpy(skb_put(skb, plen), param, plen);
311 BT_DBG("skb len %d", skb->len);
313 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
314 hci_skb_opcode(skb) = opcode;
319 /* Queue a command to an asynchronous HCI request */
320 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
321 const void *param, u8 event)
323 struct hci_dev *hdev = req->hdev;
326 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
328 /* If an error occurred during request building, there is no point in
329 * queueing the HCI command. We can simply return.
334 skb = hci_prepare_cmd(hdev, opcode, plen, param);
336 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
342 if (skb_queue_empty(&req->cmd_q))
343 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
345 bt_cb(skb)->hci.req_event = event;
347 skb_queue_tail(&req->cmd_q, skb);
350 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
353 hci_req_add_ev(req, opcode, plen, param, 0);
356 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
358 struct hci_dev *hdev = req->hdev;
359 struct hci_cp_write_page_scan_activity acp;
362 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
365 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
369 type = PAGE_SCAN_TYPE_INTERLACED;
371 /* 160 msec page scan interval */
372 acp.interval = cpu_to_le16(0x0100);
374 type = PAGE_SCAN_TYPE_STANDARD; /* default */
376 /* default 1.28 sec page scan */
377 acp.interval = cpu_to_le16(0x0800);
380 acp.window = cpu_to_le16(0x0012);
382 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
383 __cpu_to_le16(hdev->page_scan_window) != acp.window)
384 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
387 if (hdev->page_scan_type != type)
388 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
391 /* This function controls the background scanning based on hdev->pend_le_conns
392 * list. If there are pending LE connection we start the background scanning,
393 * otherwise we stop it.
395 * This function requires the caller holds hdev->lock.
397 static void __hci_update_background_scan(struct hci_request *req)
399 struct hci_dev *hdev = req->hdev;
401 if (!test_bit(HCI_UP, &hdev->flags) ||
402 test_bit(HCI_INIT, &hdev->flags) ||
403 hci_dev_test_flag(hdev, HCI_SETUP) ||
404 hci_dev_test_flag(hdev, HCI_CONFIG) ||
405 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
406 hci_dev_test_flag(hdev, HCI_UNREGISTER))
409 /* No point in doing scanning if LE support hasn't been enabled */
410 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
413 /* If discovery is active don't interfere with it */
414 if (hdev->discovery.state != DISCOVERY_STOPPED)
417 /* Reset RSSI and UUID filters when starting background scanning
418 * since these filters are meant for service discovery only.
420 * The Start Discovery and Start Service Discovery operations
421 * ensure to set proper values for RSSI threshold and UUID
422 * filter list. So it is safe to just reset them here.
424 hci_discovery_filter_clear(hdev);
426 if (list_empty(&hdev->pend_le_conns) &&
427 list_empty(&hdev->pend_le_reports)) {
428 /* If there is no pending LE connections or devices
429 * to be scanned for, we should stop the background
433 /* If controller is not scanning we are done. */
434 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
437 hci_req_add_le_scan_disable(req);
439 BT_DBG("%s stopping background scanning", hdev->name);
441 /* If there is at least one pending LE connection, we should
442 * keep the background scan running.
445 /* If controller is connecting, we should not start scanning
446 * since some controllers are not able to scan and connect at
449 if (hci_lookup_le_connect(hdev))
452 /* If controller is currently scanning, we stop it to ensure we
453 * don't miss any advertising (due to duplicates filter).
455 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
456 hci_req_add_le_scan_disable(req);
458 hci_req_add_le_passive_scan(req);
460 BT_DBG("%s starting background scanning", hdev->name);
464 void __hci_req_update_name(struct hci_request *req)
466 struct hci_dev *hdev = req->hdev;
467 struct hci_cp_write_local_name cp;
469 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
471 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
474 #define PNP_INFO_SVCLASS_ID 0x1200
476 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
478 u8 *ptr = data, *uuids_start = NULL;
479 struct bt_uuid *uuid;
484 list_for_each_entry(uuid, &hdev->uuids, list) {
487 if (uuid->size != 16)
490 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
494 if (uuid16 == PNP_INFO_SVCLASS_ID)
500 uuids_start[1] = EIR_UUID16_ALL;
504 /* Stop if not enough space to put next UUID */
505 if ((ptr - data) + sizeof(u16) > len) {
506 uuids_start[1] = EIR_UUID16_SOME;
510 *ptr++ = (uuid16 & 0x00ff);
511 *ptr++ = (uuid16 & 0xff00) >> 8;
512 uuids_start[0] += sizeof(uuid16);
518 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
520 u8 *ptr = data, *uuids_start = NULL;
521 struct bt_uuid *uuid;
526 list_for_each_entry(uuid, &hdev->uuids, list) {
527 if (uuid->size != 32)
533 uuids_start[1] = EIR_UUID32_ALL;
537 /* Stop if not enough space to put next UUID */
538 if ((ptr - data) + sizeof(u32) > len) {
539 uuids_start[1] = EIR_UUID32_SOME;
543 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
545 uuids_start[0] += sizeof(u32);
551 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
553 u8 *ptr = data, *uuids_start = NULL;
554 struct bt_uuid *uuid;
559 list_for_each_entry(uuid, &hdev->uuids, list) {
560 if (uuid->size != 128)
566 uuids_start[1] = EIR_UUID128_ALL;
570 /* Stop if not enough space to put next UUID */
571 if ((ptr - data) + 16 > len) {
572 uuids_start[1] = EIR_UUID128_SOME;
576 memcpy(ptr, uuid->uuid, 16);
578 uuids_start[0] += 16;
584 static void create_eir(struct hci_dev *hdev, u8 *data)
589 name_len = strlen(hdev->dev_name);
595 ptr[1] = EIR_NAME_SHORT;
597 ptr[1] = EIR_NAME_COMPLETE;
599 /* EIR Data length */
600 ptr[0] = name_len + 1;
602 memcpy(ptr + 2, hdev->dev_name, name_len);
604 ptr += (name_len + 2);
607 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
609 ptr[1] = EIR_TX_POWER;
610 ptr[2] = (u8) hdev->inq_tx_power;
615 if (hdev->devid_source > 0) {
617 ptr[1] = EIR_DEVICE_ID;
619 put_unaligned_le16(hdev->devid_source, ptr + 2);
620 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
621 put_unaligned_le16(hdev->devid_product, ptr + 6);
622 put_unaligned_le16(hdev->devid_version, ptr + 8);
627 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
628 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
629 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
632 void __hci_req_update_eir(struct hci_request *req)
634 struct hci_dev *hdev = req->hdev;
635 struct hci_cp_write_eir cp;
637 if (!hdev_is_powered(hdev))
640 if (!lmp_ext_inq_capable(hdev))
643 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
646 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
649 memset(&cp, 0, sizeof(cp));
651 create_eir(hdev, cp.data);
653 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
656 memcpy(hdev->eir, cp.data, sizeof(cp.data));
658 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
661 void hci_req_add_le_scan_disable(struct hci_request *req)
663 struct hci_cp_le_set_scan_enable cp;
665 memset(&cp, 0, sizeof(cp));
666 cp.enable = LE_SCAN_DISABLE;
667 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
670 static void add_to_white_list(struct hci_request *req,
671 struct hci_conn_params *params)
673 struct hci_cp_le_add_to_white_list cp;
675 cp.bdaddr_type = params->addr_type;
676 bacpy(&cp.bdaddr, ¶ms->addr);
678 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
681 static u8 update_white_list(struct hci_request *req)
683 struct hci_dev *hdev = req->hdev;
684 struct hci_conn_params *params;
685 struct bdaddr_list *b;
686 uint8_t white_list_entries = 0;
688 /* Go through the current white list programmed into the
689 * controller one by one and check if that address is still
690 * in the list of pending connections or list of devices to
691 * report. If not present in either list, then queue the
692 * command to remove it from the controller.
694 list_for_each_entry(b, &hdev->le_white_list, list) {
695 /* If the device is neither in pend_le_conns nor
696 * pend_le_reports then remove it from the whitelist.
698 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
699 &b->bdaddr, b->bdaddr_type) &&
700 !hci_pend_le_action_lookup(&hdev->pend_le_reports,
701 &b->bdaddr, b->bdaddr_type)) {
702 struct hci_cp_le_del_from_white_list cp;
704 cp.bdaddr_type = b->bdaddr_type;
705 bacpy(&cp.bdaddr, &b->bdaddr);
707 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
712 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
713 /* White list can not be used with RPAs */
717 white_list_entries++;
720 /* Since all no longer valid white list entries have been
721 * removed, walk through the list of pending connections
722 * and ensure that any new device gets programmed into
725 * If the list of the devices is larger than the list of
726 * available white list entries in the controller, then
727 * just abort and return filer policy value to not use the
730 list_for_each_entry(params, &hdev->pend_le_conns, action) {
731 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
732 ¶ms->addr, params->addr_type))
735 if (white_list_entries >= hdev->le_white_list_size) {
736 /* Select filter policy to accept all advertising */
740 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
741 params->addr_type)) {
742 /* White list can not be used with RPAs */
746 white_list_entries++;
747 add_to_white_list(req, params);
750 /* After adding all new pending connections, walk through
751 * the list of pending reports and also add these to the
752 * white list if there is still space.
754 list_for_each_entry(params, &hdev->pend_le_reports, action) {
755 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
756 ¶ms->addr, params->addr_type))
759 if (white_list_entries >= hdev->le_white_list_size) {
760 /* Select filter policy to accept all advertising */
764 if (hci_find_irk_by_addr(hdev, ¶ms->addr,
765 params->addr_type)) {
766 /* White list can not be used with RPAs */
770 white_list_entries++;
771 add_to_white_list(req, params);
774 /* Select filter policy to use white list */
778 static bool scan_use_rpa(struct hci_dev *hdev)
780 return hci_dev_test_flag(hdev, HCI_PRIVACY);
783 void hci_req_add_le_passive_scan(struct hci_request *req)
785 struct hci_cp_le_set_scan_param param_cp;
786 struct hci_cp_le_set_scan_enable enable_cp;
787 struct hci_dev *hdev = req->hdev;
791 /* Set require_privacy to false since no SCAN_REQ are send
792 * during passive scanning. Not using an non-resolvable address
793 * here is important so that peer devices using direct
794 * advertising with our address will be correctly reported
797 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
801 /* Adding or removing entries from the white list must
802 * happen before enabling scanning. The controller does
803 * not allow white list modification while scanning.
805 filter_policy = update_white_list(req);
807 /* When the controller is using random resolvable addresses and
808 * with that having LE privacy enabled, then controllers with
809 * Extended Scanner Filter Policies support can now enable support
810 * for handling directed advertising.
812 * So instead of using filter polices 0x00 (no whitelist)
813 * and 0x01 (whitelist enabled) use the new filter policies
814 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
816 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
817 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
818 filter_policy |= 0x02;
820 memset(¶m_cp, 0, sizeof(param_cp));
821 param_cp.type = LE_SCAN_PASSIVE;
822 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
823 param_cp.window = cpu_to_le16(hdev->le_scan_window);
824 param_cp.own_address_type = own_addr_type;
825 param_cp.filter_policy = filter_policy;
826 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
829 memset(&enable_cp, 0, sizeof(enable_cp));
830 enable_cp.enable = LE_SCAN_ENABLE;
831 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
832 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
836 static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
838 u8 instance = hdev->cur_adv_instance;
839 struct adv_info *adv_instance;
841 /* Ignore instance 0 */
842 if (instance == 0x00)
845 adv_instance = hci_find_adv_instance(hdev, instance);
849 /* TODO: Take into account the "appearance" and "local-name" flags here.
850 * These are currently being ignored as they are not supported.
852 return adv_instance->scan_rsp_len;
855 void __hci_req_disable_advertising(struct hci_request *req)
859 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
862 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
865 struct adv_info *adv_instance;
867 if (instance == 0x00) {
868 /* Instance 0 always manages the "Tx Power" and "Flags"
871 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
873 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
874 * corresponds to the "connectable" instance flag.
876 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
877 flags |= MGMT_ADV_FLAG_CONNECTABLE;
879 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
880 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
881 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
882 flags |= MGMT_ADV_FLAG_DISCOV;
887 adv_instance = hci_find_adv_instance(hdev, instance);
889 /* Return 0 when we got an invalid instance identifier. */
893 return adv_instance->flags;
896 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
898 /* If privacy is not enabled don't use RPA */
899 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
902 /* If basic privacy mode is enabled use RPA */
903 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
906 /* If limited privacy mode is enabled don't use RPA if we're
907 * both discoverable and bondable.
909 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
910 hci_dev_test_flag(hdev, HCI_BONDABLE))
913 /* We're neither bondable nor discoverable in the limited
914 * privacy mode, therefore use RPA.
919 void __hci_req_enable_advertising(struct hci_request *req)
921 struct hci_dev *hdev = req->hdev;
922 struct hci_cp_le_set_adv_param cp;
923 u8 own_addr_type, enable = 0x01;
927 if (hci_conn_num(hdev, LE_LINK) > 0)
930 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
931 __hci_req_disable_advertising(req);
933 /* Clear the HCI_LE_ADV bit temporarily so that the
934 * hci_update_random_address knows that it's safe to go ahead
935 * and write a new random address. The flag will be set back on
936 * as soon as the SET_ADV_ENABLE HCI command completes.
938 hci_dev_clear_flag(hdev, HCI_LE_ADV);
940 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
942 /* If the "connectable" instance flag was not set, then choose between
943 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
945 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
946 mgmt_get_connectable(hdev);
948 /* Set require_privacy to true only when non-connectable
949 * advertising is used. In that case it is fine to use a
950 * non-resolvable private address.
952 if (hci_update_random_address(req, !connectable,
953 adv_use_rpa(hdev, flags),
957 memset(&cp, 0, sizeof(cp));
958 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
959 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
962 cp.type = LE_ADV_IND;
963 else if (get_cur_adv_instance_scan_rsp_len(hdev))
964 cp.type = LE_ADV_SCAN_IND;
966 cp.type = LE_ADV_NONCONN_IND;
968 cp.own_address_type = own_addr_type;
969 cp.channel_map = hdev->le_adv_channel_map;
971 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
973 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
976 u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
981 /* no space left for name (+ NULL + type + len) */
982 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
985 /* use complete name if present and fits */
986 complete_len = strlen(hdev->dev_name);
987 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
988 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
989 hdev->dev_name, complete_len + 1);
991 /* use short name if present */
992 short_len = strlen(hdev->short_name);
994 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
995 hdev->short_name, short_len + 1);
997 /* use shortened full name if present, we already know that name
998 * is longer then HCI_MAX_SHORT_NAME_LENGTH
1001 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1003 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1004 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1006 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1013 static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1015 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1018 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1020 u8 scan_rsp_len = 0;
1022 if (hdev->appearance) {
1023 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1026 return append_local_name(hdev, ptr, scan_rsp_len);
1029 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1032 struct adv_info *adv_instance;
1034 u8 scan_rsp_len = 0;
1036 adv_instance = hci_find_adv_instance(hdev, instance);
1040 instance_flags = adv_instance->flags;
1042 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1043 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1046 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1047 adv_instance->scan_rsp_len);
1049 scan_rsp_len += adv_instance->scan_rsp_len;
1051 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1052 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1054 return scan_rsp_len;
1057 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1059 struct hci_dev *hdev = req->hdev;
1060 struct hci_cp_le_set_scan_rsp_data cp;
1063 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1066 memset(&cp, 0, sizeof(cp));
1069 len = create_instance_scan_rsp_data(hdev, instance, cp.data);
1071 len = create_default_scan_rsp_data(hdev, cp.data);
1073 if (hdev->scan_rsp_data_len == len &&
1074 !memcmp(cp.data, hdev->scan_rsp_data, len))
1077 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1078 hdev->scan_rsp_data_len = len;
1082 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1085 static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1087 struct adv_info *adv_instance = NULL;
1088 u8 ad_len = 0, flags = 0;
1091 /* Return 0 when the current instance identifier is invalid. */
1093 adv_instance = hci_find_adv_instance(hdev, instance);
1098 instance_flags = get_adv_instance_flags(hdev, instance);
1100 /* If instance already has the flags set skip adding it once
1103 if (adv_instance && eir_get_data(adv_instance->adv_data,
1104 adv_instance->adv_data_len, EIR_FLAGS,
1108 /* The Add Advertising command allows userspace to set both the general
1109 * and limited discoverable flags.
1111 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1112 flags |= LE_AD_GENERAL;
1114 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1115 flags |= LE_AD_LIMITED;
1117 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1118 flags |= LE_AD_NO_BREDR;
1120 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1121 /* If a discovery flag wasn't provided, simply use the global
1125 flags |= mgmt_get_adv_discov_flags(hdev);
1127 /* If flags would still be empty, then there is no need to
1128 * include the "Flags" AD field".
1142 memcpy(ptr, adv_instance->adv_data,
1143 adv_instance->adv_data_len);
1144 ad_len += adv_instance->adv_data_len;
1145 ptr += adv_instance->adv_data_len;
1148 /* Provide Tx Power only if we can provide a valid value for it */
1149 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1150 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1152 ptr[1] = EIR_TX_POWER;
1153 ptr[2] = (u8)hdev->adv_tx_power;
1162 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1164 struct hci_dev *hdev = req->hdev;
1165 struct hci_cp_le_set_adv_data cp;
1168 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1171 memset(&cp, 0, sizeof(cp));
1173 len = create_instance_adv_data(hdev, instance, cp.data);
1175 /* There's nothing to do if the data hasn't changed */
1176 if (hdev->adv_data_len == len &&
1177 memcmp(cp.data, hdev->adv_data, len) == 0)
1180 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1181 hdev->adv_data_len = len;
1185 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1188 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1190 struct hci_request req;
1192 hci_req_init(&req, hdev);
1193 __hci_req_update_adv_data(&req, instance);
1195 return hci_req_run(&req, NULL);
1198 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1200 BT_DBG("%s status %u", hdev->name, status);
1203 void hci_req_reenable_advertising(struct hci_dev *hdev)
1205 struct hci_request req;
1207 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1208 list_empty(&hdev->adv_instances))
1211 hci_req_init(&req, hdev);
1213 if (hdev->cur_adv_instance) {
1214 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1217 __hci_req_update_adv_data(&req, 0x00);
1218 __hci_req_update_scan_rsp_data(&req, 0x00);
1219 __hci_req_enable_advertising(&req);
1222 hci_req_run(&req, adv_enable_complete);
1225 static void adv_timeout_expire(struct work_struct *work)
1227 struct hci_dev *hdev = container_of(work, struct hci_dev,
1228 adv_instance_expire.work);
1230 struct hci_request req;
1233 BT_DBG("%s", hdev->name);
1237 hdev->adv_instance_timeout = 0;
1239 instance = hdev->cur_adv_instance;
1240 if (instance == 0x00)
1243 hci_req_init(&req, hdev);
1245 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1247 if (list_empty(&hdev->adv_instances))
1248 __hci_req_disable_advertising(&req);
1250 hci_req_run(&req, NULL);
1253 hci_dev_unlock(hdev);
1256 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1259 struct hci_dev *hdev = req->hdev;
1260 struct adv_info *adv_instance = NULL;
1263 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1264 list_empty(&hdev->adv_instances))
1267 if (hdev->adv_instance_timeout)
1270 adv_instance = hci_find_adv_instance(hdev, instance);
1274 /* A zero timeout means unlimited advertising. As long as there is
1275 * only one instance, duration should be ignored. We still set a timeout
1276 * in case further instances are being added later on.
1278 * If the remaining lifetime of the instance is more than the duration
1279 * then the timeout corresponds to the duration, otherwise it will be
1280 * reduced to the remaining instance lifetime.
1282 if (adv_instance->timeout == 0 ||
1283 adv_instance->duration <= adv_instance->remaining_time)
1284 timeout = adv_instance->duration;
1286 timeout = adv_instance->remaining_time;
1288 /* The remaining time is being reduced unless the instance is being
1289 * advertised without time limit.
1291 if (adv_instance->timeout)
1292 adv_instance->remaining_time =
1293 adv_instance->remaining_time - timeout;
1295 hdev->adv_instance_timeout = timeout;
1296 queue_delayed_work(hdev->req_workqueue,
1297 &hdev->adv_instance_expire,
1298 msecs_to_jiffies(timeout * 1000));
1300 /* If we're just re-scheduling the same instance again then do not
1301 * execute any HCI commands. This happens when a single instance is
1304 if (!force && hdev->cur_adv_instance == instance &&
1305 hci_dev_test_flag(hdev, HCI_LE_ADV))
1308 hdev->cur_adv_instance = instance;
1309 __hci_req_update_adv_data(req, instance);
1310 __hci_req_update_scan_rsp_data(req, instance);
1311 __hci_req_enable_advertising(req);
1316 static void cancel_adv_timeout(struct hci_dev *hdev)
1318 if (hdev->adv_instance_timeout) {
1319 hdev->adv_instance_timeout = 0;
1320 cancel_delayed_work(&hdev->adv_instance_expire);
1324 /* For a single instance:
1325 * - force == true: The instance will be removed even when its remaining
1326 * lifetime is not zero.
1327 * - force == false: the instance will be deactivated but kept stored unless
1328 * the remaining lifetime is zero.
1330 * For instance == 0x00:
1331 * - force == true: All instances will be removed regardless of their timeout
1333 * - force == false: Only instances that have a timeout will be removed.
/* Deactivate or remove advertising instance(s) according to the
 * force/timeout semantics documented in the comment block above.
 *
 * @instance == 0x00 acts on all stored instances, otherwise only the
 * given instance is affected.  @sk is passed through to the mgmt
 * "advertising removed" notifications.  When @req is supplied, the
 * device is powered, and advertising is not driven by the mgmt
 * HCI_ADVERTISING setting, the next remaining instance is scheduled.
 */
1335 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1336 struct hci_request *req, u8 instance,
1339 struct adv_info *adv_instance, *n, *next_instance = NULL;
1343 /* Cancel any timeout concerning the removed instance(s). */
1344 if (!instance || hdev->cur_adv_instance == instance)
1345 cancel_adv_timeout(hdev);
1347 /* Get the next instance to advertise BEFORE we remove
1348 * the current one. This can be the same instance again
1349 * if there is only one instance.
1351 if (instance && hdev->cur_adv_instance == instance)
1352 next_instance = hci_get_next_instance(hdev, instance);
1354 if (instance == 0x00) {
1355 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
/* When not forced, only instances that have a timeout set are
 * removed (see the function-level comment above).
 */
1357 if (!(force || adv_instance->timeout))
1360 rem_inst = adv_instance->instance;
1361 err = hci_remove_adv_instance(hdev, rem_inst);
1363 mgmt_advertising_removed(sk, hdev, rem_inst);
1366 adv_instance = hci_find_adv_instance(hdev, instance);
/* A single instance goes away when forced, or when its
 * limited lifetime has fully elapsed (timeout set but no
 * remaining time).
 */
1368 if (force || (adv_instance && adv_instance->timeout &&
1369 !adv_instance->remaining_time)) {
1370 /* Don't advertise a removed instance. */
1371 if (next_instance &&
1372 next_instance->instance == instance)
1373 next_instance = NULL;
1375 err = hci_remove_adv_instance(hdev, instance);
1377 mgmt_advertising_removed(sk, hdev, instance);
/* Only (re)schedule advertising when a request context exists,
 * the controller is powered, and advertising is instance-based
 * rather than controlled by the HCI_ADVERTISING mgmt setting.
 */
1381 if (!req || !hdev_is_powered(hdev) ||
1382 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1386 __hci_req_schedule_adv_instance(req, next_instance->instance,
/* Queue an LE Set Random Address command for @rpa on @req, unless an
 * address change is unsafe right now (advertising active or LE
 * connection being established), in which case the update is deferred
 * by flagging the RPA as expired so the next cycle refreshes it.
 */
1390 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1392 struct hci_dev *hdev = req->hdev;
1394 /* If we're advertising or initiating an LE connection we can't
1395 * go ahead and change the random address at this time. This is
1396 * because the eventual initiator address used for the
1397 * subsequently created connection will be undefined (some
1398 * controllers use the new address and others the one we had
1399 * when the operation started).
1401 * In this kind of scenario skip the update and let the random
1402 * address be updated at the next cycle.
1404 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1405 hci_lookup_le_connect(hdev)) {
1406 BT_DBG("Deferring random address update");
1407 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1411 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Select the own-address type for an LE operation queued on @req and,
 * when a random address is chosen, queue the command to program it.
 *
 * Order of preference: resolvable private address (when @use_rpa),
 * non-resolvable private address (when @require_privacy), static
 * random address (forced, or no public address, or BR/EDR disabled
 * with a static address configured), otherwise the public address.
 *
 * *own_addr_type receives ADDR_LE_DEV_RANDOM or ADDR_LE_DEV_PUBLIC.
 * Returns 0 on success or a negative error (e.g. RPA generation
 * failure).
 */
1414 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1415 bool use_rpa, u8 *own_addr_type)
1417 struct hci_dev *hdev = req->hdev;
1420 /* If privacy is enabled use a resolvable private address. If
1421 * current RPA has expired or there is something else than
1422 * the current RPA in use, then generate a new one.
1427 *own_addr_type = ADDR_LE_DEV_RANDOM;
/* Nothing to do if the RPA has not expired and is already the
 * programmed random address.
 */
1429 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1430 !bacmp(&hdev->random_addr, &hdev->rpa))
1433 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1435 BT_ERR("%s failed to generate new RPA", hdev->name);
1439 set_random_addr(req, &hdev->rpa);
/* Re-arm the RPA expiry timer for the configured lifetime. */
1441 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1442 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1447 /* In case of required privacy without resolvable private address,
1448 * use a non-resolvable private address. This is useful for active
1449 * scanning and non-connectable advertising.
1451 if (require_privacy) {
1455 /* The non-resolvable private address is generated
1456 * from random six bytes with the two most significant
1459 get_random_bytes(&nrpa, 6);
1462 /* The non-resolvable private address shall not be
1463 * equal to the public address.
1465 if (bacmp(&hdev->bdaddr, &nrpa))
1469 *own_addr_type = ADDR_LE_DEV_RANDOM;
1470 set_random_addr(req, &nrpa);
1474 /* If forcing static address is in use or there is no public
1475 * address use the static address as random address (but skip
1476 * the HCI command if the current random address is already the
1479 * In case BR/EDR has been disabled on a dual-mode controller
1480 * and a static address has been configured, then use that
1481 * address instead of the public BR/EDR address.
1483 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1484 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1485 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1486 bacmp(&hdev->static_addr, BDADDR_ANY))) {
1487 *own_addr_type = ADDR_LE_DEV_RANDOM;
1488 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1489 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1490 &hdev->static_addr);
1494 /* Neither privacy nor static address is being used so use a
1497 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Walk the BR/EDR whitelist and report whether any listed device is
 * not currently in an established ACL connection (BT_CONNECTED or
 * BT_CONFIG) — such devices need page scan kept on so they can
 * reconnect.  NOTE(review): the return statements are outside this
 * excerpt; the contract above is inferred from the caller in
 * __hci_req_update_scan — confirm against the full source.
 */
1502 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1504 struct bdaddr_list *b;
1506 list_for_each_entry(b, &hdev->whitelist, list) {
1507 struct hci_conn *conn;
1509 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1513 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1520 void __hci_req_update_scan(struct hci_request *req)
1522 struct hci_dev *hdev = req->hdev;
1525 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1528 if (!hdev_is_powered(hdev))
1531 if (mgmt_powering_down(hdev))
1534 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1535 disconnected_whitelist_entries(hdev))
1538 scan = SCAN_DISABLED;
1540 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1541 scan |= SCAN_INQUIRY;
1543 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1544 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1547 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* hci_req_sync callback: refresh the scan mode with the hdev lock
 * held; @opt is unused.
 */
1550 static int update_scan(struct hci_request *req, unsigned long opt)
1552 hci_dev_lock(req->hdev);
1553 __hci_req_update_scan(req);
1554 hci_dev_unlock(req->hdev);
/* Deferred work item: synchronously run the update_scan request so the
 * controller's scan mode matches the current settings.
 */
1558 static void scan_update_work(struct work_struct *work)
1560 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1562 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
/* hci_req_sync callback run when the connectable setting changes:
 * refresh BR/EDR scan mode, the LE advertising data/parameters as
 * needed, and the background scan.  @opt is unused.
 */
1565 static int connectable_update(struct hci_request *req, unsigned long opt)
1567 struct hci_dev *hdev = req->hdev;
1571 __hci_req_update_scan(req);
1573 /* If BR/EDR is not enabled and we disable advertising as a
1574 * by-product of disabling connectable, we need to update the
1575 * advertising flags.
1577 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1578 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
1580 /* Update the advertising parameters if necessary */
1581 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1582 !list_empty(&hdev->adv_instances))
1583 __hci_req_enable_advertising(req);
1585 __hci_update_background_scan(req);
1587 hci_dev_unlock(hdev);
/* Deferred work item: run the connectable_update request synchronously
 * and report the resulting status back to mgmt.
 */
1592 static void connectable_update_work(struct work_struct *work)
1594 struct hci_dev *hdev = container_of(work, struct hci_dev,
1595 connectable_update);
1598 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1599 mgmt_set_connectable_complete(hdev, status);
/* Build the service-class octet of the Class of Device by OR-ing the
 * svc_hint bits of every registered UUID.
 */
1602 static u8 get_service_classes(struct hci_dev *hdev)
1604 struct bt_uuid *uuid;
1607 list_for_each_entry(uuid, &hdev->uuids, list)
1608 val |= uuid->svc_hint;
/* Queue a Write Class of Device command when the computed class
 * (minor, major, service bits) differs from what the controller has.
 * Skipped when unpowered, BR/EDR disabled, or the service cache is in
 * use.
 */
1613 void __hci_req_update_class(struct hci_request *req)
1615 struct hci_dev *hdev = req->hdev;
1618 BT_DBG("%s", hdev->name);
1620 if (!hdev_is_powered(hdev))
1623 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1626 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1629 cod[0] = hdev->minor_class;
1630 cod[1] = hdev->major_class;
1631 cod[2] = get_service_classes(hdev);
/* NOTE(review): the limited-discoverable adjustment applied to
 * cod here is not visible in this excerpt — confirm in the full
 * source.
 */
1633 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
/* No command needed if the class is already current. */
1636 if (memcmp(cod, hdev->dev_class, 3) == 0)
1639 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Queue a Write Current IAC LAP command matching the discoverable
 * mode: LIAC (0x9e8b00) plus GIAC (0x9e8b33) in limited discoverable
 * mode, GIAC alone in general discoverable mode.  No-op when not
 * discoverable at all.
 */
1642 static void write_iac(struct hci_request *req)
1644 struct hci_dev *hdev = req->hdev;
1645 struct hci_cp_write_current_iac_lap cp;
1647 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1650 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1651 /* Limited discoverable mode */
/* Advertise at most two IACs even if the controller
 * supports more.
 */
1652 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1653 cp.iac_lap[0] = 0x00; /* LIAC */
1654 cp.iac_lap[1] = 0x8b;
1655 cp.iac_lap[2] = 0x9e;
1656 cp.iac_lap[3] = 0x33; /* GIAC */
1657 cp.iac_lap[4] = 0x8b;
1658 cp.iac_lap[5] = 0x9e;
1660 /* General discoverable mode */
1662 cp.iac_lap[0] = 0x33; /* GIAC */
1663 cp.iac_lap[1] = 0x8b;
1664 cp.iac_lap[2] = 0x9e;
/* Parameter length: one num_iac byte plus 3 bytes per LAP. */
1667 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1668 (cp.num_iac * 3) + 1, &cp);
/* hci_req_sync callback run when the discoverable setting changes:
 * refresh IAC, scan mode and device class for BR/EDR, and the LE
 * advertising data/parameters where the global setting applies.
 * @opt is unused.
 */
1671 static int discoverable_update(struct hci_request *req, unsigned long opt)
1673 struct hci_dev *hdev = req->hdev;
1677 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1679 __hci_req_update_scan(req);
1680 __hci_req_update_class(req);
1683 /* Advertising instances don't use the global discoverable setting, so
1684 * only update AD if advertising was enabled using Set Advertising.
1686 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1687 __hci_req_update_adv_data(req, 0x00);
1689 /* Discoverable mode affects the local advertising
1690 * address in limited privacy mode.
1692 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1693 __hci_req_enable_advertising(req);
1696 hci_dev_unlock(hdev);
/* Deferred work item: run the discoverable_update request
 * synchronously and report the resulting status back to mgmt.
 */
1701 static void discoverable_update_work(struct work_struct *work)
1703 struct hci_dev *hdev = container_of(work, struct hci_dev,
1704 discoverable_update);
1707 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1708 mgmt_set_discoverable_complete(hdev, status);
/* Queue on @req the HCI command(s) appropriate to abort @conn given
 * its current connection state: Disconnect (or Disconnect Physical
 * Link for AMP) for established links, Create Connection Cancel / LE
 * Create Connection Cancel for outgoing attempts, and Reject
 * Connection Request for incoming ones.  NOTE(review): the switch
 * case labels (BT_CONNECTED/BT_CONNECT/BT_CONNECT2/...) fall outside
 * this excerpt — confirm the state-to-branch mapping in the full
 * source.
 */
1711 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1714 switch (conn->state) {
1717 if (conn->type == AMP_LINK) {
1718 struct hci_cp_disconn_phy_link cp;
1720 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1722 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1725 struct hci_cp_disconnect dc;
1727 dc.handle = cpu_to_le16(conn->handle);
1729 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1732 conn->state = BT_DISCONN;
/* Outgoing connection attempt: cancel it.  An LE connection
 * still in the scanning phase has no pending controller
 * command to cancel.
 */
1736 if (conn->type == LE_LINK) {
1737 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1739 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1741 } else if (conn->type == ACL_LINK) {
/* Create Connection Cancel only exists from BT 1.2 on. */
1742 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1744 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
/* Incoming connection request: reject it with @reason. */
1749 if (conn->type == ACL_LINK) {
1750 struct hci_cp_reject_conn_req rej;
1752 bacpy(&rej.bdaddr, &conn->dst);
1753 rej.reason = reason;
1755 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1757 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1758 struct hci_cp_reject_sync_conn_req rej;
1760 bacpy(&rej.bdaddr, &conn->dst);
1762 /* SCO rejection has its own limited set of
1763 * allowed error values (0x0D-0x0F) which isn't
1764 * compatible with most values passed to this
1765 * function. To be safe hard-code one of the
1766 * values that's suitable for SCO.
1768 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
1770 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
/* Any other state: nothing to send, just mark it closed. */
1775 conn->state = BT_CLOSED;
/* Completion callback for hci_abort_conn requests: logs the abort
 * outcome at debug level; no further action is taken here.
 */
1780 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1783 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
/* Build and run a standalone HCI request that aborts @conn with the
 * given @reason.  -ENODATA (no commands were queued, e.g. nothing to
 * do for the connection's state) is not treated as a failure.
 */
1786 int hci_abort_conn(struct hci_conn *conn, u8 reason)
1788 struct hci_request req;
1791 hci_req_init(&req, conn->hdev);
1793 __hci_abort_conn(&req, conn, reason);
1795 err = hci_req_run(&req, abort_conn_complete);
1796 if (err && err != -ENODATA) {
1797 BT_ERR("Failed to run HCI request: err %d", err);
/* hci_req_sync callback: refresh background (passive) scanning with
 * the hdev lock held; @opt is unused.
 */
1804 static int update_bg_scan(struct hci_request *req, unsigned long opt)
1806 hci_dev_lock(req->hdev);
1807 __hci_update_background_scan(req);
1808 hci_dev_unlock(req->hdev);
/* Deferred work item: re-sync background scanning.  If the request
 * fails, the pending outgoing LE connection (if any) is failed with
 * the reported status so its initiator gets notified.
 */
1812 static void bg_scan_update(struct work_struct *work)
1814 struct hci_dev *hdev = container_of(work, struct hci_dev,
1816 struct hci_conn *conn;
1820 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
/* NOTE(review): the early-return on !err is outside this
 * excerpt — the failure handling below should only run when the
 * request failed; confirm in the full source.
 */
1826 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1828 hci_le_conn_failed(conn, status);
1830 hci_dev_unlock(hdev);
/* hci_req_sync callback: queue the command(s) that disable LE
 * scanning; @opt is unused.
 */
1833 static int le_scan_disable(struct hci_request *req, unsigned long opt)
1835 hci_req_add_le_scan_disable(req);
/* hci_req_sync callback: flush the inquiry cache and queue an Inquiry
 * command using the limited (LIAC) or general (GIAC) access code
 * depending on discovery.limited.  NOTE(review): how @opt feeds the
 * inquiry length field is outside this excerpt — confirm in the full
 * source.
 */
1839 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1842 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1843 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
1844 struct hci_cp_inquiry cp;
1846 BT_DBG("%s", req->hdev->name);
1848 hci_dev_lock(req->hdev);
1849 hci_inquiry_cache_flush(req->hdev);
1850 hci_dev_unlock(req->hdev);
1852 memset(&cp, 0, sizeof(cp));
1854 if (req->hdev->discovery.limited)
1855 memcpy(&cp.lap, liac, sizeof(cp.lap));
1857 memcpy(&cp.lap, giac, sizeof(cp.lap));
1861 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Deferred work item that stops LE scanning when the discovery
 * timeout elapses, then either finishes discovery or hands over to
 * BR/EDR inquiry for interleaved discovery.
 */
1866 static void le_scan_disable_work(struct work_struct *work)
1868 struct hci_dev *hdev = container_of(work, struct hci_dev,
1869 le_scan_disable.work)
1872 BT_DBG("%s", hdev->name);
1874 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
/* A pending scan restart would re-enable scanning; stop it. */
1877 cancel_delayed_work(&hdev->le_scan_restart);
1879 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1881 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1885 hdev->discovery.scan_start = 0;
1887 /* If we were running LE only scan, change discovery state. If
1888 * we were running both LE and BR/EDR inquiry simultaneously,
1889 * and BR/EDR inquiry is already finished, stop discovery,
1890 * otherwise BR/EDR inquiry will stop discovery when finished.
1891 * If we will resolve remote device name, do not change
1895 if (hdev->discovery.type == DISCOV_TYPE_LE)
1896 goto discov_stopped;
1898 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
/* Simultaneous discovery: only stop when inquiry is already
 * done and no name resolution is in progress.
 */
1901 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1902 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1903 hdev->discovery.state != DISCOVERY_RESOLVING)
1904 goto discov_stopped;
/* Interleaved discovery: follow the LE phase with a BR/EDR
 * inquiry phase.
 */
1909 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1910 HCI_CMD_TIMEOUT, &status);
1912 BT_ERR("Inquiry failed: status 0x%02x", status);
1913 goto discov_stopped;
1920 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1921 hci_dev_unlock(hdev);
/* hci_req_sync callback: restart LE scanning by queueing a disable
 * followed by an enable with duplicate filtering on — used to flush
 * the controller's duplicate filter.  No-op when not scanning.
 */
1924 static int le_scan_restart(struct hci_request *req, unsigned long opt)
1926 struct hci_dev *hdev = req->hdev;
1927 struct hci_cp_le_set_scan_enable cp;
1929 /* If controller is not scanning we are done. */
1930 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1933 hci_req_add_le_scan_disable(req);
1935 memset(&cp, 0, sizeof(cp));
1936 cp.enable = LE_SCAN_ENABLE;
1937 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1938 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Deferred work item: restart the LE scan (to flush strict duplicate
 * filters) and re-queue the le_scan_disable work with whatever scan
 * time remains so the overall discovery duration is preserved.
 */
1943 static void le_scan_restart_work(struct work_struct *work)
1945 struct hci_dev *hdev = container_of(work, struct hci_dev,
1946 le_scan_restart.work)
1947 unsigned long timeout, duration, scan_start, now;
1950 BT_DBG("%s", hdev->name);
1952 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
1954 BT_ERR("Failed to restart LE scan: status %d", status);
/* Only controllers with strict duplicate filtering and a
 * recorded scan start need the disable work re-armed.
 */
1960 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1961 !hdev->discovery.scan_start)
1964 /* When the scan was started, hdev->le_scan_disable has been queued
1965 * after duration from scan_start. During scan restart this job
1966 * has been canceled, and we need to queue it again after proper
1967 * timeout, to make sure that scan does not run indefinitely.
1969 duration = hdev->discovery.scan_duration;
1970 scan_start = hdev->discovery.scan_start;
1972 if (now - scan_start <= duration) {
/* Compute elapsed jiffies, accounting for wrap-around of
 * the jiffies counter.
 */
1975 if (now >= scan_start)
1976 elapsed = now - scan_start;
1978 elapsed = ULONG_MAX - scan_start + now;
1980 timeout = duration - elapsed;
1985 queue_delayed_work(hdev->req_workqueue,
1986 &hdev->le_scan_disable, timeout);
1989 hci_dev_unlock(hdev);
/* Queue an LE Set Advertise Enable command that turns advertising
 * off.
 */
1992 static void disable_advertising(struct hci_request *req)
1996 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* hci_req_sync callback that starts an active LE scan for discovery.
 * @opt carries the scan interval.  Pauses advertising (unless a
 * directed-advertising connection attempt is in flight), stops any
 * background scan, picks a privacy-appropriate own address, then
 * queues Set Scan Parameters and Set Scan Enable.
 */
1999 static int active_scan(struct hci_request *req, unsigned long opt)
2001 uint16_t interval = opt;
2002 struct hci_dev *hdev = req->hdev;
2003 struct hci_cp_le_set_scan_param param_cp;
2004 struct hci_cp_le_set_scan_enable enable_cp;
2008 BT_DBG("%s", hdev->name);
2010 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2013 /* Don't let discovery abort an outgoing connection attempt
2014 * that's using directed advertising.
2016 if (hci_lookup_le_connect(hdev)) {
2017 hci_dev_unlock(hdev);
2021 cancel_adv_timeout(hdev);
2022 hci_dev_unlock(hdev);
2024 disable_advertising(req);
2027 /* If controller is scanning, it means the background scanning is
2028 * running. Thus, we should temporarily stop it in order to set the
2029 * discovery scanning parameters.
2031 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2032 hci_req_add_le_scan_disable(req);
2034 /* All active scans will be done with either a resolvable private
2035 * address (when privacy feature has been enabled) or non-resolvable
2038 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
/* Fall back to the public address if no random address could
 * be set up.
 */
2041 own_addr_type = ADDR_LE_DEV_PUBLIC;
2043 memset(&param_cp, 0, sizeof(param_cp));
2044 param_cp.type = LE_SCAN_ACTIVE;
2045 param_cp.interval = cpu_to_le16(interval);
2046 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2047 param_cp.own_address_type = own_addr_type;
2049 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2052 memset(&enable_cp, 0, sizeof(enable_cp));
2053 enable_cp.enable = LE_SCAN_ENABLE;
2054 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2056 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* hci_req_sync callback for simultaneous discovery: start an active
 * LE scan (interval in @opt) and then also queue a BR/EDR inquiry.
 */
2062 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2066 BT_DBG("%s", req->hdev->name);
2068 err = active_scan(req, opt);
2072 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
/* Kick off device discovery according to hdev->discovery.type
 * (BR/EDR-only, LE-only, or interleaved), storing the request status
 * in *status.  For LE-based discovery a delayed le_scan_disable is
 * queued so the scan terminates after the chosen timeout.
 */
2075 static void start_discovery(struct hci_dev *hdev, u8 *status)
2077 unsigned long timeout;
2079 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2081 switch (hdev->discovery.type) {
2082 case DISCOV_TYPE_BREDR:
/* Only start inquiry if it is not already running. */
2083 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2084 hci_req_sync(hdev, bredr_inquiry,
2085 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2088 case DISCOV_TYPE_INTERLEAVED:
2089 /* When running simultaneous discovery, the LE scanning time
2090 * should occupy the whole discovery time since BR/EDR inquiry
2091 * and LE scanning are scheduled by the controller.
2093 * For interleaving discovery in comparison, BR/EDR inquiry
2094 * and LE scanning are done sequentially with separate
2097 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2099 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2100 /* During simultaneous discovery, we double LE scan
2101 * interval. We must leave some time for the controller
2102 * to do BR/EDR inquiry.
2104 hci_req_sync(hdev, interleaved_discov,
2105 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2110 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2111 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2112 HCI_CMD_TIMEOUT, status);
2114 case DISCOV_TYPE_LE:
2115 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2116 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2117 HCI_CMD_TIMEOUT, status);
/* Unknown discovery type: report failure. */
2120 *status = HCI_ERROR_UNSPECIFIED;
2127 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2129 /* When service discovery is used and the controller has a
2130 * strict duplicate filter, it is important to remember the
2131 * start and duration of the scan. This is required for
2132 * restarting scanning during the discovery phase.
2134 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2135 hdev->discovery.result_filtering) {
2136 hdev->discovery.scan_start = jiffies;
2137 hdev->discovery.scan_duration = timeout;
2140 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
/* Queue the commands needed to stop the current discovery: cancel
 * inquiry and/or disable LE scanning while finding/stopping, stop a
 * plain passive scan otherwise, and cancel any in-flight remote name
 * request.  NOTE(review): the bool return paths are outside this
 * excerpt; presumably it reports whether any command was queued —
 * confirm against the full source.
 */
2144 bool hci_req_stop_discovery(struct hci_request *req)
2146 struct hci_dev *hdev = req->hdev;
2147 struct discovery_state *d = &hdev->discovery;
2148 struct hci_cp_remote_name_req_cancel cp;
2149 struct inquiry_entry *e;
2152 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2154 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2155 if (test_bit(HCI_INQUIRY, &hdev->flags))
2156 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2158 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2159 cancel_delayed_work(&hdev->le_scan_disable);
2160 hci_req_add_le_scan_disable(req);
2165 /* Passive scanning */
2166 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2167 hci_req_add_le_scan_disable(req);
2172 /* No further actions needed for LE-only discovery */
2173 if (d->type == DISCOV_TYPE_LE)
/* BR/EDR discovery may be blocked on a remote name lookup;
 * cancel it so discovery can stop promptly.
 */
2176 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2177 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2182 bacpy(&cp.bdaddr, &e->data.bdaddr);
2183 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
/* hci_req_sync callback: stop discovery with the hdev lock held;
 * @opt is unused.
 */
2191 static int stop_discovery(struct hci_request *req, unsigned long opt)
2193 hci_dev_lock(req->hdev);
2194 hci_req_stop_discovery(req);
2195 hci_dev_unlock(req->hdev);
/* Deferred work item that drives discovery state transitions: starts
 * discovery when STARTING (reporting completion to mgmt and moving to
 * FINDING or back to STOPPED on failure), and stops it when STOPPING.
 */
2200 static void discov_update(struct work_struct *work)
2202 struct hci_dev *hdev = container_of(work, struct hci_dev,
2206 switch (hdev->discovery.state) {
2207 case DISCOVERY_STARTING:
2208 start_discovery(hdev, &status);
2209 mgmt_start_discovery_complete(hdev, status);
2211 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2213 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2215 case DISCOVERY_STOPPING:
2216 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2217 mgmt_stop_discovery_complete(hdev, status);
2219 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2221 case DISCOVERY_STOPPED:
/* Delayed work run when the discoverable timeout expires: clear the
 * discoverable flags, push the change to the controller via
 * discoverable_update, and announce the new settings over mgmt.
 */
2227 static void discov_off(struct work_struct *work)
2229 struct hci_dev *hdev = container_of(work, struct hci_dev,
2232 BT_DBG("%s", hdev->name);
2236 /* When discoverable timeout triggers, then just make sure
2237 * the limited discoverable flag is cleared. Even in the case
2238 * of a timeout triggered from general discoverable, it is
2239 * safe to unconditionally clear the flag.
2241 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2242 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2243 hdev->discov_timeout = 0;
2245 hci_dev_unlock(hdev);
2247 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2248 mgmt_new_settings(hdev);
2251 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2253 struct hci_dev *hdev = req->hdev;
2258 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2259 !lmp_host_ssp_capable(hdev)) {
2262 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2264 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2267 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2268 sizeof(support), &support);
2272 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2273 lmp_bredr_capable(hdev)) {
2274 struct hci_cp_write_le_host_supported cp;
2279 /* Check first if we already have the right
2280 * host state (host features set)
2282 if (cp.le != lmp_host_le_capable(hdev) ||
2283 cp.simul != lmp_host_le_br_capable(hdev))
2284 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2288 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2289 /* Make sure the controller has a good default for
2290 * advertising data. This also applies to the case
2291 * where BR/EDR was toggled during the AUTO_OFF phase.
2293 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2294 list_empty(&hdev->adv_instances)) {
2295 __hci_req_update_adv_data(req, 0x00);
2296 __hci_req_update_scan_rsp_data(req, 0x00);
2298 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2299 __hci_req_enable_advertising(req);
2300 } else if (!list_empty(&hdev->adv_instances)) {
2301 struct adv_info *adv_instance;
2303 adv_instance = list_first_entry(&hdev->adv_instances,
2304 struct adv_info, list);
2305 __hci_req_schedule_adv_instance(req,
2306 adv_instance->instance,
2311 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2312 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2313 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2314 sizeof(link_sec), &link_sec);
2316 if (lmp_bredr_capable(hdev)) {
2317 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2318 __hci_req_write_fast_connectable(req, true);
2320 __hci_req_write_fast_connectable(req, false);
2321 __hci_req_update_scan(req);
2322 __hci_req_update_class(req);
2323 __hci_req_update_name(req);
2324 __hci_req_update_eir(req);
2327 hci_dev_unlock(hdev);
/* Run the powered_update_hci request synchronously as part of
 * powering on the controller; returns the request result.
 */
2331 int __hci_req_hci_power_on(struct hci_dev *hdev)
2333 /* Register the available SMP channels (BR/EDR and LE) only when
2334 * successfully powering on the controller. This late
2335 * registration is required so that LE SMP can clearly decide if
2336 * the public address or static address is used.
2340 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
/* Initialize all (delayed) work items used by the request machinery
 * in this file; called once when the hci_dev is set up.
 */
2344 void hci_request_setup(struct hci_dev *hdev)
2346 INIT_WORK(&hdev->discov_update, discov_update);
2347 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2348 INIT_WORK(&hdev->scan_update, scan_update_work);
2349 INIT_WORK(&hdev->connectable_update, connectable_update_work);
2350 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2351 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2352 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2353 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2354 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2357 void hci_request_cancel_all(struct hci_dev *hdev)
2359 hci_req_sync_cancel(hdev, ENODEV);
2361 cancel_work_sync(&hdev->discov_update);
2362 cancel_work_sync(&hdev->bg_scan_update);
2363 cancel_work_sync(&hdev->scan_update);
2364 cancel_work_sync(&hdev->connectable_update);
2365 cancel_work_sync(&hdev->discoverable_update);
2366 cancel_delayed_work_sync(&hdev->discov_off);
2367 cancel_delayed_work_sync(&hdev->le_scan_disable);
2368 cancel_delayed_work_sync(&hdev->le_scan_restart);
2370 if (hdev->adv_instance_timeout) {
2371 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2372 hdev->adv_instance_timeout = 0;