2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/property.h>
33 #include <linux/suspend.h>
34 #include <linux/wait.h>
35 #include <asm/unaligned.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 #include <net/bluetooth/mgmt.h>
42 #include "hci_request.h"
43 #include "hci_debugfs.h"
48 #include "hci_codec.h"
50 static void hci_rx_work(struct work_struct *work);
51 static void hci_cmd_work(struct work_struct *work);
52 static void hci_tx_work(struct work_struct *work);
55 LIST_HEAD(hci_dev_list);
56 DEFINE_RWLOCK(hci_dev_list_lock);
58 /* HCI callback list */
59 LIST_HEAD(hci_cb_list);
60 DEFINE_MUTEX(hci_cb_list_lock);
62 /* HCI ID Numbering */
63 static DEFINE_IDA(hci_index_ida);
65 static int hci_scan_req(struct hci_request *req, unsigned long opt)
69 BT_DBG("%s %x", req->hdev->name, scan);
71 /* Inquiry and Page scans */
72 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
76 static int hci_auth_req(struct hci_request *req, unsigned long opt)
80 BT_DBG("%s %x", req->hdev->name, auth);
83 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
87 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
91 BT_DBG("%s %x", req->hdev->name, encrypt);
94 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
98 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
100 __le16 policy = cpu_to_le16(opt);
102 BT_DBG("%s %x", req->hdev->name, policy);
104 /* Default link policy */
105 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
109 /* Get HCI device by index.
110 * Device is held on return. */
111 struct hci_dev *hci_dev_get(int index)
113 struct hci_dev *hdev = NULL, *d;
120 read_lock(&hci_dev_list_lock);
121 list_for_each_entry(d, &hci_dev_list, list) {
122 if (d->id == index) {
123 hdev = hci_dev_hold(d);
127 read_unlock(&hci_dev_list_lock);
131 /* ---- Inquiry support ---- */
133 bool hci_discovery_active(struct hci_dev *hdev)
135 struct discovery_state *discov = &hdev->discovery;
137 switch (discov->state) {
138 case DISCOVERY_FINDING:
139 case DISCOVERY_RESOLVING:
147 void hci_discovery_set_state(struct hci_dev *hdev, int state)
149 int old_state = hdev->discovery.state;
151 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
153 if (old_state == state)
156 hdev->discovery.state = state;
159 case DISCOVERY_STOPPED:
160 hci_update_passive_scan(hdev);
162 if (old_state != DISCOVERY_STARTING)
163 mgmt_discovering(hdev, 0);
165 case DISCOVERY_STARTING:
167 case DISCOVERY_FINDING:
168 mgmt_discovering(hdev, 1);
170 case DISCOVERY_RESOLVING:
172 case DISCOVERY_STOPPING:
177 void hci_inquiry_cache_flush(struct hci_dev *hdev)
179 struct discovery_state *cache = &hdev->discovery;
180 struct inquiry_entry *p, *n;
182 list_for_each_entry_safe(p, n, &cache->all, all) {
187 INIT_LIST_HEAD(&cache->unknown);
188 INIT_LIST_HEAD(&cache->resolve);
191 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
194 struct discovery_state *cache = &hdev->discovery;
195 struct inquiry_entry *e;
197 BT_DBG("cache %p, %pMR", cache, bdaddr);
199 list_for_each_entry(e, &cache->all, all) {
200 if (!bacmp(&e->data.bdaddr, bdaddr))
207 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
210 struct discovery_state *cache = &hdev->discovery;
211 struct inquiry_entry *e;
213 BT_DBG("cache %p, %pMR", cache, bdaddr);
215 list_for_each_entry(e, &cache->unknown, list) {
216 if (!bacmp(&e->data.bdaddr, bdaddr))
223 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
227 struct discovery_state *cache = &hdev->discovery;
228 struct inquiry_entry *e;
230 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
232 list_for_each_entry(e, &cache->resolve, list) {
233 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
235 if (!bacmp(&e->data.bdaddr, bdaddr))
242 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
243 struct inquiry_entry *ie)
245 struct discovery_state *cache = &hdev->discovery;
246 struct list_head *pos = &cache->resolve;
247 struct inquiry_entry *p;
251 list_for_each_entry(p, &cache->resolve, list) {
252 if (p->name_state != NAME_PENDING &&
253 abs(p->data.rssi) >= abs(ie->data.rssi))
258 list_add(&ie->list, pos);
261 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
264 struct discovery_state *cache = &hdev->discovery;
265 struct inquiry_entry *ie;
268 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
270 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
273 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
275 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
277 if (!ie->data.ssp_mode)
278 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
280 if (ie->name_state == NAME_NEEDED &&
281 data->rssi != ie->data.rssi) {
282 ie->data.rssi = data->rssi;
283 hci_inquiry_cache_update_resolve(hdev, ie);
289 /* Entry not in the cache. Add new one. */
290 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
292 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
296 list_add(&ie->all, &cache->all);
299 ie->name_state = NAME_KNOWN;
301 ie->name_state = NAME_NOT_KNOWN;
302 list_add(&ie->list, &cache->unknown);
306 if (name_known && ie->name_state != NAME_KNOWN &&
307 ie->name_state != NAME_PENDING) {
308 ie->name_state = NAME_KNOWN;
312 memcpy(&ie->data, data, sizeof(*data));
313 ie->timestamp = jiffies;
314 cache->timestamp = jiffies;
316 if (ie->name_state == NAME_NOT_KNOWN)
317 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
323 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
325 struct discovery_state *cache = &hdev->discovery;
326 struct inquiry_info *info = (struct inquiry_info *) buf;
327 struct inquiry_entry *e;
330 list_for_each_entry(e, &cache->all, all) {
331 struct inquiry_data *data = &e->data;
336 bacpy(&info->bdaddr, &data->bdaddr);
337 info->pscan_rep_mode = data->pscan_rep_mode;
338 info->pscan_period_mode = data->pscan_period_mode;
339 info->pscan_mode = data->pscan_mode;
340 memcpy(info->dev_class, data->dev_class, 3);
341 info->clock_offset = data->clock_offset;
347 BT_DBG("cache %p, copied %d", cache, copied);
351 static int hci_inq_req(struct hci_request *req, unsigned long opt)
353 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
354 struct hci_dev *hdev = req->hdev;
355 struct hci_cp_inquiry cp;
357 BT_DBG("%s", hdev->name);
359 if (test_bit(HCI_INQUIRY, &hdev->flags))
363 memcpy(&cp.lap, &ir->lap, 3);
364 cp.length = ir->length;
365 cp.num_rsp = ir->num_rsp;
366 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
371 int hci_inquiry(void __user *arg)
373 __u8 __user *ptr = arg;
374 struct hci_inquiry_req ir;
375 struct hci_dev *hdev;
376 int err = 0, do_inquiry = 0, max_rsp;
380 if (copy_from_user(&ir, ptr, sizeof(ir)))
383 hdev = hci_dev_get(ir.dev_id);
387 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
392 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
397 if (hdev->dev_type != HCI_PRIMARY) {
402 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
407 /* Restrict maximum inquiry length to 60 seconds */
408 if (ir.length > 60) {
414 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
415 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
416 hci_inquiry_cache_flush(hdev);
419 hci_dev_unlock(hdev);
421 timeo = ir.length * msecs_to_jiffies(2000);
424 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
429 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
430 * cleared). If it is interrupted by a signal, return -EINTR.
432 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
433 TASK_INTERRUPTIBLE)) {
439 /* for unlimited number of responses we will use buffer with
442 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
444 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
445 * copy it to the user space.
447 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
454 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
455 hci_dev_unlock(hdev);
457 BT_DBG("num_rsp %d", ir.num_rsp);
459 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
461 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
474 static int hci_dev_do_open(struct hci_dev *hdev)
478 BT_DBG("%s %p", hdev->name, hdev);
480 hci_req_sync_lock(hdev);
482 ret = hci_dev_open_sync(hdev);
484 hci_req_sync_unlock(hdev);
488 /* ---- HCI ioctl helpers ---- */
490 int hci_dev_open(__u16 dev)
492 struct hci_dev *hdev;
495 hdev = hci_dev_get(dev);
499 /* Devices that are marked as unconfigured can only be powered
500 * up as user channel. Trying to bring them up as normal devices
501 * will result into a failure. Only user channel operation is
504 * When this function is called for a user channel, the flag
505 * HCI_USER_CHANNEL will be set first before attempting to
508 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
509 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
514 /* We need to ensure that no other power on/off work is pending
515 * before proceeding to call hci_dev_do_open. This is
516 * particularly important if the setup procedure has not yet
519 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
520 cancel_delayed_work(&hdev->power_off);
522 /* After this call it is guaranteed that the setup procedure
523 * has finished. This means that error conditions like RFKILL
524 * or no valid public or static random address apply.
526 flush_workqueue(hdev->req_workqueue);
528 /* For controllers not using the management interface and that
529 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
530 * so that pairing works for them. Once the management interface
531 * is in use this bit will be cleared again and userspace has
532 * to explicitly enable it.
534 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
535 !hci_dev_test_flag(hdev, HCI_MGMT))
536 hci_dev_set_flag(hdev, HCI_BONDABLE);
538 err = hci_dev_do_open(hdev);
545 int hci_dev_do_close(struct hci_dev *hdev)
549 BT_DBG("%s %p", hdev->name, hdev);
551 hci_req_sync_lock(hdev);
553 err = hci_dev_close_sync(hdev);
555 hci_req_sync_unlock(hdev);
560 int hci_dev_close(__u16 dev)
562 struct hci_dev *hdev;
565 hdev = hci_dev_get(dev);
569 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
574 cancel_work_sync(&hdev->power_on);
575 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
576 cancel_delayed_work(&hdev->power_off);
578 err = hci_dev_do_close(hdev);
585 static int hci_dev_do_reset(struct hci_dev *hdev)
589 BT_DBG("%s %p", hdev->name, hdev);
591 hci_req_sync_lock(hdev);
594 skb_queue_purge(&hdev->rx_q);
595 skb_queue_purge(&hdev->cmd_q);
597 /* Avoid potential lockdep warnings from the *_flush() calls by
598 * ensuring the workqueue is empty up front.
600 drain_workqueue(hdev->workqueue);
603 hci_inquiry_cache_flush(hdev);
604 hci_conn_hash_flush(hdev);
605 hci_dev_unlock(hdev);
610 atomic_set(&hdev->cmd_cnt, 1);
611 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
613 ret = hci_reset_sync(hdev);
615 hci_req_sync_unlock(hdev);
619 int hci_dev_reset(__u16 dev)
621 struct hci_dev *hdev;
624 hdev = hci_dev_get(dev);
628 if (!test_bit(HCI_UP, &hdev->flags)) {
633 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
638 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
643 err = hci_dev_do_reset(hdev);
650 int hci_dev_reset_stat(__u16 dev)
652 struct hci_dev *hdev;
655 hdev = hci_dev_get(dev);
659 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
664 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
669 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
676 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
678 bool conn_changed, discov_changed;
680 BT_DBG("%s scan 0x%02x", hdev->name, scan);
682 if ((scan & SCAN_PAGE))
683 conn_changed = !hci_dev_test_and_set_flag(hdev,
686 conn_changed = hci_dev_test_and_clear_flag(hdev,
689 if ((scan & SCAN_INQUIRY)) {
690 discov_changed = !hci_dev_test_and_set_flag(hdev,
693 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
694 discov_changed = hci_dev_test_and_clear_flag(hdev,
698 if (!hci_dev_test_flag(hdev, HCI_MGMT))
701 if (conn_changed || discov_changed) {
702 /* In case this was disabled through mgmt */
703 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
705 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
706 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
708 mgmt_new_settings(hdev);
712 int hci_dev_cmd(unsigned int cmd, void __user *arg)
714 struct hci_dev *hdev;
715 struct hci_dev_req dr;
718 if (copy_from_user(&dr, arg, sizeof(dr)))
721 hdev = hci_dev_get(dr.dev_id);
725 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
730 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
735 if (hdev->dev_type != HCI_PRIMARY) {
740 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
747 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
748 HCI_INIT_TIMEOUT, NULL);
752 if (!lmp_encrypt_capable(hdev)) {
757 if (!test_bit(HCI_AUTH, &hdev->flags)) {
758 /* Auth must be enabled first */
759 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
760 HCI_INIT_TIMEOUT, NULL);
765 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
766 HCI_INIT_TIMEOUT, NULL);
770 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
771 HCI_INIT_TIMEOUT, NULL);
773 /* Ensure that the connectable and discoverable states
774 * get correctly modified as this was a non-mgmt change.
777 hci_update_passive_scan_state(hdev, dr.dev_opt);
781 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
782 HCI_INIT_TIMEOUT, NULL);
786 hdev->link_mode = ((__u16) dr.dev_opt) &
787 (HCI_LM_MASTER | HCI_LM_ACCEPT);
791 if (hdev->pkt_type == (__u16) dr.dev_opt)
794 hdev->pkt_type = (__u16) dr.dev_opt;
795 mgmt_phy_configuration_changed(hdev, NULL);
799 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
800 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
804 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
805 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
818 int hci_get_dev_list(void __user *arg)
820 struct hci_dev *hdev;
821 struct hci_dev_list_req *dl;
822 struct hci_dev_req *dr;
823 int n = 0, size, err;
826 if (get_user(dev_num, (__u16 __user *) arg))
829 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
832 size = sizeof(*dl) + dev_num * sizeof(*dr);
834 dl = kzalloc(size, GFP_KERNEL);
840 read_lock(&hci_dev_list_lock);
841 list_for_each_entry(hdev, &hci_dev_list, list) {
842 unsigned long flags = hdev->flags;
844 /* When the auto-off is configured it means the transport
845 * is running, but in that case still indicate that the
846 * device is actually down.
848 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
849 flags &= ~BIT(HCI_UP);
851 (dr + n)->dev_id = hdev->id;
852 (dr + n)->dev_opt = flags;
857 read_unlock(&hci_dev_list_lock);
860 size = sizeof(*dl) + n * sizeof(*dr);
862 err = copy_to_user(arg, dl, size);
865 return err ? -EFAULT : 0;
868 int hci_get_dev_info(void __user *arg)
870 struct hci_dev *hdev;
871 struct hci_dev_info di;
875 if (copy_from_user(&di, arg, sizeof(di)))
878 hdev = hci_dev_get(di.dev_id);
882 /* When the auto-off is configured it means the transport
883 * is running, but in that case still indicate that the
884 * device is actually down.
886 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
887 flags = hdev->flags & ~BIT(HCI_UP);
891 strcpy(di.name, hdev->name);
892 di.bdaddr = hdev->bdaddr;
893 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
895 di.pkt_type = hdev->pkt_type;
896 if (lmp_bredr_capable(hdev)) {
897 di.acl_mtu = hdev->acl_mtu;
898 di.acl_pkts = hdev->acl_pkts;
899 di.sco_mtu = hdev->sco_mtu;
900 di.sco_pkts = hdev->sco_pkts;
902 di.acl_mtu = hdev->le_mtu;
903 di.acl_pkts = hdev->le_pkts;
907 di.link_policy = hdev->link_policy;
908 di.link_mode = hdev->link_mode;
910 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
911 memcpy(&di.features, &hdev->features, sizeof(di.features));
913 if (copy_to_user(arg, &di, sizeof(di)))
921 /* ---- Interface to HCI drivers ---- */
923 static int hci_rfkill_set_block(void *data, bool blocked)
925 struct hci_dev *hdev = data;
927 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
929 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
933 hci_dev_set_flag(hdev, HCI_RFKILLED);
934 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
935 !hci_dev_test_flag(hdev, HCI_CONFIG))
936 hci_dev_do_close(hdev);
938 hci_dev_clear_flag(hdev, HCI_RFKILLED);
944 static const struct rfkill_ops hci_rfkill_ops = {
945 .set_block = hci_rfkill_set_block,
948 static void hci_power_on(struct work_struct *work)
950 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
953 BT_DBG("%s", hdev->name);
955 if (test_bit(HCI_UP, &hdev->flags) &&
956 hci_dev_test_flag(hdev, HCI_MGMT) &&
957 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
958 cancel_delayed_work(&hdev->power_off);
959 err = hci_powered_update_sync(hdev);
960 mgmt_power_on(hdev, err);
964 err = hci_dev_do_open(hdev);
967 mgmt_set_powered_failed(hdev, err);
968 hci_dev_unlock(hdev);
972 /* During the HCI setup phase, a few error conditions are
973 * ignored and they need to be checked now. If they are still
974 * valid, it is important to turn the device back off.
976 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
977 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
978 (hdev->dev_type == HCI_PRIMARY &&
979 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
980 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
981 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
982 hci_dev_do_close(hdev);
983 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
984 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
985 HCI_AUTO_OFF_TIMEOUT);
988 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
989 /* For unconfigured devices, set the HCI_RAW flag
990 * so that userspace can easily identify them.
992 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
993 set_bit(HCI_RAW, &hdev->flags);
995 /* For fully configured devices, this will send
996 * the Index Added event. For unconfigured devices,
997 * it will send Unconfigued Index Added event.
999 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
1000 * and no event will be send.
1002 mgmt_index_added(hdev);
1003 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1004 /* When the controller is now configured, then it
1005 * is important to clear the HCI_RAW flag.
1007 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1008 clear_bit(HCI_RAW, &hdev->flags);
1010 /* Powering on the controller with HCI_CONFIG set only
1011 * happens with the transition from unconfigured to
1012 * configured. This will send the Index Added event.
1014 mgmt_index_added(hdev);
1018 static void hci_power_off(struct work_struct *work)
1020 struct hci_dev *hdev = container_of(work, struct hci_dev,
1023 BT_DBG("%s", hdev->name);
1025 hci_dev_do_close(hdev);
1028 static void hci_error_reset(struct work_struct *work)
1030 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1032 BT_DBG("%s", hdev->name);
1035 hdev->hw_error(hdev, hdev->hw_error_code);
1037 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1039 if (hci_dev_do_close(hdev))
1042 hci_dev_do_open(hdev);
1045 void hci_uuids_clear(struct hci_dev *hdev)
1047 struct bt_uuid *uuid, *tmp;
1049 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1050 list_del(&uuid->list);
1055 void hci_link_keys_clear(struct hci_dev *hdev)
1057 struct link_key *key;
1059 list_for_each_entry(key, &hdev->link_keys, list) {
1060 list_del_rcu(&key->list);
1061 kfree_rcu(key, rcu);
1065 void hci_smp_ltks_clear(struct hci_dev *hdev)
1069 list_for_each_entry(k, &hdev->long_term_keys, list) {
1070 list_del_rcu(&k->list);
1075 void hci_smp_irks_clear(struct hci_dev *hdev)
1079 list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
1080 list_del_rcu(&k->list);
1085 void hci_blocked_keys_clear(struct hci_dev *hdev)
1087 struct blocked_key *b;
1089 list_for_each_entry(b, &hdev->blocked_keys, list) {
1090 list_del_rcu(&b->list);
1095 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1097 bool blocked = false;
1098 struct blocked_key *b;
1101 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1102 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1112 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1117 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1118 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1121 if (hci_is_blocked_key(hdev,
1122 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1124 bt_dev_warn_ratelimited(hdev,
1125 "Link key blocked for %pMR",
1138 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1139 u8 key_type, u8 old_key_type)
1142 if (key_type < 0x03)
1145 /* Debug keys are insecure so don't store them persistently */
1146 if (key_type == HCI_LK_DEBUG_COMBINATION)
1149 /* Changed combination key and there's no previous one */
1150 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1153 /* Security mode 3 case */
1157 /* BR/EDR key derived using SC from an LE link */
1158 if (conn->type == LE_LINK)
1161 /* Neither local nor remote side had no-bonding as requirement */
1162 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1165 /* Local side had dedicated bonding as requirement */
1166 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1169 /* Remote side had dedicated bonding as requirement */
1170 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1173 /* If none of the above criteria match, then don't store the key
1178 static u8 ltk_role(u8 type)
1180 if (type == SMP_LTK)
1181 return HCI_ROLE_MASTER;
1183 return HCI_ROLE_SLAVE;
1186 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1187 u8 addr_type, u8 role)
1192 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1193 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1196 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1199 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1201 bt_dev_warn_ratelimited(hdev,
1202 "LTK blocked for %pMR",
1215 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1217 struct smp_irk *irk_to_return = NULL;
1218 struct smp_irk *irk;
1221 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1222 if (!bacmp(&irk->rpa, rpa)) {
1223 irk_to_return = irk;
1228 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1229 if (smp_irk_matches(hdev, irk->val, rpa)) {
1230 bacpy(&irk->rpa, rpa);
1231 irk_to_return = irk;
1237 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1238 irk_to_return->val)) {
1239 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1240 &irk_to_return->bdaddr);
1241 irk_to_return = NULL;
1246 return irk_to_return;
1249 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1252 struct smp_irk *irk_to_return = NULL;
1253 struct smp_irk *irk;
1255 /* Identity Address must be public or static random */
1256 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1260 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1261 if (addr_type == irk->addr_type &&
1262 bacmp(bdaddr, &irk->bdaddr) == 0) {
1263 irk_to_return = irk;
1270 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1271 irk_to_return->val)) {
1272 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1273 &irk_to_return->bdaddr);
1274 irk_to_return = NULL;
1279 return irk_to_return;
1282 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1283 bdaddr_t *bdaddr, u8 *val, u8 type,
1284 u8 pin_len, bool *persistent)
1286 struct link_key *key, *old_key;
1289 old_key = hci_find_link_key(hdev, bdaddr);
1291 old_key_type = old_key->type;
1294 old_key_type = conn ? conn->key_type : 0xff;
1295 key = kzalloc(sizeof(*key), GFP_KERNEL);
1298 list_add_rcu(&key->list, &hdev->link_keys);
1301 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1303 /* Some buggy controller combinations generate a changed
1304 * combination key for legacy pairing even when there's no
1306 if (type == HCI_LK_CHANGED_COMBINATION &&
1307 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1308 type = HCI_LK_COMBINATION;
1310 conn->key_type = type;
1313 bacpy(&key->bdaddr, bdaddr);
1314 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1315 key->pin_len = pin_len;
1317 if (type == HCI_LK_CHANGED_COMBINATION)
1318 key->type = old_key_type;
1323 *persistent = hci_persistent_key(hdev, conn, type,
1329 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1330 u8 addr_type, u8 type, u8 authenticated,
1331 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1333 struct smp_ltk *key, *old_key;
1334 u8 role = ltk_role(type);
1336 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1340 key = kzalloc(sizeof(*key), GFP_KERNEL);
1343 list_add_rcu(&key->list, &hdev->long_term_keys);
1346 bacpy(&key->bdaddr, bdaddr);
1347 key->bdaddr_type = addr_type;
1348 memcpy(key->val, tk, sizeof(key->val));
1349 key->authenticated = authenticated;
1352 key->enc_size = enc_size;
1358 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1359 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1361 struct smp_irk *irk;
1363 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1365 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1369 bacpy(&irk->bdaddr, bdaddr);
1370 irk->addr_type = addr_type;
1372 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1375 memcpy(irk->val, val, 16);
1376 bacpy(&irk->rpa, rpa);
1381 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1383 struct link_key *key;
1385 key = hci_find_link_key(hdev, bdaddr);
1389 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1391 list_del_rcu(&key->list);
1392 kfree_rcu(key, rcu);
1397 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1402 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1403 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1406 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1408 list_del_rcu(&k->list);
1413 return removed ? 0 : -ENOENT;
1416 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1420 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
1421 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1424 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1426 list_del_rcu(&k->list);
1431 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1434 struct smp_irk *irk;
1437 if (type == BDADDR_BREDR) {
1438 if (hci_find_link_key(hdev, bdaddr))
1443 /* Convert to HCI addr type which struct smp_ltk uses */
1444 if (type == BDADDR_LE_PUBLIC)
1445 addr_type = ADDR_LE_DEV_PUBLIC;
1447 addr_type = ADDR_LE_DEV_RANDOM;
1449 irk = hci_get_irk(hdev, bdaddr, addr_type);
1451 bdaddr = &irk->bdaddr;
1452 addr_type = irk->addr_type;
1456 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1457 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1467 /* HCI command timer function */
1468 static void hci_cmd_timeout(struct work_struct *work)
1470 struct hci_dev *hdev = container_of(work, struct hci_dev,
1473 if (hdev->sent_cmd) {
1474 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1475 u16 opcode = __le16_to_cpu(sent->opcode);
1477 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1479 bt_dev_err(hdev, "command tx timeout");
1482 if (hdev->cmd_timeout)
1483 hdev->cmd_timeout(hdev);
1485 atomic_set(&hdev->cmd_cnt, 1);
1486 queue_work(hdev->workqueue, &hdev->cmd_work);
1489 /* HCI ncmd timer function */
1490 static void hci_ncmd_timeout(struct work_struct *work)
1492 struct hci_dev *hdev = container_of(work, struct hci_dev,
1495 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1497 /* During HCI_INIT phase no events can be injected if the ncmd timer
1498 * triggers since the procedure has its own timeout handling.
1500 if (test_bit(HCI_INIT, &hdev->flags))
1503 /* This is an irrecoverable state, inject hardware error event */
1504 hci_reset_dev(hdev);
1507 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1508 bdaddr_t *bdaddr, u8 bdaddr_type)
1510 struct oob_data *data;
1512 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1513 if (bacmp(bdaddr, &data->bdaddr) != 0)
1515 if (data->bdaddr_type != bdaddr_type)
1523 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1526 struct oob_data *data;
1528 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1532 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1534 list_del(&data->list);
1540 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1542 struct oob_data *data, *n;
1544 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1545 list_del(&data->list);
1550 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1551 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1552 u8 *hash256, u8 *rand256)
1554 struct oob_data *data;
1556 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1558 data = kmalloc(sizeof(*data), GFP_KERNEL);
1562 bacpy(&data->bdaddr, bdaddr);
1563 data->bdaddr_type = bdaddr_type;
1564 list_add(&data->list, &hdev->remote_oob_data);
1567 if (hash192 && rand192) {
1568 memcpy(data->hash192, hash192, sizeof(data->hash192));
1569 memcpy(data->rand192, rand192, sizeof(data->rand192));
1570 if (hash256 && rand256)
1571 data->present = 0x03;
1573 memset(data->hash192, 0, sizeof(data->hash192));
1574 memset(data->rand192, 0, sizeof(data->rand192));
1575 if (hash256 && rand256)
1576 data->present = 0x02;
1578 data->present = 0x00;
1581 if (hash256 && rand256) {
1582 memcpy(data->hash256, hash256, sizeof(data->hash256));
1583 memcpy(data->rand256, rand256, sizeof(data->rand256));
1585 memset(data->hash256, 0, sizeof(data->hash256));
1586 memset(data->rand256, 0, sizeof(data->rand256));
1587 if (hash192 && rand192)
1588 data->present = 0x01;
1591 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1596 /* This function requires the caller holds hdev->lock */
1597 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1599 struct adv_info *adv_instance;
1601 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1602 if (adv_instance->instance == instance)
1603 return adv_instance;
1609 /* This function requires the caller holds hdev->lock */
1610 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1612 struct adv_info *cur_instance;
1614 cur_instance = hci_find_adv_instance(hdev, instance);
1618 if (cur_instance == list_last_entry(&hdev->adv_instances,
1619 struct adv_info, list))
1620 return list_first_entry(&hdev->adv_instances,
1621 struct adv_info, list);
1623 return list_next_entry(cur_instance, list);
1626 /* This function requires the caller holds hdev->lock */
1627 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1629 struct adv_info *adv_instance;
1631 adv_instance = hci_find_adv_instance(hdev, instance);
1635 BT_DBG("%s removing %dMR", hdev->name, instance);
1637 if (hdev->cur_adv_instance == instance) {
1638 if (hdev->adv_instance_timeout) {
1639 cancel_delayed_work(&hdev->adv_instance_expire);
1640 hdev->adv_instance_timeout = 0;
1642 hdev->cur_adv_instance = 0x00;
1645 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1647 list_del(&adv_instance->list);
1648 kfree(adv_instance);
1650 hdev->adv_instance_cnt--;
1655 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1657 struct adv_info *adv_instance, *n;
1659 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1660 adv_instance->rpa_expired = rpa_expired;
1663 /* This function requires the caller holds hdev->lock */
1664 void hci_adv_instances_clear(struct hci_dev *hdev)
1666 struct adv_info *adv_instance, *n;
1668 if (hdev->adv_instance_timeout) {
1669 cancel_delayed_work(&hdev->adv_instance_expire);
1670 hdev->adv_instance_timeout = 0;
1673 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1674 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1675 list_del(&adv_instance->list);
1676 kfree(adv_instance);
1679 hdev->adv_instance_cnt = 0;
1680 hdev->cur_adv_instance = 0x00;
1683 static void adv_instance_rpa_expired(struct work_struct *work)
1685 struct adv_info *adv_instance = container_of(work, struct adv_info,
1686 rpa_expired_cb.work);
1690 adv_instance->rpa_expired = true;
1693 /* This function requires the caller holds hdev->lock */
1694 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
1695 u16 adv_data_len, u8 *adv_data,
1696 u16 scan_rsp_len, u8 *scan_rsp_data,
1697 u16 timeout, u16 duration, s8 tx_power,
1698 u32 min_interval, u32 max_interval)
1700 struct adv_info *adv_instance;
1702 adv_instance = hci_find_adv_instance(hdev, instance);
1704 memset(adv_instance->adv_data, 0,
1705 sizeof(adv_instance->adv_data));
1706 memset(adv_instance->scan_rsp_data, 0,
1707 sizeof(adv_instance->scan_rsp_data));
1709 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1710 instance < 1 || instance > hdev->le_num_of_adv_sets)
1713 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
1717 adv_instance->pending = true;
1718 adv_instance->instance = instance;
1719 list_add(&adv_instance->list, &hdev->adv_instances);
1720 hdev->adv_instance_cnt++;
1723 adv_instance->flags = flags;
1724 adv_instance->adv_data_len = adv_data_len;
1725 adv_instance->scan_rsp_len = scan_rsp_len;
1726 adv_instance->min_interval = min_interval;
1727 adv_instance->max_interval = max_interval;
1728 adv_instance->tx_power = tx_power;
1731 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1734 memcpy(adv_instance->scan_rsp_data,
1735 scan_rsp_data, scan_rsp_len);
1737 adv_instance->timeout = timeout;
1738 adv_instance->remaining_time = timeout;
1741 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
1743 adv_instance->duration = duration;
1745 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
1746 adv_instance_rpa_expired);
1748 BT_DBG("%s for %dMR", hdev->name, instance);
1753 /* This function requires the caller holds hdev->lock */
1754 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1755 u16 adv_data_len, u8 *adv_data,
1756 u16 scan_rsp_len, u8 *scan_rsp_data)
1758 struct adv_info *adv_instance;
1760 adv_instance = hci_find_adv_instance(hdev, instance);
1762 /* If advertisement doesn't exist, we can't modify its data */
1767 memset(adv_instance->adv_data, 0,
1768 sizeof(adv_instance->adv_data));
1769 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
1770 adv_instance->adv_data_len = adv_data_len;
1774 memset(adv_instance->scan_rsp_data, 0,
1775 sizeof(adv_instance->scan_rsp_data));
1776 memcpy(adv_instance->scan_rsp_data,
1777 scan_rsp_data, scan_rsp_len);
1778 adv_instance->scan_rsp_len = scan_rsp_len;
1784 /* This function requires the caller holds hdev->lock */
1785 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1788 struct adv_info *adv;
1790 if (instance == 0x00) {
1791 /* Instance 0 always manages the "Tx Power" and "Flags"
1794 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1796 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1797 * corresponds to the "connectable" instance flag.
1799 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1800 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1802 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1803 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1804 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1805 flags |= MGMT_ADV_FLAG_DISCOV;
1810 adv = hci_find_adv_instance(hdev, instance);
1812 /* Return 0 when we got an invalid instance identifier. */
1819 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1821 struct adv_info *adv;
1823 /* Instance 0x00 always set local name */
1824 if (instance == 0x00)
1827 adv = hci_find_adv_instance(hdev, instance);
1831 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1832 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1835 return adv->scan_rsp_len ? true : false;
1838 /* This function requires the caller holds hdev->lock */
1839 void hci_adv_monitors_clear(struct hci_dev *hdev)
1841 struct adv_monitor *monitor;
1844 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1845 hci_free_adv_monitor(hdev, monitor);
1847 idr_destroy(&hdev->adv_monitors_idr);
1850 /* Frees the monitor structure and do some bookkeepings.
1851 * This function requires the caller holds hdev->lock.
1853 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1855 struct adv_pattern *pattern;
1856 struct adv_pattern *tmp;
1861 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1862 list_del(&pattern->list);
1866 if (monitor->handle)
1867 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1869 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1870 hdev->adv_monitors_cnt--;
1871 mgmt_adv_monitor_removed(hdev, monitor->handle);
1877 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
1879 return mgmt_add_adv_patterns_monitor_complete(hdev, status);
1882 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
1884 return mgmt_remove_adv_monitor_complete(hdev, status);
1887 /* Assigns handle to a monitor, and if offloading is supported and power is on,
1888 * also attempts to forward the request to the controller.
1889 * Returns true if request is forwarded (result is pending), false otherwise.
1890 * This function requires the caller holds hdev->lock.
1892 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
1895 int min, max, handle;
1904 min = HCI_MIN_ADV_MONITOR_HANDLE;
1905 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1906 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1913 monitor->handle = handle;
1915 if (!hdev_is_powered(hdev))
1918 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1919 case HCI_ADV_MONITOR_EXT_NONE:
1920 hci_update_passive_scan(hdev);
1921 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
1922 /* Message was not forwarded to controller - not an error */
1924 case HCI_ADV_MONITOR_EXT_MSFT:
1925 *err = msft_add_monitor_pattern(hdev, monitor);
1926 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
1934 /* Attempts to tell the controller and free the monitor. If somehow the
1935 * controller doesn't have a corresponding handle, remove anyway.
1936 * Returns true if request is forwarded (result is pending), false otherwise.
1937 * This function requires the caller holds hdev->lock.
1939 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
1940 struct adv_monitor *monitor,
1941 u16 handle, int *err)
1945 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1946 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1948 case HCI_ADV_MONITOR_EXT_MSFT:
1949 *err = msft_remove_monitor(hdev, monitor, handle);
1953 /* In case no matching handle registered, just free the monitor */
1954 if (*err == -ENOENT)
1960 if (*err == -ENOENT)
1961 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1963 hci_free_adv_monitor(hdev, monitor);
1969 /* Returns true if request is forwarded (result is pending), false otherwise.
1970 * This function requires the caller holds hdev->lock.
1972 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
1974 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1982 pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
1983 if (!*err && !pending)
1984 hci_update_passive_scan(hdev);
1986 bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
1987 hdev->name, handle, *err, pending ? "" : "not ");
1992 /* Returns true if request is forwarded (result is pending), false otherwise.
1993 * This function requires the caller holds hdev->lock.
1995 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
1997 struct adv_monitor *monitor;
1998 int idr_next_id = 0;
1999 bool pending = false;
2000 bool update = false;
2004 while (!*err && !pending) {
2005 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2009 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
2011 if (!*err && !pending)
2016 hci_update_passive_scan(hdev);
2018 bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
2019 hdev->name, *err, pending ? "" : "not ");
2024 /* This function requires the caller holds hdev->lock */
2025 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2027 return !idr_is_empty(&hdev->adv_monitors_idr);
2030 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2032 if (msft_monitor_supported(hdev))
2033 return HCI_ADV_MONITOR_EXT_MSFT;
2035 return HCI_ADV_MONITOR_EXT_NONE;
2038 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2039 bdaddr_t *bdaddr, u8 type)
2041 struct bdaddr_list *b;
2043 list_for_each_entry(b, bdaddr_list, list) {
2044 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2051 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2052 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2055 struct bdaddr_list_with_irk *b;
2057 list_for_each_entry(b, bdaddr_list, list) {
2058 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2065 struct bdaddr_list_with_flags *
2066 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2067 bdaddr_t *bdaddr, u8 type)
2069 struct bdaddr_list_with_flags *b;
2071 list_for_each_entry(b, bdaddr_list, list) {
2072 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2079 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2081 struct bdaddr_list *b, *n;
2083 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2089 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2091 struct bdaddr_list *entry;
2093 if (!bacmp(bdaddr, BDADDR_ANY))
2096 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2099 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2103 bacpy(&entry->bdaddr, bdaddr);
2104 entry->bdaddr_type = type;
2106 list_add(&entry->list, list);
2111 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2112 u8 type, u8 *peer_irk, u8 *local_irk)
2114 struct bdaddr_list_with_irk *entry;
2116 if (!bacmp(bdaddr, BDADDR_ANY))
2119 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2122 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2126 bacpy(&entry->bdaddr, bdaddr);
2127 entry->bdaddr_type = type;
2130 memcpy(entry->peer_irk, peer_irk, 16);
2133 memcpy(entry->local_irk, local_irk, 16);
2135 list_add(&entry->list, list);
2140 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2143 struct bdaddr_list_with_flags *entry;
2145 if (!bacmp(bdaddr, BDADDR_ANY))
2148 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2151 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2155 bacpy(&entry->bdaddr, bdaddr);
2156 entry->bdaddr_type = type;
2157 entry->flags = flags;
2159 list_add(&entry->list, list);
2164 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2166 struct bdaddr_list *entry;
2168 if (!bacmp(bdaddr, BDADDR_ANY)) {
2169 hci_bdaddr_list_clear(list);
2173 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2177 list_del(&entry->list);
2183 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2186 struct bdaddr_list_with_irk *entry;
2188 if (!bacmp(bdaddr, BDADDR_ANY)) {
2189 hci_bdaddr_list_clear(list);
2193 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2197 list_del(&entry->list);
2203 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2206 struct bdaddr_list_with_flags *entry;
2208 if (!bacmp(bdaddr, BDADDR_ANY)) {
2209 hci_bdaddr_list_clear(list);
2213 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2217 list_del(&entry->list);
2223 /* This function requires the caller holds hdev->lock */
2224 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2225 bdaddr_t *addr, u8 addr_type)
2227 struct hci_conn_params *params;
2229 list_for_each_entry(params, &hdev->le_conn_params, list) {
2230 if (bacmp(¶ms->addr, addr) == 0 &&
2231 params->addr_type == addr_type) {
2239 /* This function requires the caller holds hdev->lock */
2240 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2241 bdaddr_t *addr, u8 addr_type)
2243 struct hci_conn_params *param;
2245 list_for_each_entry(param, list, action) {
2246 if (bacmp(¶m->addr, addr) == 0 &&
2247 param->addr_type == addr_type)
2254 /* This function requires the caller holds hdev->lock */
2255 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2256 bdaddr_t *addr, u8 addr_type)
2258 struct hci_conn_params *params;
2260 params = hci_conn_params_lookup(hdev, addr, addr_type);
2264 params = kzalloc(sizeof(*params), GFP_KERNEL);
2266 bt_dev_err(hdev, "out of memory");
2270 bacpy(¶ms->addr, addr);
2271 params->addr_type = addr_type;
2273 list_add(¶ms->list, &hdev->le_conn_params);
2274 INIT_LIST_HEAD(¶ms->action);
2276 params->conn_min_interval = hdev->le_conn_min_interval;
2277 params->conn_max_interval = hdev->le_conn_max_interval;
2278 params->conn_latency = hdev->le_conn_latency;
2279 params->supervision_timeout = hdev->le_supv_timeout;
2280 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2282 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2287 static void hci_conn_params_free(struct hci_conn_params *params)
2290 hci_conn_drop(params->conn);
2291 hci_conn_put(params->conn);
2294 list_del(¶ms->action);
2295 list_del(¶ms->list);
2299 /* This function requires the caller holds hdev->lock */
2300 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2302 struct hci_conn_params *params;
2304 params = hci_conn_params_lookup(hdev, addr, addr_type);
2308 hci_conn_params_free(params);
2310 hci_update_passive_scan(hdev);
2312 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2315 /* This function requires the caller holds hdev->lock */
2316 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2318 struct hci_conn_params *params, *tmp;
2320 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2321 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2324 /* If trying to establish one time connection to disabled
2325 * device, leave the params, but mark them as just once.
2327 if (params->explicit_connect) {
2328 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2332 list_del(¶ms->list);
2336 BT_DBG("All LE disabled connection parameters were removed");
2339 /* This function requires the caller holds hdev->lock */
2340 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2342 struct hci_conn_params *params, *tmp;
2344 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2345 hci_conn_params_free(params);
2347 BT_DBG("All LE connection parameters were removed");
2350 /* Copy the Identity Address of the controller.
2352 * If the controller has a public BD_ADDR, then by default use that one.
2353 * If this is a LE only controller without a public address, default to
2354 * the static random address.
2356 * For debugging purposes it is possible to force controllers with a
2357 * public address to use the static random address instead.
2359 * In case BR/EDR has been disabled on a dual-mode controller and
2360 * userspace has configured a static address, then that address
2361 * becomes the identity address instead of the public BR/EDR address.
2363 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2366 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2367 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2368 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2369 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2370 bacpy(bdaddr, &hdev->static_addr);
2371 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2373 bacpy(bdaddr, &hdev->bdaddr);
2374 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2378 static void hci_clear_wake_reason(struct hci_dev *hdev)
2382 hdev->wake_reason = 0;
2383 bacpy(&hdev->wake_addr, BDADDR_ANY);
2384 hdev->wake_addr_type = 0;
2386 hci_dev_unlock(hdev);
2389 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2392 struct hci_dev *hdev =
2393 container_of(nb, struct hci_dev, suspend_notifier);
2396 if (action == PM_SUSPEND_PREPARE)
2397 ret = hci_suspend_dev(hdev);
2398 else if (action == PM_POST_SUSPEND)
2399 ret = hci_resume_dev(hdev);
2402 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2408 /* Alloc HCI device */
2409 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2411 struct hci_dev *hdev;
2412 unsigned int alloc_size;
2414 alloc_size = sizeof(*hdev);
2416 /* Fixme: May need ALIGN-ment? */
2417 alloc_size += sizeof_priv;
2420 hdev = kzalloc(alloc_size, GFP_KERNEL);
2424 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2425 hdev->esco_type = (ESCO_HV1);
2426 hdev->link_mode = (HCI_LM_ACCEPT);
2427 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2428 hdev->io_capability = 0x03; /* No Input No Output */
2429 hdev->manufacturer = 0xffff; /* Default to internal use */
2430 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2431 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2432 hdev->adv_instance_cnt = 0;
2433 hdev->cur_adv_instance = 0x00;
2434 hdev->adv_instance_timeout = 0;
2436 hdev->advmon_allowlist_duration = 300;
2437 hdev->advmon_no_filter_duration = 500;
2438 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2440 hdev->sniff_max_interval = 800;
2441 hdev->sniff_min_interval = 80;
2443 hdev->le_adv_channel_map = 0x07;
2444 hdev->le_adv_min_interval = 0x0800;
2445 hdev->le_adv_max_interval = 0x0800;
2446 hdev->le_scan_interval = 0x0060;
2447 hdev->le_scan_window = 0x0030;
2448 hdev->le_scan_int_suspend = 0x0400;
2449 hdev->le_scan_window_suspend = 0x0012;
2450 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2451 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2452 hdev->le_scan_int_adv_monitor = 0x0060;
2453 hdev->le_scan_window_adv_monitor = 0x0030;
2454 hdev->le_scan_int_connect = 0x0060;
2455 hdev->le_scan_window_connect = 0x0060;
2456 hdev->le_conn_min_interval = 0x0018;
2457 hdev->le_conn_max_interval = 0x0028;
2458 hdev->le_conn_latency = 0x0000;
2459 hdev->le_supv_timeout = 0x002a;
2460 hdev->le_def_tx_len = 0x001b;
2461 hdev->le_def_tx_time = 0x0148;
2462 hdev->le_max_tx_len = 0x001b;
2463 hdev->le_max_tx_time = 0x0148;
2464 hdev->le_max_rx_len = 0x001b;
2465 hdev->le_max_rx_time = 0x0148;
2466 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2467 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2468 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2469 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2470 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2471 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2472 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2473 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2474 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2476 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2477 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2478 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2479 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2480 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2481 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2483 /* default 1.28 sec page scan */
2484 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2485 hdev->def_page_scan_int = 0x0800;
2486 hdev->def_page_scan_window = 0x0012;
2488 mutex_init(&hdev->lock);
2489 mutex_init(&hdev->req_lock);
2491 INIT_LIST_HEAD(&hdev->mgmt_pending);
2492 INIT_LIST_HEAD(&hdev->reject_list);
2493 INIT_LIST_HEAD(&hdev->accept_list);
2494 INIT_LIST_HEAD(&hdev->uuids);
2495 INIT_LIST_HEAD(&hdev->link_keys);
2496 INIT_LIST_HEAD(&hdev->long_term_keys);
2497 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2498 INIT_LIST_HEAD(&hdev->remote_oob_data);
2499 INIT_LIST_HEAD(&hdev->le_accept_list);
2500 INIT_LIST_HEAD(&hdev->le_resolv_list);
2501 INIT_LIST_HEAD(&hdev->le_conn_params);
2502 INIT_LIST_HEAD(&hdev->pend_le_conns);
2503 INIT_LIST_HEAD(&hdev->pend_le_reports);
2504 INIT_LIST_HEAD(&hdev->conn_hash.list);
2505 INIT_LIST_HEAD(&hdev->adv_instances);
2506 INIT_LIST_HEAD(&hdev->blocked_keys);
2507 INIT_LIST_HEAD(&hdev->monitored_devices);
2509 INIT_LIST_HEAD(&hdev->local_codecs);
2510 INIT_WORK(&hdev->rx_work, hci_rx_work);
2511 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2512 INIT_WORK(&hdev->tx_work, hci_tx_work);
2513 INIT_WORK(&hdev->power_on, hci_power_on);
2514 INIT_WORK(&hdev->error_reset, hci_error_reset);
2516 hci_cmd_sync_init(hdev);
2518 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2520 skb_queue_head_init(&hdev->rx_q);
2521 skb_queue_head_init(&hdev->cmd_q);
2522 skb_queue_head_init(&hdev->raw_q);
2524 init_waitqueue_head(&hdev->req_wait_q);
2526 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2527 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2529 hci_request_setup(hdev);
2531 hci_init_sysfs(hdev);
2532 discovery_init(hdev);
2536 EXPORT_SYMBOL(hci_alloc_dev_priv);
2538 /* Free HCI device */
2539 void hci_free_dev(struct hci_dev *hdev)
2541 /* will free via device release */
2542 put_device(&hdev->dev);
2544 EXPORT_SYMBOL(hci_free_dev);
2546 /* Register HCI device */
2547 int hci_register_dev(struct hci_dev *hdev)
2551 if (!hdev->open || !hdev->close || !hdev->send)
2554 /* Do not allow HCI_AMP devices to register at index 0,
2555 * so the index can be used as the AMP controller ID.
2557 switch (hdev->dev_type) {
2559 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2562 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2571 snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2574 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2576 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2577 if (!hdev->workqueue) {
2582 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2584 if (!hdev->req_workqueue) {
2585 destroy_workqueue(hdev->workqueue);
2590 if (!IS_ERR_OR_NULL(bt_debugfs))
2591 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2593 dev_set_name(&hdev->dev, "%s", hdev->name);
2595 error = device_add(&hdev->dev);
2599 hci_leds_init(hdev);
2601 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2602 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2605 if (rfkill_register(hdev->rfkill) < 0) {
2606 rfkill_destroy(hdev->rfkill);
2607 hdev->rfkill = NULL;
2611 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2612 hci_dev_set_flag(hdev, HCI_RFKILLED);
2614 hci_dev_set_flag(hdev, HCI_SETUP);
2615 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2617 if (hdev->dev_type == HCI_PRIMARY) {
2618 /* Assume BR/EDR support until proven otherwise (such as
2619 * through reading supported features during init.
2621 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2624 write_lock(&hci_dev_list_lock);
2625 list_add(&hdev->list, &hci_dev_list);
2626 write_unlock(&hci_dev_list_lock);
2628 /* Devices that are marked for raw-only usage are unconfigured
2629 * and should not be included in normal operation.
2631 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2632 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2634 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2638 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2640 hci_sock_dev_event(hdev, HCI_DEV_REG);
2643 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2644 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2645 error = register_pm_notifier(&hdev->suspend_notifier);
2650 queue_work(hdev->req_workqueue, &hdev->power_on);
2652 idr_init(&hdev->adv_monitors_idr);
2653 msft_register(hdev);
2658 debugfs_remove_recursive(hdev->debugfs);
2659 destroy_workqueue(hdev->workqueue);
2660 destroy_workqueue(hdev->req_workqueue);
2662 ida_simple_remove(&hci_index_ida, hdev->id);
2666 EXPORT_SYMBOL(hci_register_dev);
2668 /* Unregister HCI device */
2669 void hci_unregister_dev(struct hci_dev *hdev)
2671 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2673 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2675 write_lock(&hci_dev_list_lock);
2676 list_del(&hdev->list);
2677 write_unlock(&hci_dev_list_lock);
2679 cancel_work_sync(&hdev->power_on);
2681 hci_cmd_sync_clear(hdev);
2683 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
2684 unregister_pm_notifier(&hdev->suspend_notifier);
2686 msft_unregister(hdev);
2688 hci_dev_do_close(hdev);
2690 if (!test_bit(HCI_INIT, &hdev->flags) &&
2691 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2692 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2694 mgmt_index_removed(hdev);
2695 hci_dev_unlock(hdev);
2698 /* mgmt_index_removed should take care of emptying the
2700 BUG_ON(!list_empty(&hdev->mgmt_pending));
2702 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2705 rfkill_unregister(hdev->rfkill);
2706 rfkill_destroy(hdev->rfkill);
2709 device_del(&hdev->dev);
2710 /* Actual cleanup is deferred until hci_release_dev(). */
2713 EXPORT_SYMBOL(hci_unregister_dev);
2715 /* Release HCI device */
2716 void hci_release_dev(struct hci_dev *hdev)
2718 debugfs_remove_recursive(hdev->debugfs);
2719 kfree_const(hdev->hw_info);
2720 kfree_const(hdev->fw_info);
2722 destroy_workqueue(hdev->workqueue);
2723 destroy_workqueue(hdev->req_workqueue);
2726 hci_bdaddr_list_clear(&hdev->reject_list);
2727 hci_bdaddr_list_clear(&hdev->accept_list);
2728 hci_uuids_clear(hdev);
2729 hci_link_keys_clear(hdev);
2730 hci_smp_ltks_clear(hdev);
2731 hci_smp_irks_clear(hdev);
2732 hci_remote_oob_data_clear(hdev);
2733 hci_adv_instances_clear(hdev);
2734 hci_adv_monitors_clear(hdev);
2735 hci_bdaddr_list_clear(&hdev->le_accept_list);
2736 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2737 hci_conn_params_clear_all(hdev);
2738 hci_discovery_filter_clear(hdev);
2739 hci_blocked_keys_clear(hdev);
2740 hci_dev_unlock(hdev);
2742 ida_simple_remove(&hci_index_ida, hdev->id);
2743 kfree_skb(hdev->sent_cmd);
2746 EXPORT_SYMBOL(hci_release_dev);
2748 /* Suspend HCI device */
2749 int hci_suspend_dev(struct hci_dev *hdev)
2753 bt_dev_dbg(hdev, "");
2755 /* Suspend should only act on when powered. */
2756 if (!hdev_is_powered(hdev) ||
2757 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2760 /* If powering down don't attempt to suspend */
2761 if (mgmt_powering_down(hdev))
2764 hci_req_sync_lock(hdev);
2765 ret = hci_suspend_sync(hdev);
2766 hci_req_sync_unlock(hdev);
2768 hci_clear_wake_reason(hdev);
2769 mgmt_suspending(hdev, hdev->suspend_state);
2771 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2774 EXPORT_SYMBOL(hci_suspend_dev);
2776 /* Resume HCI device */
2777 int hci_resume_dev(struct hci_dev *hdev)
2781 bt_dev_dbg(hdev, "");
2783 /* Resume should only act on when powered. */
2784 if (!hdev_is_powered(hdev) ||
2785 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2788 /* If powering down don't attempt to resume */
2789 if (mgmt_powering_down(hdev))
2792 hci_req_sync_lock(hdev);
2793 ret = hci_resume_sync(hdev);
2794 hci_req_sync_unlock(hdev);
2796 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2797 hdev->wake_addr_type);
2799 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2802 EXPORT_SYMBOL(hci_resume_dev);
2804 /* Reset HCI device */
2805 int hci_reset_dev(struct hci_dev *hdev)
2807 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2808 struct sk_buff *skb;
2810 skb = bt_skb_alloc(3, GFP_ATOMIC);
2814 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2815 skb_put_data(skb, hw_err, 3);
2817 bt_dev_err(hdev, "Injecting HCI hardware error event");
2819 /* Send Hardware Error to upper stack */
2820 return hci_recv_frame(hdev, skb);
2822 EXPORT_SYMBOL(hci_reset_dev);
2824 /* Receive frame from HCI drivers */
2825 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2827 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2828 && !test_bit(HCI_INIT, &hdev->flags))) {
2833 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
2834 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
2835 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
2836 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
2842 bt_cb(skb)->incoming = 1;
2845 __net_timestamp(skb);
2847 skb_queue_tail(&hdev->rx_q, skb);
2848 queue_work(hdev->workqueue, &hdev->rx_work);
2852 EXPORT_SYMBOL(hci_recv_frame);
2854 /* Receive diagnostic message from HCI drivers */
2855 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2857 /* Mark as diagnostic packet */
2858 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2861 __net_timestamp(skb);
2863 skb_queue_tail(&hdev->rx_q, skb);
2864 queue_work(hdev->workqueue, &hdev->rx_work);
2868 EXPORT_SYMBOL(hci_recv_diag);
2870 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2874 va_start(vargs, fmt);
2875 kfree_const(hdev->hw_info);
2876 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2879 EXPORT_SYMBOL(hci_set_hw_info);
2881 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2885 va_start(vargs, fmt);
2886 kfree_const(hdev->fw_info);
2887 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2890 EXPORT_SYMBOL(hci_set_fw_info);
2892 /* ---- Interface to upper protocols ---- */
2894 int hci_register_cb(struct hci_cb *cb)
2896 BT_DBG("%p name %s", cb, cb->name);
2898 mutex_lock(&hci_cb_list_lock);
2899 list_add_tail(&cb->list, &hci_cb_list);
2900 mutex_unlock(&hci_cb_list_lock);
2904 EXPORT_SYMBOL(hci_register_cb);
2906 int hci_unregister_cb(struct hci_cb *cb)
2908 BT_DBG("%p name %s", cb, cb->name);
2910 mutex_lock(&hci_cb_list_lock);
2911 list_del(&cb->list);
2912 mutex_unlock(&hci_cb_list_lock);
2916 EXPORT_SYMBOL(hci_unregister_cb);
2918 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
2922 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
2926 __net_timestamp(skb);
2928 /* Send copy to monitor */
2929 hci_send_to_monitor(hdev, skb);
2931 if (atomic_read(&hdev->promisc)) {
2932 /* Send copy to the sockets */
2933 hci_send_to_sock(hdev, skb);
2936 /* Get rid of skb owner, prior to sending to the driver. */
2939 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
2944 err = hdev->send(hdev, skb);
2946 bt_dev_err(hdev, "sending frame failed (%d)", err);
2954 /* Send HCI command */
2955 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2958 struct sk_buff *skb;
2960 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2962 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2964 bt_dev_err(hdev, "no memory for command");
2968 /* Stand-alone HCI commands must be flagged as
2969 * single-command requests.
2971 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
2973 skb_queue_tail(&hdev->cmd_q, skb);
2974 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Send a command directly to the driver, bypassing cmd_q and the command
 * credit accounting.  Only permitted for vendor (OGF 0x3f) commands, since
 * those are the only ones allowed to produce no completion event.
 */
2979 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
2982 struct sk_buff *skb;
2984 if (hci_opcode_ogf(opcode) != 0x3f) {
2985 /* A controller receiving a command shall respond with either
2986 * a Command Status Event or a Command Complete Event.
2987 * Therefore, all standard HCI commands must be sent via the
2988 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
2989 * Some vendors do not comply with this rule for vendor-specific
2990 * commands and do not return any event. We want to support
2991 * unresponded commands for such cases only.
2993 bt_dev_err(hdev, "unresponded command not supported");
2997 skb = hci_prepare_cmd(hdev, opcode, plen, param);
/* Allocation-failure report (its guarding if is elided in this listing). */
2999 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3004 hci_send_frame(hdev, skb);
3008 EXPORT_SYMBOL(__hci_cmd_send);
3010 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command, or the
 * elided NULL paths when nothing was sent or the opcode does not match.
 */
3011 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3013 struct hci_command_hdr *hdr;
3015 if (!hdev->sent_cmd)
3018 hdr = (void *) hdev->sent_cmd->data;
/* Compare in wire (little-endian) byte order. */
3020 if (hdr->opcode != cpu_to_le16(opcode))
3023 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
/* Skip past the 3-byte command header to the parameters. */
3025 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header (handle+flags, length) to skb in place.
 * NOTE(review): the local 'len' used for dlen is declared on a line elided
 * from this listing.
 */
3029 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3031 struct hci_acl_hdr *hdr;
3034 skb_push(skb, HCI_ACL_HDR_SIZE);
3035 skb_reset_transport_header(skb);
3036 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
/* Pack the 12-bit handle and PB/BC flags into one little-endian u16. */
3037 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3038 hdr->dlen = cpu_to_le16(len);
/* Add ACL headers to skb (and each frag_list fragment) and queue everything
 * on the given queue.  The first fragment keeps the caller's flags; the
 * continuation fragments have ACL_START cleared.
 */
3041 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3042 struct sk_buff *skb, __u16 flags)
3044 struct hci_conn *conn = chan->conn;
3045 struct hci_dev *hdev = conn->hdev;
3046 struct sk_buff *list;
3048 skb->len = skb_headlen(skb);
3051 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
/* Primary controllers use the connection handle; AMP uses the channel
 * handle (case labels are elided in this listing).
 */
3053 switch (hdev->dev_type) {
3055 hci_add_acl_hdr(skb, conn->handle, flags);
3058 hci_add_acl_hdr(skb, chan->handle, flags);
3061 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3065 list = skb_shinfo(skb)->frag_list;
3067 /* Non fragmented */
3068 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3070 skb_queue_tail(queue, skb);
3073 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain; each fragment is queued individually below. */
3075 skb_shinfo(skb)->frag_list = NULL;
3077 /* Queue all fragments atomically. We need to use spin_lock_bh
3078 * here because of 6LoWPAN links, as there this function is
3079 * called from softirq and using normal spin lock could cause
3082 spin_lock_bh(&queue->lock);
3084 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the start-of-packet flag. */
3086 flags &= ~ACL_START;
3089 skb = list; list = list->next;
3091 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3092 hci_add_acl_hdr(skb, conn->handle, flags);
3094 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3096 __skb_queue_tail(queue, skb);
3099 spin_unlock_bh(&queue->lock);
/* Public ACL send entry point: queue skb on the channel's data_q and kick
 * the TX worker to schedule transmission.
 */
3103 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3105 struct hci_dev *hdev = chan->conn->hdev;
3107 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3109 hci_queue_acl(chan, &chan->data_q, skb, flags);
3111 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (handle, length) and queue the packet on the
 * connection's data_q for the TX worker.
 */
3115 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3117 struct hci_dev *hdev = conn->hdev;
3118 struct hci_sco_hdr hdr;
3120 BT_DBG("%s len %d", hdev->name, skb->len);
3122 hdr.handle = cpu_to_le16(conn->handle);
/* dlen is a single byte in the SCO header; no endian conversion needed. */
3123 hdr.dlen = skb->len;
3125 skb_push(skb, HCI_SCO_HDR_SIZE);
3126 skb_reset_transport_header(skb);
3127 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3129 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3131 skb_queue_tail(&conn->data_q, skb);
3132 queue_work(hdev->workqueue, &hdev->tx_work);
3135 /* ---- HCI TX task (outgoing data) ---- */
3137 /* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest in-flight
 * packets (fair round-robin), and compute its TX quota into *quote.
 * NOTE(review): rcu_read_lock/unlock and the quota computation lines are
 * elided from this listing.
 */
3138 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3141 struct hci_conn_hash *h = &hdev->conn_hash;
3142 struct hci_conn *conn = NULL, *c;
3143 unsigned int num = 0, min = ~0;
3145 /* We don't have to lock device here. Connections are always
3146 * added and removed with TX task disabled. */
3150 list_for_each_entry_rcu(c, &h->list, list) {
3151 if (c->type != type || skb_queue_empty(&c->data_q))
/* Only established or configuring links are schedulable. */
3154 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3159 if (c->sent < min) {
/* Early exit once every connection of this type has been inspected. */
3164 if (hci_conn_num(hdev, type) == num)
/* Choose the credit pool matching the winning link type. */
3173 switch (conn->type) {
3175 cnt = hdev->acl_cnt;
3179 cnt = hdev->sco_cnt;
/* LE shares the ACL pool when the controller has no dedicated LE buffers. */
3182 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3186 bt_dev_err(hdev, "unknown link type %d", conn->type);
3194 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler: disconnect every connection of @type that still has
 * unacknowledged packets — the controller stopped returning credits.
 */
3198 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3200 struct hci_conn_hash *h = &hdev->conn_hash;
3203 bt_dev_err(hdev, "link tx timeout");
3207 /* Kill stalled connections */
3208 list_for_each_entry_rcu(c, &h->list, list) {
3209 if (c->type == type && c->sent) {
3210 bt_dev_err(hdev, "killing stalled connection %pMR",
3212 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among all channels of @type with queued data,
 * pick the one whose head skb has the highest priority; ties broken by
 * fewest packets in flight on the owning connection.  Quota for the winner
 * is computed into *quote (computation lines elided from this listing).
 */
3219 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3222 struct hci_conn_hash *h = &hdev->conn_hash;
3223 struct hci_chan *chan = NULL;
3224 unsigned int num = 0, min = ~0, cur_prio = 0;
3225 struct hci_conn *conn;
3226 int cnt, q, conn_num = 0;
3228 BT_DBG("%s", hdev->name);
3232 list_for_each_entry_rcu(conn, &h->list, list) {
3233 struct hci_chan *tmp;
3235 if (conn->type != type)
3238 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3243 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3244 struct sk_buff *skb;
3246 if (skb_queue_empty(&tmp->data_q))
/* Peek only; dequeue happens in the hci_sched_* caller. */
3249 skb = skb_peek(&tmp->data_q);
3250 if (skb->priority < cur_prio)
/* A strictly higher priority resets the fairness comparison. */
3253 if (skb->priority > cur_prio) {
3256 cur_prio = skb->priority;
3261 if (conn->sent < min) {
3267 if (hci_conn_num(hdev, type) == conn_num)
/* Select the credit pool of the winning channel's link type. */
3276 switch (chan->conn->type) {
3278 cnt = hdev->acl_cnt;
/* AMP links account in data blocks rather than packets. */
3281 cnt = hdev->block_cnt;
3285 cnt = hdev->sco_cnt;
3288 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3292 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3297 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after a TX round, bump the priority of queued head
 * skbs (capped at HCI_PRIO_MAX - 1) so lower-priority channels eventually
 * win hci_chan_sent().
 */
3301 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3303 struct hci_conn_hash *h = &hdev->conn_hash;
3304 struct hci_conn *conn;
3307 BT_DBG("%s", hdev->name);
3311 list_for_each_entry_rcu(conn, &h->list, list) {
3312 struct hci_chan *chan;
3314 if (conn->type != type)
3317 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3322 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3323 struct sk_buff *skb;
3330 if (skb_queue_empty(&chan->data_q))
3333 skb = skb_peek(&chan->data_q);
/* Already at the cap — nothing to promote. */
3334 if (skb->priority >= HCI_PRIO_MAX - 1)
3337 skb->priority = HCI_PRIO_MAX - 1;
3339 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3343 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by one ACL packet (payload
 * length divided by block_len, rounded up) — used by block-based flow
 * control (AMP).
 */
3351 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3353 /* Calculate count of blocks used by this packet */
3354 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If credits ran out (!cnt) and nothing was sent for HCI_ACL_TX_TIMEOUT,
 * declare an ACL link TX timeout.  Skipped for unconfigured controllers,
 * which may legitimately stay silent.
 */
3357 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3359 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3360 /* ACL tx timeout must be longer than maximum
3361 * link supervision timeout (40.9 seconds) */
3362 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3363 HCI_ACL_TX_TIMEOUT))
3364 hci_link_tx_to(hdev, ACL_LINK);
/* Drain SCO traffic: while SCO credits remain, pick the least-busy SCO
 * connection and send up to its quota.
 */
3369 static void hci_sched_sco(struct hci_dev *hdev)
3371 struct hci_conn *conn;
3372 struct sk_buff *skb;
3375 BT_DBG("%s", hdev->name);
3377 if (!hci_conn_num(hdev, SCO_LINK))
3380 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3381 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3382 BT_DBG("skb %p len %d", skb, skb->len);
3383 hci_send_frame(hdev, skb);
/* Guard against the unsigned sent counter wrapping. */
3386 if (conn->sent == ~0)
/* Same as hci_sched_sco() but for eSCO links; shares the sco_cnt credit
 * pool with SCO.
 */
3392 static void hci_sched_esco(struct hci_dev *hdev)
3394 struct hci_conn *conn;
3395 struct sk_buff *skb;
3398 BT_DBG("%s", hdev->name);
3400 if (!hci_conn_num(hdev, ESCO_LINK))
3403 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3405 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3406 BT_DBG("skb %p len %d", skb, skb->len);
3407 hci_send_frame(hdev, skb);
3410 if (conn->sent == ~0)
/* Packet-based ACL scheduler: while ACL credits remain, pull the best
 * channel from hci_chan_sent() and send up to its quota, stopping early if
 * a lower-priority skb appears at the head of its queue.  Re-runs the
 * (e)SCO schedulers between batches to keep audio latency low.
 */
3416 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3418 unsigned int cnt = hdev->acl_cnt;
3419 struct hci_chan *chan;
3420 struct sk_buff *skb;
3423 __check_timeout(hdev, cnt);
3425 while (hdev->acl_cnt &&
3426 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
/* Priority of the batch is fixed by the first queued skb. */
3427 u32 priority = (skb_peek(&chan->data_q))->priority;
3428 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3429 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3430 skb->len, skb->priority);
3432 /* Stop if priority has changed */
3433 if (skb->priority < priority)
3436 skb = skb_dequeue(&chan->data_q);
3438 hci_conn_enter_active_mode(chan->conn,
3439 bt_cb(skb)->force_active);
3441 hci_send_frame(hdev, skb);
/* Remember last TX time for the __check_timeout() watchdog. */
3442 hdev->acl_last_tx = jiffies;
3448 /* Send pending SCO packets right away */
3449 hci_sched_sco(hdev);
3450 hci_sched_esco(hdev);
/* Anything was sent: rebalance channel priorities for fairness. */
3454 if (cnt != hdev->acl_cnt)
3455 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (AMP flow control): like hci_sched_acl_pkt()
 * but credits are data blocks, so each skb may consume several credits as
 * computed by __get_blocks().  NOTE(review): the 'type' local and its
 * ACL_LINK/AMP_LINK assignment are on lines elided from this listing.
 */
3458 static void hci_sched_acl_blk(struct hci_dev *hdev)
3460 unsigned int cnt = hdev->block_cnt;
3461 struct hci_chan *chan;
3462 struct sk_buff *skb;
3466 __check_timeout(hdev, cnt);
3468 BT_DBG("%s", hdev->name);
3470 if (hdev->dev_type == HCI_AMP)
3475 while (hdev->block_cnt > 0 &&
3476 (chan = hci_chan_sent(hdev, type, &quote))) {
3477 u32 priority = (skb_peek(&chan->data_q))->priority;
3478 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3481 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3482 skb->len, skb->priority);
3484 /* Stop if priority has changed */
3485 if (skb->priority < priority)
3488 skb = skb_dequeue(&chan->data_q);
3490 blocks = __get_blocks(hdev, skb);
/* Don't send a packet larger than the remaining block budget. */
3491 if (blocks > hdev->block_cnt)
3494 hci_conn_enter_active_mode(chan->conn,
3495 bt_cb(skb)->force_active);
3497 hci_send_frame(hdev, skb);
3498 hdev->acl_last_tx = jiffies;
/* Account in blocks, not packets. */
3500 hdev->block_cnt -= blocks;
3503 chan->sent += blocks;
3504 chan->conn->sent += blocks;
3508 if (cnt != hdev->block_cnt)
3509 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet- or block-based variant according
 * to the controller's flow-control mode.
 */
3512 static void hci_sched_acl(struct hci_dev *hdev)
3514 BT_DBG("%s", hdev->name);
3516 /* No ACL link over BR/EDR controller */
3517 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3520 /* No AMP link over AMP controller */
3521 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3524 switch (hdev->flow_ctl_mode) {
3525 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3526 hci_sched_acl_pkt(hdev);
3529 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3530 hci_sched_acl_blk(hdev);
/* LE link scheduler: same batching pattern as hci_sched_acl_pkt(), but the
 * credit pool is le_cnt when the controller has dedicated LE buffers,
 * otherwise the shared ACL pool (and the remaining count is written back to
 * acl_cnt — the le_pkts branch of that write-back is elided here).
 */
3535 static void hci_sched_le(struct hci_dev *hdev)
3537 struct hci_chan *chan;
3538 struct sk_buff *skb;
3539 int quote, cnt, tmp;
3541 BT_DBG("%s", hdev->name);
3543 if (!hci_conn_num(hdev, LE_LINK))
3546 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3548 __check_timeout(hdev, cnt);
3551 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3552 u32 priority = (skb_peek(&chan->data_q))->priority;
3553 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3554 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3555 skb->len, skb->priority);
3557 /* Stop if priority has changed */
3558 if (skb->priority < priority)
3561 skb = skb_dequeue(&chan->data_q);
3563 hci_send_frame(hdev, skb);
3564 hdev->le_last_tx = jiffies;
3570 /* Send pending SCO packets right away */
3571 hci_sched_sco(hdev);
3572 hci_sched_esco(hdev);
/* Return unused credits to the shared ACL pool. */
3579 hdev->acl_cnt = cnt;
3582 hci_prio_recalculate(hdev, LE_LINK);
/* Workqueue entry point for all outgoing traffic: run every per-type
 * scheduler (unless userspace owns the device via HCI_USER_CHANNEL), then
 * flush raw packets queued by HCI sockets straight to the driver.
 */
3585 static void hci_tx_work(struct work_struct *work)
3587 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3588 struct sk_buff *skb;
3590 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3591 hdev->sco_cnt, hdev->le_cnt);
3593 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3594 /* Schedule queues and send stuff to HCI driver */
3595 hci_sched_sco(hdev);
3596 hci_sched_esco(hdev);
3597 hci_sched_acl(hdev);
3601 /* Send next queued raw (unknown type) packet */
3602 while ((skb = skb_dequeue(&hdev->raw_q)))
3603 hci_send_frame(hdev, skb);
3606 /* ----- HCI RX task (incoming data processing) ----- */
3608 /* ACL data packet */
/* RX path for ACL data: strip the ACL header, resolve the connection by
 * handle, and pass the payload to L2CAP.  The unknown-handle branch logs
 * and (on a line elided here) frees the skb.
 */
3609 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3611 struct hci_acl_hdr *hdr = (void *) skb->data;
3612 struct hci_conn *conn;
3613 __u16 handle, flags;
3615 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* Split the wire u16 into PB/BC flags and the 12-bit handle. */
3617 handle = __le16_to_cpu(hdr->handle);
3618 flags = hci_flags(handle);
3619 handle = hci_handle(handle);
3621 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3624 hdev->stat.acl_rx++;
/* hci_dev_lock (elided above) protects the hash lookup. */
3627 conn = hci_conn_hash_lookup_handle(hdev, handle);
3628 hci_dev_unlock(hdev);
3631 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3633 /* Send to upper protocol */
3634 l2cap_recv_acldata(conn, skb, flags);
3637 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3644 /* SCO data packet */
/* RX path for SCO data: strip the SCO header, resolve the connection and
 * hand the payload to the SCO socket layer.  Mirrors hci_acldata_packet().
 */
3645 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3647 struct hci_sco_hdr *hdr = (void *) skb->data;
3648 struct hci_conn *conn;
3649 __u16 handle, flags;
3651 skb_pull(skb, HCI_SCO_HDR_SIZE);
3653 handle = __le16_to_cpu(hdr->handle);
3654 flags = hci_flags(handle);
3655 handle = hci_handle(handle);
3657 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3660 hdev->stat.sco_rx++;
3663 conn = hci_conn_hash_lookup_handle(hdev, handle);
3664 hci_dev_unlock(hdev);
3667 /* Send to upper protocol */
/* Low two flag bits carry the per-packet erroneous-data status. */
3668 bt_cb(skb)->sco.pkt_status = flags & 0x03;
3669 sco_recv_scodata(conn, skb);
/* Ratelimited: a flood of bad handles must not spam the log. */
3672 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
/* A request is complete when the next queued command starts a new request
 * (HCI_REQ_START) — or, per the elided branch, when cmd_q is empty.
 */
3679 static bool hci_req_is_complete(struct hci_dev *hdev)
3681 struct sk_buff *skb;
3683 skb = skb_peek(&hdev->cmd_q);
3687 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
/* Requeue a clone of the last sent command at the head of cmd_q.  Used when
 * a spontaneous controller reset swallowed the pending completion; never
 * resends HCI_OP_RESET itself.
 */
3690 static void hci_resend_last(struct hci_dev *hdev)
3692 struct hci_command_hdr *sent;
3693 struct sk_buff *skb;
3696 if (!hdev->sent_cmd)
3699 sent = (void *) hdev->sent_cmd->data;
3700 opcode = __le16_to_cpu(sent->opcode);
3701 if (opcode == HCI_OP_RESET)
3704 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
/* Head of queue: the resent command must go out before anything queued. */
3708 skb_queue_head(&hdev->cmd_q, skb);
3709 queue_work(hdev->workqueue, &hdev->cmd_work);
/* On command completion, locate the request-completion callback to invoke:
 * either attached to sent_cmd (last command of a request) or found while
 * flushing the remaining commands of an aborted request from cmd_q.
 * Outputs via *req_complete / *req_complete_skb; caller invokes them.
 */
3712 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3713 hci_req_complete_t *req_complete,
3714 hci_req_complete_skb_t *req_complete_skb)
3716 struct sk_buff *skb;
3717 unsigned long flags;
3719 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3721 /* If the completed command doesn't match the last one that was
3722 * sent we need to do special handling of it.
3724 if (!hci_sent_cmd_data(hdev, opcode)) {
3725 /* Some CSR based controllers generate a spontaneous
3726 * reset complete event during init and any pending
3727 * command will never be completed. In such a case we
3728 * need to resend whatever was the last sent
3731 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3732 hci_resend_last(hdev);
3737 /* If we reach this point this event matches the last command sent */
3738 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3740 /* If the command succeeded and there's still more commands in
3741 * this request the request is not yet complete.
3743 if (!status && !hci_req_is_complete(hdev))
3746 /* If this was the last command in a request the complete
3747 * callback would be found in hdev->sent_cmd instead of the
3748 * command queue (hdev->cmd_q).
3750 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
3751 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
3755 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
3756 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
3760 /* Remove all pending commands belonging to this request */
/* irqsave: cmd_q is also touched from contexts with interrupts disabled. */
3761 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3762 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* HCI_REQ_START marks the next request — push it back and stop. */
3763 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3764 __skb_queue_head(&hdev->cmd_q, skb);
3768 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3769 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3771 *req_complete = bt_cb(skb)->hci.req_complete;
3774 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
/* Workqueue entry point for incoming packets: mirror each skb to monitor
 * (and promiscuous sockets), apply user-channel / init-state filtering,
 * then dispatch by packet type to the event, ACL or SCO handler.
 */
3777 static void hci_rx_work(struct work_struct *work)
3779 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3780 struct sk_buff *skb;
3782 BT_DBG("%s", hdev->name);
3784 while ((skb = skb_dequeue(&hdev->rx_q))) {
3785 /* Send copy to monitor */
3786 hci_send_to_monitor(hdev, skb);
3788 if (atomic_read(&hdev->promisc)) {
3789 /* Send copy to the sockets */
3790 hci_send_to_sock(hdev, skb);
3793 /* If the device has been opened in HCI_USER_CHANNEL,
3794 * the userspace has exclusive access to device.
3795 * When device is HCI_INIT, we still need to process
3796 * the data packets to the driver in order
3797 * to complete its setup().
3799 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3800 !test_bit(HCI_INIT, &hdev->flags)) {
/* During init only events are processed; data packets are dropped. */
3805 if (test_bit(HCI_INIT, &hdev->flags)) {
3806 /* Don't process data packets in this states. */
3807 switch (hci_skb_pkt_type(skb)) {
3808 case HCI_ACLDATA_PKT:
3809 case HCI_SCODATA_PKT:
3810 case HCI_ISODATA_PKT:
/* Main dispatch (the HCI_EVENT_PKT case label is elided here). */
3817 switch (hci_skb_pkt_type(skb)) {
3819 BT_DBG("%s Event packet", hdev->name);
3820 hci_event_packet(hdev, skb);
3823 case HCI_ACLDATA_PKT:
3824 BT_DBG("%s ACL data packet", hdev->name);
3825 hci_acldata_packet(hdev, skb);
3828 case HCI_SCODATA_PKT:
3829 BT_DBG("%s SCO data packet", hdev->name);
3830 hci_scodata_packet(hdev, skb);
3840 static void hci_cmd_work(struct work_struct *work)
3842 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3843 struct sk_buff *skb;
3845 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3846 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3848 /* Send queued commands */
3849 if (atomic_read(&hdev->cmd_cnt)) {
3850 skb = skb_dequeue(&hdev->cmd_q);
3854 kfree_skb(hdev->sent_cmd);
3856 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
3857 if (hdev->sent_cmd) {
3859 if (hci_req_status_pend(hdev))
3860 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
3861 atomic_dec(&hdev->cmd_cnt);
3863 res = hci_send_frame(hdev, skb);
3865 __hci_cmd_sync_cancel(hdev, -res);
3867 if (test_bit(HCI_RESET, &hdev->flags))
3868 cancel_delayed_work(&hdev->cmd_timer);
3870 schedule_delayed_work(&hdev->cmd_timer,
3873 skb_queue_head(&hdev->cmd_q, skb);
3874 queue_work(hdev->workqueue, &hdev->cmd_work);