/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
static int hci_scan_req(struct hci_request *req, unsigned long opt)

	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;

static int hci_auth_req(struct hci_request *req, unsigned long opt)

	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)

	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)

	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)

	struct hci_dev *hdev = NULL, *d;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
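/* Usage sketch (illustrative, not part of this file): callers that look
 * up a device by index must balance the reference taken here with
 * hci_dev_put() when they are done:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		// ... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */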
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)

	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}

void hci_discovery_set_state(struct hci_dev *hdev, int state)

	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
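/* Note (editorial): a typical active-discovery cycle walks STOPPED ->
 * STARTING -> FINDING -> (RESOLVING ->) STOPPING -> STOPPED. Userspace is
 * notified via mgmt_discovering() only on the transition into FINDING and
 * on the transition into STOPPED, unless the cycle died while still in
 * STARTING.
 */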
void hci_inquiry_cache_flush(struct hci_dev *hdev)

	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)

	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)

	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)

	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)

	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
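/* Worked example (editorial): the resolve list is kept sorted by ascending
 * abs(rssi), i.e. strongest signal first. With entries at -40 and -70 dBm
 * not currently being resolved, a new entry at -55 dBm is inserted between
 * them, so name resolution is attempted for the strongest devices first.
 */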
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)

	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)

	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;

static int hci_inq_req(struct hci_request *req, unsigned long opt)

	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;

int hci_inquiry(void __user *arg)

	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

	if (hdev->dev_type != HCI_PRIMARY) {

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
			   timeo, NULL);

	/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
	 * cleared). If it is interrupted by a signal, return -EINTR.
	 */
	if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
			TASK_INTERRUPTIBLE)) {

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
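/* Usage sketch (illustrative, userspace side): this function backs the
 * HCIINQUIRY ioctl; with libbluetooth-style headers a caller looks
 * roughly like the following, where 0x9e8b33 is the General Inquiry
 * Access Code:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 8,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(sk, HCIINQUIRY, &buf);
 */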
static int hci_dev_do_open(struct hci_dev *hdev)

	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)

	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * bring up the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);
	return err;

int hci_dev_do_close(struct hci_dev *hdev)

	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;

int hci_dev_close(__u16 dev)

	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
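/* Usage sketch (illustrative, userspace side): hci_dev_open() and
 * hci_dev_close() back the HCIDEVUP/HCIDEVDOWN ioctls issued on a raw
 * HCI control socket:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(ctl, HCIDEVUP, 0);	// power on hci0
 *	ioctl(ctl, HCIDEVDOWN, 0);	// power off hci0
 */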
static int hci_dev_do_reset(struct hci_dev *hdev)

	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	/* Wait for
	 *
	 *	if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *		queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();

	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	atomic_set(&hdev->cmd_cnt, 1);

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;

int hci_dev_reset(__u16 dev)

	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

	err = hci_dev_do_reset(hdev);

int hci_dev_reset_stat(__u16 dev)

	struct hci_dev *hdev;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)

	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}

int hci_dev_cmd(unsigned int cmd, void __user *arg)

	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

	if (hdev->dev_type != HCI_PRIMARY) {

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err < 0)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;
int hci_get_dev_list(void __user *arg)

	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	__u16 dev_num;
	int n = 0, size, err;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
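/* Usage sketch (illustrative, userspace side): HCIGETDEVLIST fills a
 * variable-length buffer headed by the requested entry count:
 *
 *	struct {
 *		__u16 dev_num;
 *		struct hci_dev_req dev_req[4];
 *	} dl = { .dev_num = 4 };
 *	ioctl(ctl, HCIGETDEVLIST, &dl);
 */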
int hci_get_dev_info(void __user *arg)

	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strscpy(di.name, hdev->name, sizeof(di.name));
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
	}

	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)

	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EOPNOTSUPP;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)

	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}

static void hci_power_off(struct work_struct *work)

	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);

static void hci_error_reset(struct work_struct *work)

	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (!hci_dev_do_close(hdev))
		hci_dev_do_open(hdev);
void hci_uuids_clear(struct hci_dev *hdev)

	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

void hci_link_keys_clear(struct hci_dev *hdev)

	struct link_key *key, *tmp;

	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}

void hci_smp_ltks_clear(struct hci_dev *hdev)

	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}

void hci_smp_irks_clear(struct hci_dev *hdev)

	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}

void hci_blocked_keys_clear(struct hci_dev *hdev)

	struct blocked_key *b, *tmp;

	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])

	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}
	rcu_read_unlock();

	return blocked;

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)

	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)

	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
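/* Worked example (editorial): per the checks above, a legacy combination
 * key is always stored, a debug combination key never is, and an
 * SSP-generated key is stored only when at least one side required
 * bonding (auth values above 0x01) or dedicated bonding (0x02/0x03).
 */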
static u8 ltk_role(u8 type)

	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)

	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)

	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)

	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)

	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)

	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key) {
		key = old_key;
	} else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)

	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)

	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)

	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)

	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)

	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)

	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->req_skb) {
		u16 opcode = hci_skb_opcode(hdev->req_skb);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);

		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)

	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)

	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)

	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;

void hci_remote_oob_data_clear(struct hci_dev *hdev)

	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)

	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)

	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)

	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)

	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)

	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)

	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;

static void adv_instance_rpa_expired(struct work_struct *work)

	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	adv_instance->rpa_expired = true;

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u16 adv_data_len, u8 *adv_data,
				      u16 scan_rsp_len, u8 *scan_rsp_data,
				      u16 timeout, u16 duration, s8 tx_power,
				      u32 min_interval, u32 max_interval,
				      u8 mesh_handle)

	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);
	if (adv) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);

		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
		if (!adv)
			return ERR_PTR(-ENOMEM);

		adv->pending = true;
		adv->instance = instance;
		list_add(&adv->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv->min_interval = min_interval;
	adv->max_interval = max_interval;
	adv->tx_power = tx_power;
	/* Defining a mesh_handle changes the timing units to ms,
	 * rather than seconds, and ties the instance to the requested
	 * mesh_handle.
	 */
	adv->mesh = mesh_handle;

	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
				  scan_rsp_len, scan_rsp_data);

	adv->timeout = timeout;
	adv->remaining_time = timeout;

	if (duration == 0)
		adv->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv->duration = duration;

	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);

	BT_DBG("%s for %d", hdev->name, instance);
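/* Usage sketch (illustrative, caller must hold hdev->lock): registering a
 * plain instance with default intervals mirrors the call that
 * hci_add_per_instance() makes below:
 *
 *	adv = hci_add_adv_instance(hdev, 0x01, 0, adv_data_len, adv_data,
 *				   0, NULL, 0, 0,
 *				   HCI_ADV_TX_POWER_NO_PREFERENCE,
 *				   hdev->le_adv_min_interval,
 *				   hdev->le_adv_max_interval, 0);
 *	if (IS_ERR(adv))
 *		return PTR_ERR(adv);
 */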
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)

	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval, 0);
	if (IS_ERR(adv))
		return adv;

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

	if (data)
		memcpy(adv->per_adv_data, data, data_len);

	return adv;

/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)

	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;

/* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)

	u32 flags;
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv)
		return 0;

	return adv->flags;

bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)

	struct adv_info *adv;

	/* Instance 0x00 always set local name */
	if (instance == 0x00)
		return true;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return false;

	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return false;

	return adv->scan_rsp_len ? true : false;
/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)

	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);

/* Frees the monitor structure and does some bookkeeping.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)

	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	if (!monitor)
		return;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}

	kfree(monitor);

/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)

	int min, max, handle;
	int status = 0;

	if (!monitor)
		return -EINVAL;

	hci_dev_lock(hdev);

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);

	hci_dev_unlock(hdev);

	if (handle < 0)
		return handle;

	monitor->handle = handle;

	if (!hdev_is_powered(hdev))
		return status;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "add monitor %d status %d",
			   monitor->handle, status);
		/* Message was not forwarded to controller - not an error */
		break;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "add monitor %d msft status %d",
			   handle, status);
		break;
	}

	return status;

/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)

	int status = 0;
	int handle;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "remove monitor %d status %d",
			   monitor->handle, status);
		goto free_monitor;

	case HCI_ADV_MONITOR_EXT_MSFT:
		handle = monitor->handle;
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
			   handle, status);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)
		goto free_monitor;

	return status;

free_monitor:
	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	return status;

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)

	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);

	if (!monitor)
		return -EINVAL;

	return hci_remove_adv_monitor(hdev, monitor);

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_all_adv_monitor(struct hci_dev *hdev)

	struct adv_monitor *monitor;
	int idr_next_id = 0;
	int status = 0;

	while (1) {
		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
		if (!monitor)
			break;

		status = hci_remove_adv_monitor(hdev, monitor);
		if (status)
			return status;

		idr_next_id++;
	}

	return status;

/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring(struct hci_dev *hdev)

	return !idr_is_empty(&hdev->adv_monitors_idr);

int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)

	if (msft_monitor_supported(hdev))
		return HCI_ADV_MONITOR_EXT_MSFT;

	return HCI_ADV_MONITOR_EXT_NONE;

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)

	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;

struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
					struct list_head *bdaddr_list,
					bdaddr_t *bdaddr, u8 type)

	struct bdaddr_list_with_irk *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;

struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
				  bdaddr_t *bdaddr, u8 type)

	struct bdaddr_list_with_flags *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)

	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
	}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)

	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
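/* Usage sketch (illustrative): adding a public BR/EDR peer to the accept
 * list; duplicates return -EEXIST and BDADDR_ANY is not a valid entry:
 *
 *	err = hci_bdaddr_list_add(&hdev->accept_list, &bdaddr, BDADDR_BREDR);
 */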
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type, u8 *peer_irk, u8 *local_irk)

	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	if (peer_irk)
		memcpy(entry->peer_irk, peer_irk, 16);

	if (local_irk)
		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);

	return 0;

int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type, u32 flags)

	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;
	entry->flags = flags;

	list_add(&entry->list, list);

	return 0;

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)

	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;

int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type)

	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;

int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type)

	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)

	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;

/* This function requires the caller holds hdev->lock or rcu_read_lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)

	struct hci_conn_params *param;

	rcu_read_lock();

	list_for_each_entry_rcu(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type) {
			rcu_read_unlock();
			return param;
		}
	}

	rcu_read_unlock();

	return NULL;

/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_del_init(struct hci_conn_params *param)

	if (list_empty(&param->action))
		return;

	list_del_rcu(&param->action);
	synchronize_rcu();
	INIT_LIST_HEAD(&param->action);

/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_add(struct hci_conn_params *param,
			  struct list_head *list)

	list_add_rcu(&param->action, list);

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)

	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
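/* Usage sketch (illustrative, caller must hold hdev->lock): marking an LE
 * peer for background autoconnect reuses or allocates its parameter entry:
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (params)
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 */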
void hci_conn_params_free(struct hci_conn_params *params)

	hci_pend_le_list_del_init(params);

	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->list);
	kfree(params);

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)

	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_passive_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)

	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		hci_conn_params_free(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");

/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)

	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)

	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
static void hci_clear_wake_reason(struct hci_dev *hdev)

	hci_dev_lock(hdev);

	hdev->wake_reason = 0;
	bacpy(&hdev->wake_addr, BDADDR_ANY);
	hdev->wake_addr_type = 0;

	hci_dev_unlock(hdev);

static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
				void *data)

	struct hci_dev *hdev =
		container_of(nb, struct hci_dev, suspend_notifier);
	int ret = 0;

	/* Userspace has full control of this device. Do nothing. */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return NOTIFY_DONE;

	/* To avoid a potential race with hci_unregister_dev. */
	hci_dev_hold(hdev);

	if (action == PM_SUSPEND_PREPARE)
		ret = hci_suspend_dev(hdev);
	else if (action == PM_POST_SUSPEND)
		ret = hci_resume_dev(hdev);

	if (ret)
		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
			   action, ret);

	hci_dev_put(hdev);
	return NOTIFY_DONE;

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)

	struct hci_dev *hdev;
	unsigned int alloc_size;

	alloc_size = sizeof(*hdev);
	if (sizeof_priv) {
		/* Fixme: May need ALIGN-ment? */
		alloc_size += sizeof_priv;
	}

	hdev = kzalloc(alloc_size, GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_scan_int_suspend = 0x0400;
	hdev->le_scan_window_suspend = 0x0012;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = 0x0060;
	hdev->le_scan_window_adv_monitor = 0x0030;
	hdev->le_scan_int_connect = 0x0060;
	hdev->le_scan_window_connect = 0x0060;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mesh_pending);
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->reject_list);
	INIT_LIST_HEAD(&hdev->accept_list);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_accept_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);
	INIT_LIST_HEAD(&hdev->monitored_devices);

	INIT_LIST_HEAD(&hdev->local_codecs);
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	hci_cmd_sync_init(hdev);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;

EXPORT_SYMBOL(hci_alloc_dev_priv);
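/* Usage sketch (illustrative, driver side): a transport driver pairs
 * hci_alloc_dev()/hci_register_dev() on probe with hci_unregister_dev()/
 * hci_free_dev() on remove; my_open/my_close/my_send are hypothetical
 * driver callbacks:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */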
2588 /* Free HCI device */
2589 void hci_free_dev(struct hci_dev *hdev)
2591 /* will free via device release */
2592 put_device(&hdev->dev);
2594 EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	error = dev_set_name(&hdev->dev, "hci%u", id);
	if (error)
		return error;

	hdev->name = dev_name(&hdev->dev);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
	 * callback.
	 */
	if (hdev->wakeup)
		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	error = hci_register_suspend_notifier(hdev);
	if (error)
		BT_WARN("register suspend notifier failed error:%d\n", error);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	idr_init(&hdev->adv_monitors_idr);
	msft_register(hdev);

	return id;

err_wqueue:
	debugfs_remove_recursive(hdev->debugfs);
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
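
/* Illustrative driver-side usage (a sketch, not part of this file; the
 * my_* callbacks are hypothetical):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */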
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	mutex_lock(&hdev->unregister_lock);
	hci_dev_set_flag(hdev, HCI_UNREGISTER);
	mutex_unlock(&hdev->unregister_lock);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_cmd_sync_clear(hdev);

	hci_unregister_suspend_notifier(hdev);

	msft_unregister(hdev);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending mgmt lists.
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Release HCI device */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_codec_list_clear(&hdev->local_codecs);
	hci_dev_unlock(hdev);

	ida_simple_remove(&hci_index_ida, hdev->id);
	kfree_skb(hdev->sent_cmd);
	kfree_skb(hdev->req_skb);
	kfree_skb(hdev->recv_event);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);
int hci_register_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;

	if (!hdev->suspend_notifier.notifier_call &&
	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
		ret = register_pm_notifier(&hdev->suspend_notifier);
	}

	return ret;
}

int hci_unregister_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;

	if (hdev->suspend_notifier.notifier_call) {
		ret = unregister_pm_notifier(&hdev->suspend_notifier);
		if (!ret)
			hdev->suspend_notifier.notifier_call = NULL;
	}

	return ret;
}
/* Cancel ongoing command synchronously:
 *
 * - Cancel command timer
 * - Reset command counter
 * - Cancel command request
 */
static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	cancel_delayed_work_sync(&hdev->cmd_timer);
	cancel_delayed_work_sync(&hdev->ncmd_timer);
	atomic_set(&hdev->cmd_cnt, 1);

	hci_cmd_sync_cancel_sync(hdev, err);
}
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Suspend should only act on when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to suspend */
	if (mgmt_powering_down(hdev))
		return 0;

	/* Cancel potentially blocking sync operation before suspend */
	hci_cancel_cmd_sync(hdev, EHOSTDOWN);

	hci_req_sync_lock(hdev);
	ret = hci_suspend_sync(hdev);
	hci_req_sync_unlock(hdev);

	hci_clear_wake_reason(hdev);
	mgmt_suspending(hdev, hdev->suspend_state);

	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return ret;
}
EXPORT_SYMBOL(hci_suspend_dev);
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Resume should only act on when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to resume */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_resume_sync(hdev);
	hci_req_sync_unlock(hdev);

	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
		      hdev->wake_addr_type);

	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return ret;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, hw_err, 3);

	bt_dev_err(hdev, "Injecting HCI hardware error event");

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
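
/* Note on the injected frame above (summary): an HCI event packet is an
 * event code, a parameter length and the parameters, so hw_err[] decodes as
 * event 0x10 (HCI_EV_HARDWARE_ERROR), plen 0x01 and hardware code 0x00.
 * Feeding it through hci_recv_frame() makes the stack handle the reset
 * exactly as if the controller itself had reported the error.
 */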
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_EVENT_PKT:
		break;
	case HCI_ACLDATA_PKT:
		/* Detect if ISO packet has been sent as ACL */
		if (hci_conn_num(hdev, ISO_LINK)) {
			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
			__u8 type;

			type = hci_conn_lookup_type(hdev, hci_handle(handle));
			if (type == ISO_LINK)
				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
		}
		break;
	case HCI_SCODATA_PKT:
		break;
	case HCI_ISODATA_PKT:
		break;
	default:
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
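
/* Illustrative driver-side usage (a sketch, not part of this file): once a
 * transport driver has reassembled a complete packet, it tags the type and
 * hands the skb to the core:
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, len);
 *	hci_recv_frame(hdev, skb);
 */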
/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);

void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
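
/* Illustrative usage (a sketch, not part of this file): upper protocols such
 * as L2CAP, SCO and ISO register a callback table to be notified about
 * connection events. The my_proto_* names are hypothetical:
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name        = "my_proto",
 *		.connect_cfm = my_proto_connect_cfm,
 *		.disconn_cfm = my_proto_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 */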
static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
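
/* Illustrative usage (a sketch, not part of this file): __hci_cmd_send() is
 * only valid for vendor-specific opcodes, i.e. OGF 0x3f. The opcode and
 * parameter below are hypothetical:
 *
 *	u8 param = 0x01;
 *
 *	__hci_cmd_send(hdev, 0xfc0e, sizeof(param), &param);
 */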
/* Get data from the previously sent command */
static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
		return NULL;

	hdr = (void *)skb->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	return skb->data + HCI_COMMAND_HDR_SIZE;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	void *data;

	/* Check if opcode matches last sent command */
	data = hci_cmd_data(hdev->sent_cmd, opcode);
	if (!data)
		/* Check if opcode matches last request */
		data = hci_cmd_data(hdev->req_skb, opcode);

	return data;
}
/* Get data from last received event */
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
{
	struct hci_event_hdr *hdr;
	int offset;

	if (!hdev->recv_event)
		return NULL;

	hdr = (void *)hdev->recv_event->data;
	offset = sizeof(*hdr);

	if (hdr->evt != event) {
		/* In case of LE metaevent check the subevent match */
		if (hdr->evt == HCI_EV_LE_META) {
			struct hci_ev_le_meta *ev;

			ev = (void *)hdev->recv_event->data + offset;
			offset += sizeof(*ev);

			if (ev->subevent == event)
				goto found;
		}
		return NULL;
	}

found:
	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	return hdev->recv_event->data + offset;
}
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
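
/* Note on the fragmentation above (summary): only the first fragment keeps
 * the ACL_START packet-boundary flag; every following fragment is re-tagged
 * ACL_CONT so the controller can reassemble the L2CAP PDU. Queuing all
 * fragments under the queue lock keeps the TX scheduler from interleaving
 * fragments of different PDUs.
 */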
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
{
	struct hci_iso_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ISO_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;
	__u16 flags;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;

	list = skb_shinfo(skb)->frag_list;

	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
	hci_add_iso_hdr(skb, conn->handle, flags);

	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		__skb_queue_tail(queue, skb);

		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
						   0x00);
			hci_add_iso_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);
	}
}
void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hci_queue_iso(conn, &conn->data_q, skb);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
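
/* Note (summary): ISO fragments carry explicit boundary flags - ISO_SINGLE
 * for an unfragmented SDU, otherwise ISO_START for the first fragment,
 * ISO_CONT for the middle ones and ISO_END for the last, as packed by
 * hci_iso_flags_pack() above.
 */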
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
{
	struct hci_dev *hdev;
	int cnt, q;

	if (!conn) {
		*quote = 0;
		return;
	}

	hdev = conn->hdev;

	switch (conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	case ISO_LINK:
		cnt = hdev->iso_mtu ? hdev->iso_cnt :
			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
}
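
/* Note (summary): the quote computed above is the number of free controller
 * buffers for the link type divided by the number of connections of that
 * type with queued data, with a minimum of one, so every busy connection
 * makes progress on each scheduling round.
 */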
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	hci_quote_sent(conn, num, quote);

	BT_DBG("conn %p quote %d", conn, *quote);

	return conn;
}
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	hci_quote_sent(chan->conn, num, quote);

	BT_DBG("chan %p quote %d", chan, *quote);

	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
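
/* Note (summary): a channel that sent nothing since the last recalculation
 * gets its queued packets promoted to HCI_PRIO_MAX - 1, so high-priority
 * traffic cannot starve low-priority channels indefinitely.
 */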
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
	unsigned long last_tx;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return;

	switch (type) {
	case LE_LINK:
		last_tx = hdev->le_last_tx;
		break;
	default:
		last_tx = hdev->acl_last_tx;
		break;
	}

	/* tx timeout must be longer than maximum link supervision timeout
	 * (40.9 seconds)
	 */
	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
		hci_link_tx_to(hdev, type);
}
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt, ACL_LINK);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	__check_timeout(hdev, cnt, type);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				break;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
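
/* Note (summary): packet-based flow control accounts in whole ACL packets
 * against the controller's buffer count, while block-based flow control
 * (used with AMP controllers) accounts in fixed-size buffer blocks via
 * __get_blocks() above.
 */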
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt, LE_LINK);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_sched_iso(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, *cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ISO_LINK))
		return;

	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
			(*cnt)--;
		}
	}
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_iso(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
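
/* Note (a reading of the code above, not an authoritative statement):
 * synchronous audio (SCO/eSCO) and isochronous traffic are drained before
 * ACL and LE data, which keeps the most latency-sensitive packets at the
 * front of each TX round.
 */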
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		bt_cb(skb)->sco.pkt_status = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
				       handle);
	}

	kfree_skb(skb);
}
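
/* Note (summary): for SCO the two flag bits of the handle field carry the
 * Packet_Status_Flag from erroneous data reporting - 0x00 correctly
 * received, 0x01 possibly invalid, 0x02 no data received, 0x03 partially
 * lost - stashed in bt_cb(skb)->sco.pkt_status for the SCO socket layer.
 */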
static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_iso_hdr *hdr;
	struct hci_conn *conn;
	__u16 handle, flags;

	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ISO packet too small");
		goto drop;
	}

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (!conn) {
		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
			   handle);
		goto drop;
	}

	/* Send to upper protocol */
	iso_recv(conn, skb, flags);
	return;

drop:
	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}
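
/* Note (summary): commands are grouped into requests by flagging only the
 * first command of each request with HCI_REQ_START; hci_req_is_complete()
 * relies on this to tell whether more commands of the current request are
 * still sitting in cmd_q.
 */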
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	skb = hdev->req_skb;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->req_skb instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		return;
	}

	if (skb && bt_cb(skb)->hci.req_complete) {
		*req_complete = bt_cb(skb)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions are used to collect packet-parsing
	 * coverage from this background thread and to associate that
	 * coverage with the syscall thread that originally injected the
	 * packet. This helps with fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * When the device is in HCI_INIT, we still need to pass
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	bt_dev_dbg(hdev, "skb %p", skb);

	kfree_skb(hdev->sent_cmd);

	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
	if (!hdev->sent_cmd) {
		skb_queue_head(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
		return;
	}

	err = hci_send_frame(hdev, skb);
	if (err < 0) {
		hci_cmd_sync_cancel_sync(hdev, -err);
		return;
	}

	if (hci_req_status_pend(hdev) &&
	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	}

	atomic_dec(&hdev->cmd_cnt);
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		hci_send_cmd_sync(hdev, skb);

		rcu_read_lock();
		if (test_bit(HCI_RESET, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
			cancel_delayed_work(&hdev->cmd_timer);
		else
			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,