2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
/* Worker prototypes; the work functions are defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

/* States of a synchronous HCI request (hdev->req_status) */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

/* Serialize synchronous requests against a single controller */
#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
68 /* ---- HCI debugfs entries ---- */
70 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
71 size_t count, loff_t *ppos)
73 struct hci_dev *hdev = file->private_data;
76 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
79 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
82 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
83 size_t count, loff_t *ppos)
85 struct hci_dev *hdev = file->private_data;
88 size_t buf_size = min(count, (sizeof(buf)-1));
91 if (!test_bit(HCI_UP, &hdev->flags))
94 if (copy_from_user(buf, user_buf, buf_size))
98 if (strtobool(buf, &enable))
101 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
106 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
109 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
111 hci_req_unlock(hdev);
118 hci_dev_change_flag(hdev, HCI_DUT_MODE);
123 static const struct file_operations dut_mode_fops = {
125 .read = dut_mode_read,
126 .write = dut_mode_write,
127 .llseek = default_llseek,
130 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
131 size_t count, loff_t *ppos)
133 struct hci_dev *hdev = file->private_data;
136 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
139 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
142 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
143 size_t count, loff_t *ppos)
145 struct hci_dev *hdev = file->private_data;
147 size_t buf_size = min(count, (sizeof(buf)-1));
151 if (copy_from_user(buf, user_buf, buf_size))
154 buf[buf_size] = '\0';
155 if (strtobool(buf, &enable))
158 /* When the diagnostic flags are not persistent and the transport
159 * is not active, then there is no need for the vendor callback.
161 * Instead just store the desired value. If needed the setting
162 * will be programmed when the controller gets powered on.
164 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
165 !test_bit(HCI_RUNNING, &hdev->flags))
169 err = hdev->set_diag(hdev, enable);
170 hci_req_unlock(hdev);
177 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
179 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
184 static const struct file_operations vendor_diag_fops = {
186 .read = vendor_diag_read,
187 .write = vendor_diag_write,
188 .llseek = default_llseek,
191 static void hci_debugfs_create_basic(struct hci_dev *hdev)
193 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
197 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
201 /* ---- HCI requests ---- */
203 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
206 BT_DBG("%s result 0x%2.2x", hdev->name, result);
208 if (hdev->req_status == HCI_REQ_PEND) {
209 hdev->req_result = result;
210 hdev->req_status = HCI_REQ_DONE;
212 hdev->req_skb = skb_get(skb);
213 wake_up_interruptible(&hdev->req_wait_q);
217 static void hci_req_cancel(struct hci_dev *hdev, int err)
219 BT_DBG("%s err 0x%2.2x", hdev->name, err);
221 if (hdev->req_status == HCI_REQ_PEND) {
222 hdev->req_result = err;
223 hdev->req_status = HCI_REQ_CANCELED;
224 wake_up_interruptible(&hdev->req_wait_q);
228 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
229 const void *param, u8 event, u32 timeout)
231 DECLARE_WAITQUEUE(wait, current);
232 struct hci_request req;
236 BT_DBG("%s", hdev->name);
238 hci_req_init(&req, hdev);
240 hci_req_add_ev(&req, opcode, plen, param, event);
242 hdev->req_status = HCI_REQ_PEND;
244 add_wait_queue(&hdev->req_wait_q, &wait);
245 set_current_state(TASK_INTERRUPTIBLE);
247 err = hci_req_run_skb(&req, hci_req_sync_complete);
249 remove_wait_queue(&hdev->req_wait_q, &wait);
250 set_current_state(TASK_RUNNING);
254 schedule_timeout(timeout);
256 remove_wait_queue(&hdev->req_wait_q, &wait);
258 if (signal_pending(current))
259 return ERR_PTR(-EINTR);
261 switch (hdev->req_status) {
263 err = -bt_to_errno(hdev->req_result);
266 case HCI_REQ_CANCELED:
267 err = -hdev->req_result;
275 hdev->req_status = hdev->req_result = 0;
277 hdev->req_skb = NULL;
279 BT_DBG("%s end: err %d", hdev->name, err);
287 return ERR_PTR(-ENODATA);
291 EXPORT_SYMBOL(__hci_cmd_sync_ev);
293 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
294 const void *param, u32 timeout)
296 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
298 EXPORT_SYMBOL(__hci_cmd_sync);
300 /* Execute request and wait for completion. */
301 static int __hci_req_sync(struct hci_dev *hdev,
302 void (*func)(struct hci_request *req,
304 unsigned long opt, __u32 timeout)
306 struct hci_request req;
307 DECLARE_WAITQUEUE(wait, current);
310 BT_DBG("%s start", hdev->name);
312 hci_req_init(&req, hdev);
314 hdev->req_status = HCI_REQ_PEND;
318 add_wait_queue(&hdev->req_wait_q, &wait);
319 set_current_state(TASK_INTERRUPTIBLE);
321 err = hci_req_run_skb(&req, hci_req_sync_complete);
323 hdev->req_status = 0;
325 remove_wait_queue(&hdev->req_wait_q, &wait);
326 set_current_state(TASK_RUNNING);
328 /* ENODATA means the HCI request command queue is empty.
329 * This can happen when a request with conditionals doesn't
330 * trigger any commands to be sent. This is normal behavior
331 * and should not trigger an error return.
339 schedule_timeout(timeout);
341 remove_wait_queue(&hdev->req_wait_q, &wait);
343 if (signal_pending(current))
346 switch (hdev->req_status) {
348 err = -bt_to_errno(hdev->req_result);
351 case HCI_REQ_CANCELED:
352 err = -hdev->req_result;
360 hdev->req_status = hdev->req_result = 0;
362 BT_DBG("%s end: err %d", hdev->name, err);
367 static int hci_req_sync(struct hci_dev *hdev,
368 void (*req)(struct hci_request *req,
370 unsigned long opt, __u32 timeout)
374 /* Serialize all requests */
376 /* check the state after obtaing the lock to protect the HCI_UP
377 * against any races from hci_dev_do_close when the controller
380 if (test_bit(HCI_UP, &hdev->flags))
381 ret = __hci_req_sync(hdev, req, opt, timeout);
385 hci_req_unlock(hdev);
390 static void hci_reset_req(struct hci_request *req, unsigned long opt)
392 BT_DBG("%s %ld", req->hdev->name, opt);
395 set_bit(HCI_RESET, &req->hdev->flags);
396 hci_req_add(req, HCI_OP_RESET, 0, NULL);
399 static void bredr_init(struct hci_request *req)
401 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
403 /* Read Local Supported Features */
404 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
406 /* Read Local Version */
407 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
409 /* Read BD Address */
410 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
413 static void amp_init1(struct hci_request *req)
415 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
417 /* Read Local Version */
418 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
420 /* Read Local Supported Commands */
421 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
423 /* Read Local AMP Info */
424 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
426 /* Read Data Blk size */
427 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
429 /* Read Flow Control Mode */
430 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
432 /* Read Location Data */
433 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
436 static void amp_init2(struct hci_request *req)
438 /* Read Local Supported Features. Not all AMP controllers
439 * support this so it's placed conditionally in the second
442 if (req->hdev->commands[14] & 0x20)
443 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
446 static void hci_init1_req(struct hci_request *req, unsigned long opt)
448 struct hci_dev *hdev = req->hdev;
450 BT_DBG("%s %ld", hdev->name, opt);
453 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
454 hci_reset_req(req, 0);
456 switch (hdev->dev_type) {
466 BT_ERR("Unknown device type %d", hdev->dev_type);
471 static void bredr_setup(struct hci_request *req)
476 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
477 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
479 /* Read Class of Device */
480 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
482 /* Read Local Name */
483 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
485 /* Read Voice Setting */
486 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
488 /* Read Number of Supported IAC */
489 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
491 /* Read Current IAC LAP */
492 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
494 /* Clear Event Filters */
495 flt_type = HCI_FLT_CLEAR_ALL;
496 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
498 /* Connection accept timeout ~20 secs */
499 param = cpu_to_le16(0x7d00);
500 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
503 static void le_setup(struct hci_request *req)
505 struct hci_dev *hdev = req->hdev;
507 /* Read LE Buffer Size */
508 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
510 /* Read LE Local Supported Features */
511 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
513 /* Read LE Supported States */
514 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
516 /* LE-only controllers have LE implicitly enabled */
517 if (!lmp_bredr_capable(hdev))
518 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
521 static void hci_setup_event_mask(struct hci_request *req)
523 struct hci_dev *hdev = req->hdev;
525 /* The second byte is 0xff instead of 0x9f (two reserved bits
526 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
529 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
531 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
532 * any event mask for pre 1.2 devices.
534 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
537 if (lmp_bredr_capable(hdev)) {
538 events[4] |= 0x01; /* Flow Specification Complete */
539 events[4] |= 0x02; /* Inquiry Result with RSSI */
540 events[4] |= 0x04; /* Read Remote Extended Features Complete */
541 events[5] |= 0x08; /* Synchronous Connection Complete */
542 events[5] |= 0x10; /* Synchronous Connection Changed */
544 /* Use a different default for LE-only devices */
545 memset(events, 0, sizeof(events));
546 events[0] |= 0x10; /* Disconnection Complete */
547 events[1] |= 0x08; /* Read Remote Version Information Complete */
548 events[1] |= 0x20; /* Command Complete */
549 events[1] |= 0x40; /* Command Status */
550 events[1] |= 0x80; /* Hardware Error */
551 events[2] |= 0x04; /* Number of Completed Packets */
552 events[3] |= 0x02; /* Data Buffer Overflow */
554 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
555 events[0] |= 0x80; /* Encryption Change */
556 events[5] |= 0x80; /* Encryption Key Refresh Complete */
560 if (lmp_inq_rssi_capable(hdev))
561 events[4] |= 0x02; /* Inquiry Result with RSSI */
563 if (lmp_sniffsubr_capable(hdev))
564 events[5] |= 0x20; /* Sniff Subrating */
566 if (lmp_pause_enc_capable(hdev))
567 events[5] |= 0x80; /* Encryption Key Refresh Complete */
569 if (lmp_ext_inq_capable(hdev))
570 events[5] |= 0x40; /* Extended Inquiry Result */
572 if (lmp_no_flush_capable(hdev))
573 events[7] |= 0x01; /* Enhanced Flush Complete */
575 if (lmp_lsto_capable(hdev))
576 events[6] |= 0x80; /* Link Supervision Timeout Changed */
578 if (lmp_ssp_capable(hdev)) {
579 events[6] |= 0x01; /* IO Capability Request */
580 events[6] |= 0x02; /* IO Capability Response */
581 events[6] |= 0x04; /* User Confirmation Request */
582 events[6] |= 0x08; /* User Passkey Request */
583 events[6] |= 0x10; /* Remote OOB Data Request */
584 events[6] |= 0x20; /* Simple Pairing Complete */
585 events[7] |= 0x04; /* User Passkey Notification */
586 events[7] |= 0x08; /* Keypress Notification */
587 events[7] |= 0x10; /* Remote Host Supported
588 * Features Notification
592 if (lmp_le_capable(hdev))
593 events[7] |= 0x20; /* LE Meta-Event */
595 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
598 static void hci_init2_req(struct hci_request *req, unsigned long opt)
600 struct hci_dev *hdev = req->hdev;
602 if (hdev->dev_type == HCI_AMP)
603 return amp_init2(req);
605 if (lmp_bredr_capable(hdev))
608 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
610 if (lmp_le_capable(hdev))
613 /* All Bluetooth 1.2 and later controllers should support the
614 * HCI command for reading the local supported commands.
616 * Unfortunately some controllers indicate Bluetooth 1.2 support,
617 * but do not have support for this command. If that is the case,
618 * the driver can quirk the behavior and skip reading the local
619 * supported commands.
621 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
622 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
623 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
625 if (lmp_ssp_capable(hdev)) {
626 /* When SSP is available, then the host features page
627 * should also be available as well. However some
628 * controllers list the max_page as 0 as long as SSP
629 * has not been enabled. To achieve proper debugging
630 * output, force the minimum max_page to 1 at least.
632 hdev->max_page = 0x01;
634 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
637 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
638 sizeof(mode), &mode);
640 struct hci_cp_write_eir cp;
642 memset(hdev->eir, 0, sizeof(hdev->eir));
643 memset(&cp, 0, sizeof(cp));
645 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
649 if (lmp_inq_rssi_capable(hdev) ||
650 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
653 /* If Extended Inquiry Result events are supported, then
654 * they are clearly preferred over Inquiry Result with RSSI
657 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
659 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
662 if (lmp_inq_tx_pwr_capable(hdev))
663 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
665 if (lmp_ext_feat_capable(hdev)) {
666 struct hci_cp_read_local_ext_features cp;
669 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
673 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
675 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
680 static void hci_setup_link_policy(struct hci_request *req)
682 struct hci_dev *hdev = req->hdev;
683 struct hci_cp_write_def_link_policy cp;
686 if (lmp_rswitch_capable(hdev))
687 link_policy |= HCI_LP_RSWITCH;
688 if (lmp_hold_capable(hdev))
689 link_policy |= HCI_LP_HOLD;
690 if (lmp_sniff_capable(hdev))
691 link_policy |= HCI_LP_SNIFF;
692 if (lmp_park_capable(hdev))
693 link_policy |= HCI_LP_PARK;
695 cp.policy = cpu_to_le16(link_policy);
696 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
699 static void hci_set_le_support(struct hci_request *req)
701 struct hci_dev *hdev = req->hdev;
702 struct hci_cp_write_le_host_supported cp;
704 /* LE-only devices do not support explicit enablement */
705 if (!lmp_bredr_capable(hdev))
708 memset(&cp, 0, sizeof(cp));
710 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
715 if (cp.le != lmp_host_le_capable(hdev))
716 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
720 static void hci_set_event_mask_page_2(struct hci_request *req)
722 struct hci_dev *hdev = req->hdev;
723 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
724 bool changed = false;
726 /* If Connectionless Slave Broadcast master role is supported
727 * enable all necessary events for it.
729 if (lmp_csb_master_capable(hdev)) {
730 events[1] |= 0x40; /* Triggered Clock Capture */
731 events[1] |= 0x80; /* Synchronization Train Complete */
732 events[2] |= 0x10; /* Slave Page Response Timeout */
733 events[2] |= 0x20; /* CSB Channel Map Change */
737 /* If Connectionless Slave Broadcast slave role is supported
738 * enable all necessary events for it.
740 if (lmp_csb_slave_capable(hdev)) {
741 events[2] |= 0x01; /* Synchronization Train Received */
742 events[2] |= 0x02; /* CSB Receive */
743 events[2] |= 0x04; /* CSB Timeout */
744 events[2] |= 0x08; /* Truncated Page Complete */
748 /* Enable Authenticated Payload Timeout Expired event if supported */
749 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
754 /* Some Broadcom based controllers indicate support for Set Event
755 * Mask Page 2 command, but then actually do not support it. Since
756 * the default value is all bits set to zero, the command is only
757 * required if the event mask has to be changed. In case no change
758 * to the event mask is needed, skip this command.
761 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
762 sizeof(events), events);
765 static void hci_init3_req(struct hci_request *req, unsigned long opt)
767 struct hci_dev *hdev = req->hdev;
770 hci_setup_event_mask(req);
772 if (hdev->commands[6] & 0x20 &&
773 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
774 struct hci_cp_read_stored_link_key cp;
776 bacpy(&cp.bdaddr, BDADDR_ANY);
778 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
781 if (hdev->commands[5] & 0x10)
782 hci_setup_link_policy(req);
784 if (hdev->commands[8] & 0x01)
785 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
787 /* Some older Broadcom based Bluetooth 1.2 controllers do not
788 * support the Read Page Scan Type command. Check support for
789 * this command in the bit mask of supported commands.
791 if (hdev->commands[13] & 0x01)
792 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
794 if (lmp_le_capable(hdev)) {
797 memset(events, 0, sizeof(events));
800 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
801 events[0] |= 0x10; /* LE Long Term Key Request */
803 /* If controller supports the Connection Parameters Request
804 * Link Layer Procedure, enable the corresponding event.
806 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
807 events[0] |= 0x20; /* LE Remote Connection
811 /* If the controller supports the Data Length Extension
812 * feature, enable the corresponding event.
814 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
815 events[0] |= 0x40; /* LE Data Length Change */
817 /* If the controller supports Extended Scanner Filter
818 * Policies, enable the correspondig event.
820 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
821 events[1] |= 0x04; /* LE Direct Advertising
825 /* If the controller supports the LE Read Local P-256
826 * Public Key command, enable the corresponding event.
828 if (hdev->commands[34] & 0x02)
829 events[0] |= 0x80; /* LE Read Local P-256
830 * Public Key Complete
833 /* If the controller supports the LE Generate DHKey
834 * command, enable the corresponding event.
836 if (hdev->commands[34] & 0x04)
837 events[1] |= 0x01; /* LE Generate DHKey Complete */
839 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
842 if (hdev->commands[25] & 0x40) {
843 /* Read LE Advertising Channel TX Power */
844 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
847 if (hdev->commands[26] & 0x40) {
848 /* Read LE White List Size */
849 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
853 if (hdev->commands[26] & 0x80) {
854 /* Clear LE White List */
855 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
858 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
859 /* Read LE Maximum Data Length */
860 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
862 /* Read LE Suggested Default Data Length */
863 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
866 hci_set_le_support(req);
869 /* Read features beyond page 1 if available */
870 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
871 struct hci_cp_read_local_ext_features cp;
874 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
879 static void hci_init4_req(struct hci_request *req, unsigned long opt)
881 struct hci_dev *hdev = req->hdev;
883 /* Some Broadcom based Bluetooth controllers do not support the
884 * Delete Stored Link Key command. They are clearly indicating its
885 * absence in the bit mask of supported commands.
887 * Check the supported commands and only if the the command is marked
888 * as supported send it. If not supported assume that the controller
889 * does not have actual support for stored link keys which makes this
890 * command redundant anyway.
892 * Some controllers indicate that they support handling deleting
893 * stored link keys, but they don't. The quirk lets a driver
894 * just disable this command.
896 if (hdev->commands[6] & 0x80 &&
897 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
898 struct hci_cp_delete_stored_link_key cp;
900 bacpy(&cp.bdaddr, BDADDR_ANY);
901 cp.delete_all = 0x01;
902 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
906 /* Set event mask page 2 if the HCI command for it is supported */
907 if (hdev->commands[22] & 0x04)
908 hci_set_event_mask_page_2(req);
910 /* Read local codec list if the HCI command is supported */
911 if (hdev->commands[29] & 0x20)
912 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
914 /* Get MWS transport configuration if the HCI command is supported */
915 if (hdev->commands[30] & 0x08)
916 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
918 /* Check for Synchronization Train support */
919 if (lmp_sync_train_capable(hdev))
920 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
922 /* Enable Secure Connections if supported and configured */
923 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
924 bredr_sc_enabled(hdev)) {
927 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
928 sizeof(support), &support);
932 static int __hci_init(struct hci_dev *hdev)
936 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
940 if (hci_dev_test_flag(hdev, HCI_SETUP))
941 hci_debugfs_create_basic(hdev);
943 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
947 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
948 * BR/EDR/LE type controllers. AMP controllers only need the
949 * first two stages of init.
951 if (hdev->dev_type != HCI_BREDR)
954 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
958 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
962 /* This function is only called when the controller is actually in
963 * configured state. When the controller is marked as unconfigured,
964 * this initialization procedure is not run.
966 * It means that it is possible that a controller runs through its
967 * setup phase and then discovers missing settings. If that is the
968 * case, then this function will not be called. It then will only
969 * be called during the config phase.
971 * So only when in setup phase or config phase, create the debugfs
972 * entries and register the SMP channels.
974 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
975 !hci_dev_test_flag(hdev, HCI_CONFIG))
978 hci_debugfs_create_common(hdev);
980 if (lmp_bredr_capable(hdev))
981 hci_debugfs_create_bredr(hdev);
983 if (lmp_le_capable(hdev))
984 hci_debugfs_create_le(hdev);
989 static void hci_init0_req(struct hci_request *req, unsigned long opt)
991 struct hci_dev *hdev = req->hdev;
993 BT_DBG("%s %ld", hdev->name, opt);
996 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
997 hci_reset_req(req, 0);
999 /* Read Local Version */
1000 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1002 /* Read BD Address */
1003 if (hdev->set_bdaddr)
1004 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1007 static int __hci_unconf_init(struct hci_dev *hdev)
1011 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1014 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1018 if (hci_dev_test_flag(hdev, HCI_SETUP))
1019 hci_debugfs_create_basic(hdev);
1024 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1028 BT_DBG("%s %x", req->hdev->name, scan);
1030 /* Inquiry and Page scans */
1031 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1034 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1038 BT_DBG("%s %x", req->hdev->name, auth);
1040 /* Authentication */
1041 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1044 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1048 BT_DBG("%s %x", req->hdev->name, encrypt);
1051 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1054 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1056 __le16 policy = cpu_to_le16(opt);
1058 BT_DBG("%s %x", req->hdev->name, policy);
1060 /* Default link policy */
1061 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1064 /* Get HCI device by index.
1065 * Device is held on return. */
1066 struct hci_dev *hci_dev_get(int index)
1068 struct hci_dev *hdev = NULL, *d;
1070 BT_DBG("%d", index);
1075 read_lock(&hci_dev_list_lock);
1076 list_for_each_entry(d, &hci_dev_list, list) {
1077 if (d->id == index) {
1078 hdev = hci_dev_hold(d);
1082 read_unlock(&hci_dev_list_lock);
1086 /* ---- Inquiry support ---- */
1088 bool hci_discovery_active(struct hci_dev *hdev)
1090 struct discovery_state *discov = &hdev->discovery;
1092 switch (discov->state) {
1093 case DISCOVERY_FINDING:
1094 case DISCOVERY_RESOLVING:
1102 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1104 int old_state = hdev->discovery.state;
1106 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1108 if (old_state == state)
1111 hdev->discovery.state = state;
1114 case DISCOVERY_STOPPED:
1115 hci_update_background_scan(hdev);
1117 if (old_state != DISCOVERY_STARTING)
1118 mgmt_discovering(hdev, 0);
1120 case DISCOVERY_STARTING:
1122 case DISCOVERY_FINDING:
1123 mgmt_discovering(hdev, 1);
1125 case DISCOVERY_RESOLVING:
1127 case DISCOVERY_STOPPING:
1132 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1134 struct discovery_state *cache = &hdev->discovery;
1135 struct inquiry_entry *p, *n;
1137 list_for_each_entry_safe(p, n, &cache->all, all) {
1142 INIT_LIST_HEAD(&cache->unknown);
1143 INIT_LIST_HEAD(&cache->resolve);
1146 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1149 struct discovery_state *cache = &hdev->discovery;
1150 struct inquiry_entry *e;
1152 BT_DBG("cache %p, %pMR", cache, bdaddr);
1154 list_for_each_entry(e, &cache->all, all) {
1155 if (!bacmp(&e->data.bdaddr, bdaddr))
1162 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1165 struct discovery_state *cache = &hdev->discovery;
1166 struct inquiry_entry *e;
1168 BT_DBG("cache %p, %pMR", cache, bdaddr);
1170 list_for_each_entry(e, &cache->unknown, list) {
1171 if (!bacmp(&e->data.bdaddr, bdaddr))
1178 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1182 struct discovery_state *cache = &hdev->discovery;
1183 struct inquiry_entry *e;
1185 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1187 list_for_each_entry(e, &cache->resolve, list) {
1188 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1190 if (!bacmp(&e->data.bdaddr, bdaddr))
1197 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1198 struct inquiry_entry *ie)
1200 struct discovery_state *cache = &hdev->discovery;
1201 struct list_head *pos = &cache->resolve;
1202 struct inquiry_entry *p;
1204 list_del(&ie->list);
1206 list_for_each_entry(p, &cache->resolve, list) {
1207 if (p->name_state != NAME_PENDING &&
1208 abs(p->data.rssi) >= abs(ie->data.rssi))
1213 list_add(&ie->list, pos);
1216 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1219 struct discovery_state *cache = &hdev->discovery;
1220 struct inquiry_entry *ie;
1223 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1225 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1227 if (!data->ssp_mode)
1228 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1230 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1232 if (!ie->data.ssp_mode)
1233 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1235 if (ie->name_state == NAME_NEEDED &&
1236 data->rssi != ie->data.rssi) {
1237 ie->data.rssi = data->rssi;
1238 hci_inquiry_cache_update_resolve(hdev, ie);
1244 /* Entry not in the cache. Add new one. */
1245 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1247 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1251 list_add(&ie->all, &cache->all);
1254 ie->name_state = NAME_KNOWN;
1256 ie->name_state = NAME_NOT_KNOWN;
1257 list_add(&ie->list, &cache->unknown);
1261 if (name_known && ie->name_state != NAME_KNOWN &&
1262 ie->name_state != NAME_PENDING) {
1263 ie->name_state = NAME_KNOWN;
1264 list_del(&ie->list);
1267 memcpy(&ie->data, data, sizeof(*data));
1268 ie->timestamp = jiffies;
1269 cache->timestamp = jiffies;
1271 if (ie->name_state == NAME_NOT_KNOWN)
1272 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1278 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1280 struct discovery_state *cache = &hdev->discovery;
1281 struct inquiry_info *info = (struct inquiry_info *) buf;
1282 struct inquiry_entry *e;
1285 list_for_each_entry(e, &cache->all, all) {
1286 struct inquiry_data *data = &e->data;
1291 bacpy(&info->bdaddr, &data->bdaddr);
1292 info->pscan_rep_mode = data->pscan_rep_mode;
1293 info->pscan_period_mode = data->pscan_period_mode;
1294 info->pscan_mode = data->pscan_mode;
1295 memcpy(info->dev_class, data->dev_class, 3);
1296 info->clock_offset = data->clock_offset;
1302 BT_DBG("cache %p, copied %d", cache, copied);
1306 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1308 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1309 struct hci_dev *hdev = req->hdev;
1310 struct hci_cp_inquiry cp;
1312 BT_DBG("%s", hdev->name);
1314 if (test_bit(HCI_INQUIRY, &hdev->flags))
1318 memcpy(&cp.lap, &ir->lap, 3);
1319 cp.length = ir->length;
1320 cp.num_rsp = ir->num_rsp;
1321 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl handler: run (or reuse a fresh cache of) an inquiry and
 * copy the results back to user space.
 * @arg: user pointer to a struct hci_inquiry_req followed by result space.
 * Returns 0 on success or a negative errno (-EFAULT, -EBUSY, -EOPNOTSUPP,
 * -EINVAL, -EINTR, ...).
 */
1324 int hci_inquiry(void __user *arg)
1326 __u8 __user *ptr = arg;
1327 struct hci_inquiry_req ir;
1328 struct hci_dev *hdev;
1329 int err = 0, do_inquiry = 0, max_rsp;
1333 if (copy_from_user(&ir, ptr, sizeof(ir)))
1336 hdev = hci_dev_get(ir.dev_id);
/* Raw user-channel owners bypass the kernel HCI command path. */
1340 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1345 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
/* Inquiry is a BR/EDR-only procedure. */
1350 if (hdev->dev_type != HCI_BREDR) {
1355 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1360 /* Restrict maximum inquiry length to 60 seconds */
1361 if (ir.length > 60) {
/* A stale or flushed cache forces a fresh inquiry round. */
1367 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1368 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1369 hci_inquiry_cache_flush(hdev);
1372 hci_dev_unlock(hdev);
/* ir.length is in units of 1.28s; 2000ms gives generous headroom. */
1374 timeo = ir.length * msecs_to_jiffies(2000);
1377 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1382 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1383 * cleared). If it is interrupted by a signal, return -EINTR.
1385 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1386 TASK_INTERRUPTIBLE)) {
1392 /* for unlimited number of responses we will use buffer with
1395 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1397 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1398 * copy it to the user space.
1400 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1407 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1408 hci_dev_unlock(hdev);
1410 BT_DBG("num_rsp %d", ir.num_rsp);
/* First write back the updated request header, then the results. */
1412 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1414 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Core power-on path for a controller: runs driver open, optional vendor
 * setup, address validation and HCI init, then raises HCI_UP.
 * On any init failure the transport is torn back down and all work/queues
 * are flushed. Returns 0 or a negative errno.
 * NOTE(review): several interior lines (gotos, error labels, unlock calls)
 * are missing from this chunk; treat the flow below as an outline.
 */
1427 static int hci_dev_do_open(struct hci_dev *hdev)
1431 BT_DBG("%s %p", hdev->name, hdev);
1435 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1440 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1441 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1442 /* Check for rfkill but allow the HCI setup stage to
1443 * proceed (which in itself doesn't cause any RF activity).
1445 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1450 /* Check for valid public address or a configured static
1451 * random adddress, but let the HCI setup proceed to
1452 * be able to determine if there is a public address
1455 * In case of user channel usage, it is not important
1456 * if a public address or static random address is
1459 * This check is only valid for BR/EDR controllers
1460 * since AMP controllers do not have an address.
1462 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1463 hdev->dev_type == HCI_BREDR &&
1464 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1465 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1466 ret = -EADDRNOTAVAIL;
/* Already powered on: nothing to do. */
1471 if (test_bit(HCI_UP, &hdev->flags)) {
/* Driver transport open; failure aborts before any HCI traffic. */
1476 if (hdev->open(hdev)) {
1481 set_bit(HCI_RUNNING, &hdev->flags);
1482 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1484 atomic_set(&hdev->cmd_cnt, 1);
1485 set_bit(HCI_INIT, &hdev->flags);
1487 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1488 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
/* Vendor-specific setup callback (firmware load etc.), if provided. */
1491 ret = hdev->setup(hdev);
1493 /* The transport driver can set these quirks before
1494 * creating the HCI device or in its setup callback.
1496 * In case any of them is set, the controller has to
1497 * start up as unconfigured.
1499 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1500 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1501 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1503 /* For an unconfigured controller it is required to
1504 * read at least the version information provided by
1505 * the Read Local Version Information command.
1507 * If the set_bdaddr driver callback is provided, then
1508 * also the original Bluetooth public device address
1509 * will be read using the Read BD Address command.
1511 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1512 ret = __hci_unconf_init(hdev);
1515 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1516 /* If public address change is configured, ensure that
1517 * the address gets programmed. If the driver does not
1518 * support changing the public address, fail the power
1521 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1523 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1525 ret = -EADDRNOTAVAIL;
/* Full HCI init only for configured, kernel-managed controllers. */
1529 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1530 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1531 ret = __hci_init(hdev);
1532 if (!ret && hdev->post_init)
1533 ret = hdev->post_init(hdev);
1537 /* If the HCI Reset command is clearing all diagnostic settings,
1538 * then they need to be reprogrammed after the init procedure
1541 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1542 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1543 ret = hdev->set_diag(hdev, true);
1545 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark the device up and notify listeners/mgmt. */
1549 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1550 set_bit(HCI_UP, &hdev->flags);
1551 hci_sock_dev_event(hdev, HCI_DEV_UP);
1552 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1553 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1554 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1555 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1556 hdev->dev_type == HCI_BREDR) {
1558 mgmt_powered(hdev, 1);
1559 hci_dev_unlock(hdev);
1562 /* Init failed, cleanup */
1563 flush_work(&hdev->tx_work);
1565 /* Since hci_rx_work() is possible to awake new cmd_work
1566 * it should be flushed first to avoid unexpected call of
1569 flush_work(&hdev->rx_work);
1570 flush_work(&hdev->cmd_work);
1572 skb_queue_purge(&hdev->cmd_q);
1573 skb_queue_purge(&hdev->rx_q);
/* Drop the pending command skb so no stale reference survives. */
1578 if (hdev->sent_cmd) {
1579 kfree_skb(hdev->sent_cmd);
1580 hdev->sent_cmd = NULL;
1583 clear_bit(HCI_RUNNING, &hdev->flags);
1584 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
/* Preserve only HCI_RAW across the failed bring-up. */
1587 hdev->flags &= BIT(HCI_RAW);
1591 hci_req_unlock(hdev);
1595 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl entry point: resolve the device id, gate unconfigured
 * devices, cancel pending auto-off work, wait for setup to finish, then
 * delegate to hci_dev_do_open(). Returns 0 or a negative errno.
 */
1597 int hci_dev_open(__u16 dev)
1599 struct hci_dev *hdev;
1602 hdev = hci_dev_get(dev);
1606 /* Devices that are marked as unconfigured can only be powered
1607 * up as user channel. Trying to bring them up as normal devices
1608 * will result into a failure. Only user channel operation is
1611 * When this function is called for a user channel, the flag
1612 * HCI_USER_CHANNEL will be set first before attempting to
1615 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1616 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1621 /* We need to ensure that no other power on/off work is pending
1622 * before proceeding to call hci_dev_do_open. This is
1623 * particularly important if the setup procedure has not yet
1626 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1627 cancel_delayed_work(&hdev->power_off);
1629 /* After this call it is guaranteed that the setup procedure
1630 * has finished. This means that error conditions like RFKILL
1631 * or no valid public or static random address apply.
1633 flush_workqueue(hdev->req_workqueue);
1635 /* For controllers not using the management interface and that
1636 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1637 * so that pairing works for them. Once the management interface
1638 * is in use this bit will be cleared again and userspace has
1639 * to explicitly enable it.
1641 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1642 !hci_dev_test_flag(hdev, HCI_MGMT))
1643 hci_dev_set_flag(hdev, HCI_BONDABLE);
1645 err = hci_dev_do_open(hdev);
1652 /* This function requires the caller holds hdev->lock */
/* Drop every pending LE connection action: release any connection the
 * params still reference and unlink the action entry.
 * Caller must hold hdev->lock (see the comment above this function).
 */
1653 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1655 struct hci_conn_params *p;
1657 list_for_each_entry(p, &hdev->le_conn_params, list) {
/* Release both the usage and reference counts held via p->conn. */
1659 hci_conn_drop(p->conn);
1660 hci_conn_put(p->conn);
1663 list_del_init(&p->action);
1666 BT_DBG("All LE pending actions cleared");
/* Core power-off path: stop all timers and work items, flush queues, tear
 * down connections and the discovery/inquiry state, optionally issue an
 * HCI Reset, and finally close the driver transport. Returns 0.
 */
1669 int hci_dev_do_close(struct hci_dev *hdev)
1673 BT_DBG("%s %p", hdev->name, hdev);
1675 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1676 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1677 test_bit(HCI_UP, &hdev->flags)) {
1678 /* Execute vendor specific shutdown routine */
1680 hdev->shutdown(hdev);
1683 cancel_delayed_work(&hdev->power_off);
1685 hci_req_cancel(hdev, ENODEV);
/* NOTE(review): this vendor-shutdown guard is an exact repeat of the
 * block above (orig lines 1675-1680 vs 1688-1693) -- looks like a merge
 * artifact; confirm against the upstream file before relying on it.
 */
1688 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1689 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1690 test_bit(HCI_UP, &hdev->flags)) {
1691 /* Execute vendor specific shutdown routine */
1693 hdev->shutdown(hdev);
/* Not up: just stop the command timer and bail out early. */
1696 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1697 cancel_delayed_work_sync(&hdev->cmd_timer);
1698 hci_req_unlock(hdev);
1702 /* Flush RX and TX works */
1703 flush_work(&hdev->tx_work);
1704 flush_work(&hdev->rx_work);
/* Cancel the discoverable timeout and clear the related mode flags. */
1706 if (hdev->discov_timeout > 0) {
1707 cancel_delayed_work(&hdev->discov_off);
1708 hdev->discov_timeout = 0;
1709 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1710 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1713 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1714 cancel_delayed_work(&hdev->service_cache);
1716 cancel_delayed_work_sync(&hdev->le_scan_disable);
1717 cancel_delayed_work_sync(&hdev->le_scan_restart);
1719 if (hci_dev_test_flag(hdev, HCI_MGMT))
1720 cancel_delayed_work_sync(&hdev->rpa_expired);
/* Stop any running advertising-instance timeout. */
1722 if (hdev->adv_instance_timeout) {
1723 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1724 hdev->adv_instance_timeout = 0;
1727 /* Avoid potential lockdep warnings from the *_flush() calls by
1728 * ensuring the workqueue is empty up front.
1730 drain_workqueue(hdev->workqueue);
1734 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1736 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
/* Only report powered-off to mgmt for explicit (non-auto) BR/EDR off. */
1738 if (!auto_off && hdev->dev_type == HCI_BREDR)
1739 mgmt_powered(hdev, 0);
1741 hci_inquiry_cache_flush(hdev);
1742 hci_pend_le_actions_clear(hdev);
1743 hci_conn_hash_flush(hdev);
1744 hci_dev_unlock(hdev);
1746 smp_unregister(hdev);
1748 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
/* Issue a final HCI Reset when the quirk demands it (and it is safe). */
1754 skb_queue_purge(&hdev->cmd_q);
1755 atomic_set(&hdev->cmd_cnt, 1);
1756 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1757 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1758 set_bit(HCI_INIT, &hdev->flags);
1759 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1760 clear_bit(HCI_INIT, &hdev->flags);
1763 /* flush cmd work */
1764 flush_work(&hdev->cmd_work);
1767 skb_queue_purge(&hdev->rx_q);
1768 skb_queue_purge(&hdev->cmd_q);
1769 skb_queue_purge(&hdev->raw_q);
1771 /* Drop last sent command */
1772 if (hdev->sent_cmd) {
1773 cancel_delayed_work_sync(&hdev->cmd_timer);
1774 kfree_skb(hdev->sent_cmd);
1775 hdev->sent_cmd = NULL;
1778 clear_bit(HCI_RUNNING, &hdev->flags);
1779 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1781 /* After this point our queues are empty
1782 * and no tasks are scheduled. */
/* Keep only HCI_RAW; all volatile state is reset for the next open. */
1786 hdev->flags &= BIT(HCI_RAW);
1787 hci_dev_clear_volatile_flags(hdev);
1789 /* Controller radio is available but is currently powered down */
1790 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1792 memset(hdev->eir, 0, sizeof(hdev->eir));
1793 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1794 bacpy(&hdev->random_addr, BDADDR_ANY);
1796 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl entry point: reject user-channel devices, cancel a
 * pending auto-off, then delegate to hci_dev_do_close().
 * Returns 0 or a negative errno.
 */
1802 int hci_dev_close(__u16 dev)
1804 struct hci_dev *hdev;
1807 hdev = hci_dev_get(dev);
1811 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1816 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1817 cancel_delayed_work(&hdev->power_off);
1819 err = hci_dev_do_close(hdev);
/* Perform a device reset: purge queues, flush caches and connections,
 * zero the flow-control counters and synchronously issue HCI Reset.
 * Returns the result of the synchronous reset request.
 */
1826 static int hci_dev_do_reset(struct hci_dev *hdev)
1830 BT_DBG("%s %p", hdev->name, hdev);
1835 skb_queue_purge(&hdev->rx_q);
1836 skb_queue_purge(&hdev->cmd_q);
1838 /* Avoid potential lockdep warnings from the *_flush() calls by
1839 * ensuring the workqueue is empty up front.
1841 drain_workqueue(hdev->workqueue);
1844 hci_inquiry_cache_flush(hdev);
1845 hci_conn_hash_flush(hdev);
1846 hci_dev_unlock(hdev);
/* Reset outstanding-packet accounting before re-initializing. */
1851 atomic_set(&hdev->cmd_cnt, 1);
1852 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1854 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1856 hci_req_unlock(hdev);
/* HCIDEVRESET ioctl entry point: validate device state (must be up, not
 * user-channel, not unconfigured) before calling hci_dev_do_reset().
 * Returns 0 or a negative errno.
 */
1860 int hci_dev_reset(__u16 dev)
1862 struct hci_dev *hdev;
1865 hdev = hci_dev_get(dev);
1869 if (!test_bit(HCI_UP, &hdev->flags)) {
1874 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1879 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1884 err = hci_dev_do_reset(hdev);
/* HCIDEVRESTAT ioctl: zero the per-device statistics counters.
 * Rejected for user-channel and unconfigured devices.
 */
1891 int hci_dev_reset_stat(__u16 dev)
1893 struct hci_dev *hdev;
1896 hdev = hci_dev_get(dev);
1900 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1905 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1910 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Mirror a legacy HCISETSCAN change into the mgmt-visible CONNECTABLE and
 * DISCOVERABLE flags, and emit updated settings when anything changed.
 * @scan: bitmask of SCAN_PAGE / SCAN_INQUIRY.
 */
1917 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1919 bool conn_changed, discov_changed;
1921 BT_DBG("%s scan 0x%02x", hdev->name, scan);
/* SCAN_PAGE drives the CONNECTABLE flag. */
1923 if ((scan & SCAN_PAGE))
1924 conn_changed = !hci_dev_test_and_set_flag(hdev,
1927 conn_changed = hci_dev_test_and_clear_flag(hdev,
/* SCAN_INQUIRY drives the DISCOVERABLE flag(s). */
1930 if ((scan & SCAN_INQUIRY)) {
1931 discov_changed = !hci_dev_test_and_set_flag(hdev,
1934 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1935 discov_changed = hci_dev_test_and_clear_flag(hdev,
/* Without mgmt in use there is nobody to notify. */
1939 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1942 if (conn_changed || discov_changed) {
1943 /* In case this was disabled through mgmt */
1944 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1946 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1947 mgmt_update_adv_data(hdev);
1949 mgmt_new_settings(hdev);
/* Dispatcher for the legacy HCISET* ioctls (auth, encrypt, scan, link
 * policy/mode, packet type, ACL/SCO MTU). Copies a hci_dev_req from user
 * space and applies the requested setting, synchronously where an HCI
 * command is involved. Returns 0 or a negative errno.
 * NOTE(review): the switch/case labels for most commands are missing from
 * this chunk; the bodies below are grouped by their original case.
 */
1953 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1955 struct hci_dev *hdev;
1956 struct hci_dev_req dr;
1959 if (copy_from_user(&dr, arg, sizeof(dr)))
1962 hdev = hci_dev_get(dr.dev_id);
1966 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1971 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
/* These legacy ioctls only make sense for BR/EDR controllers. */
1976 if (hdev->dev_type != HCI_BREDR) {
1981 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
/* HCISETAUTH */
1988 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
/* HCISETENCRYPT: requires encryption support and auth enabled first. */
1993 if (!lmp_encrypt_capable(hdev)) {
1998 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1999 /* Auth must be enabled first */
2000 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2006 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
/* HCISETSCAN */
2011 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2014 /* Ensure that the connectable and discoverable states
2015 * get correctly modified as this was a non-mgmt change.
2018 hci_update_scan_state(hdev, dr.dev_opt);
/* HCISETLINKPOL */
2022 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2026 case HCISETLINKMODE:
2027 hdev->link_mode = ((__u16) dr.dev_opt) &
2028 (HCI_LM_MASTER | HCI_LM_ACCEPT);
/* HCISETPTYPE */
2032 hdev->pkt_type = (__u16) dr.dev_opt;
/* HCISETACLMTU: dev_opt packs pkts in the low and mtu in the high u16. */
2036 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2037 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCISETSCOMTU: same packing as ACL MTU above. */
2041 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2042 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: return the ids and flags of up to dev_num
 * registered controllers to user space. Returns 0 or -EFAULT/-EINVAL/
 * -ENOMEM.
 */
2055 int hci_get_dev_list(void __user *arg)
2057 struct hci_dev *hdev;
2058 struct hci_dev_list_req *dl;
2059 struct hci_dev_req *dr;
2060 int n = 0, size, err;
2063 if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the request so the kernel allocation stays within two pages. */
2066 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2069 size = sizeof(*dl) + dev_num * sizeof(*dr);
2071 dl = kzalloc(size, GFP_KERNEL);
2077 read_lock(&hci_dev_list_lock);
2078 list_for_each_entry(hdev, &hci_dev_list, list) {
2079 unsigned long flags = hdev->flags;
2081 /* When the auto-off is configured it means the transport
2082 * is running, but in that case still indicate that the
2083 * device is actually down.
2085 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2086 flags &= ~BIT(HCI_UP);
2088 (dr + n)->dev_id = hdev->id;
2089 (dr + n)->dev_opt = flags;
2094 read_unlock(&hci_dev_list_lock);
/* Shrink the copy-out to the number of devices actually found. */
2097 size = sizeof(*dl) + n * sizeof(*dr);
2099 err = copy_to_user(arg, dl, size);
2102 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot (address,
 * type, flags, MTUs, features, stats) for one controller and copy it to
 * user space. Returns 0 or -EFAULT/-ENODEV.
 */
2105 int hci_get_dev_info(void __user *arg)
2107 struct hci_dev *hdev;
2108 struct hci_dev_info di;
2109 unsigned long flags;
2112 if (copy_from_user(&di, arg, sizeof(di)))
2115 hdev = hci_dev_get(di.dev_id);
2119 /* When the auto-off is configured it means the transport
2120 * is running, but in that case still indicate that the
2121 * device is actually down.
2123 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2124 flags = hdev->flags & ~BIT(HCI_UP);
2126 flags = hdev->flags;
2128 strcpy(di.name, hdev->name);
2129 di.bdaddr = hdev->bdaddr;
/* Pack bus in the low nibble and dev_type in bits 4-5. */
2130 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2132 di.pkt_type = hdev->pkt_type;
/* LE-only controllers report their LE buffer sizes in the ACL fields. */
2133 if (lmp_bredr_capable(hdev)) {
2134 di.acl_mtu = hdev->acl_mtu;
2135 di.acl_pkts = hdev->acl_pkts;
2136 di.sco_mtu = hdev->sco_mtu;
2137 di.sco_pkts = hdev->sco_pkts;
2139 di.acl_mtu = hdev->le_mtu;
2140 di.acl_pkts = hdev->le_pkts;
2144 di.link_policy = hdev->link_policy;
2145 di.link_mode = hdev->link_mode;
2147 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2148 memcpy(&di.features, &hdev->features, sizeof(di.features));
2150 if (copy_to_user(arg, &di, sizeof(di)))
2158 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: track the RFKILLED flag and power the device down when
 * it gets blocked (unless it is still in setup/config, or owned by a
 * user channel, in which case the block is rejected/deferred).
 */
2160 static int hci_rfkill_set_block(void *data, bool blocked)
2162 struct hci_dev *hdev = data;
2164 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
/* A user-channel owner controls the device; refuse the rfkill change. */
2166 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2170 hci_dev_set_flag(hdev, HCI_RFKILLED);
2171 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2172 !hci_dev_test_flag(hdev, HCI_CONFIG))
2173 hci_dev_do_close(hdev);
2175 hci_dev_clear_flag(hdev, HCI_RFKILLED);
/* rfkill operations table; only the block/unblock hook is needed. */
2181 static const struct rfkill_ops hci_rfkill_ops = {
2182 .set_block = hci_rfkill_set_block,
/* Work handler for hdev->power_on: open the device, re-check the error
 * conditions that were deferred during setup (rfkill, missing address),
 * schedule auto-off when applicable, and announce the index to mgmt once
 * SETUP/CONFIG completes.
 */
2185 static void hci_power_on(struct work_struct *work)
2187 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2190 BT_DBG("%s", hdev->name);
2192 err = hci_dev_do_open(hdev);
2195 mgmt_set_powered_failed(hdev, err);
2196 hci_dev_unlock(hdev);
2200 /* During the HCI setup phase, a few error conditions are
2201 * ignored and they need to be checked now. If they are still
2202 * valid, it is important to turn the device back off.
2204 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2205 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2206 (hdev->dev_type == HCI_BREDR &&
2207 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2208 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2209 hci_dev_clear_flag(hdev, HCI_AUTO_OFF)
2210 hci_dev_do_close(hdev);
2211 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2212 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2213 HCI_AUTO_OFF_TIMEOUT);
2216 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2217 /* For unconfigured devices, set the HCI_RAW flag
2218 * so that userspace can easily identify them.
2220 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2221 set_bit(HCI_RAW, &hdev->flags);
2223 /* For fully configured devices, this will send
2224 * the Index Added event. For unconfigured devices,
2225 * it will send Unconfigued Index Added event.
2227 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2228 * and no event will be send.
2230 mgmt_index_added(hdev);
2231 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2232 /* When the controller is now configured, then it
2233 * is important to clear the HCI_RAW flag.
2235 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2236 clear_bit(HCI_RAW, &hdev->flags);
2238 /* Powering on the controller with HCI_CONFIG set only
2239 * happens with the transition from unconfigured to
2240 * configured. This will send the Index Added event.
2242 mgmt_index_added(hdev);
2246 static void hci_power_off(struct work_struct *work)
2248 struct hci_dev *hdev = container_of(work, struct hci_dev,
2251 BT_DBG("%s", hdev->name);
2253 hci_dev_do_close(hdev);
/* Work handler for a controller hardware-error event: let the driver's
 * hw_error hook handle it when present, otherwise log the error code and
 * recover by closing and reopening the device.
 */
2256 static void hci_error_reset(struct work_struct *work)
2258 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2260 BT_DBG("%s", hdev->name);
/* Driver-specific recovery takes precedence when hw_error is set. */
2263 hdev->hw_error(hdev, hdev->hw_error_code);
2265 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2266 hdev->hw_error_code);
/* A failed close aborts recovery; otherwise bring the device back up. */
2268 if (hci_dev_do_close(hdev))
2271 hci_dev_do_open(hdev);
2274 static void hci_discov_off(struct work_struct *work)
2276 struct hci_dev *hdev;
2278 hdev = container_of(work, struct hci_dev, discov_off.work);
2280 BT_DBG("%s", hdev->name);
2282 mgmt_discoverable_timeout(hdev);
2285 static void hci_adv_timeout_expire(struct work_struct *work)
2287 struct hci_dev *hdev;
2289 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2291 BT_DBG("%s", hdev->name);
2293 mgmt_adv_timeout_expired(hdev);
/* Remove (and free) every registered UUID from hdev->uuids. */
2296 void hci_uuids_clear(struct hci_dev *hdev)
2298 struct bt_uuid *uuid, *tmp;
2300 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2301 list_del(&uuid->list);
2306 void hci_link_keys_clear(struct hci_dev *hdev)
2308 struct link_key *key;
2310 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2311 list_del_rcu(&key->list);
2312 kfree_rcu(key, rcu);
/* Drop every stored SMP long-term key (RCU-safe unlink + deferred free). */
2316 void hci_smp_ltks_clear(struct hci_dev *hdev)
2320 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2321 list_del_rcu(&k->list);
/* Drop every stored identity resolving key (RCU-safe unlink + free). */
2326 void hci_smp_irks_clear(struct hci_dev *hdev)
2330 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2331 list_del_rcu(&k->list);
/* Look up the stored BR/EDR link key for @bdaddr; returns the entry or
 * NULL. NOTE(review): the rcu_read_lock/unlock and return lines are
 * missing from this chunk.
 */
2336 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2341 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2342 if (bacmp(bdaddr, &k->bdaddr) == 0) {
/* Decide whether a newly generated BR/EDR link key should be stored
 * persistently, based on the key type and both sides' authentication
 * requirements. NOTE(review): the individual return statements between
 * the checks are missing from this chunk.
 */
2352 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2353 u8 key_type, u8 old_key_type)
/* Legacy key types (< 0x03) are always stored. */
2356 if (key_type < 0x03)
2359 /* Debug keys are insecure so don't store them persistently */
2360 if (key_type == HCI_LK_DEBUG_COMBINATION)
2363 /* Changed combination key and there's no previous one */
2364 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2367 /* Security mode 3 case */
2371 /* BR/EDR key derived using SC from an LE link */
2372 if (conn->type == LE_LINK)
2375 /* Neither local nor remote side had no-bonding as requirement */
2376 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2379 /* Local side had dedicated bonding as requirement */
2380 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2383 /* Remote side had dedicated bonding as requirement */
2384 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2387 /* If none of the above criteria match, then don't store the key
2392 static u8 ltk_role(u8 type)
2394 if (type == SMP_LTK)
2395 return HCI_ROLE_MASTER;
2397 return HCI_ROLE_SLAVE;
/* Look up an LE long-term key matching address, address type and role.
 * SC-generated keys match either role. Returns the key or NULL.
 * NOTE(review): rcu locking and return lines are missing from this chunk.
 */
2400 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2401 u8 addr_type, u8 role)
2406 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2407 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2410 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
/* Resolve a resolvable private address to its IRK: first try the cached
 * RPA per entry, then run the crypto match and cache the RPA on success.
 * Returns the IRK or NULL (return/locking lines missing from this chunk).
 */
2420 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2422 struct smp_irk *irk;
/* Fast path: the RPA was resolved before and is cached on the entry. */
2425 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2426 if (!bacmp(&irk->rpa, rpa)) {
/* Slow path: try each IRK cryptographically and cache on a match. */
2432 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2433 if (smp_irk_matches(hdev, irk->val, rpa)) {
2434 bacpy(&irk->rpa, rpa);
/* Look up an IRK by its identity address and address type.
 * Returns the IRK or NULL (return/locking lines missing from this chunk).
 */
2444 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2447 struct smp_irk *irk;
2449 /* Identity Address must be public or static random */
2450 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2454 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2455 if (addr_type == irk->addr_type &&
2456 bacmp(bdaddr, &irk->bdaddr) == 0) {
/* Store (or update) a BR/EDR link key for @bdaddr. Optionally reports via
 * @persistent whether the key should be kept across power cycles.
 * Returns the stored entry (allocation-failure return lines are missing
 * from this chunk).
 */
2466 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2467 bdaddr_t *bdaddr, u8 *val, u8 type,
2468 u8 pin_len, bool *persistent)
2470 struct link_key *key, *old_key;
2473 old_key = hci_find_link_key(hdev, bdaddr);
2475 old_key_type = old_key->type;
/* No existing entry: remember the previous type and allocate a new one. */
2478 old_key_type = conn ? conn->key_type : 0xff;
2479 key = kzalloc(sizeof(*key), GFP_KERNEL);
2482 list_add_rcu(&key->list, &hdev->link_keys);
2485 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2487 /* Some buggy controller combinations generate a changed
2488 * combination key for legacy pairing even when there's no
2490 if (type == HCI_LK_CHANGED_COMBINATION &&
2491 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2492 type = HCI_LK_COMBINATION;
2494 conn->key_type = type;
2497 bacpy(&key->bdaddr, bdaddr);
2498 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2499 key->pin_len = pin_len;
/* A changed-combination event keeps the previous key's type. */
2501 if (type == HCI_LK_CHANGED_COMBINATION)
2502 key->type = old_key_type;
2507 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an LE long-term key for @bdaddr/@addr_type.
 * Returns the stored entry (some assignment and return lines -- e.g. the
 * ediv/rand/type fields -- are missing from this chunk).
 */
2513 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2514 u8 addr_type, u8 type, u8 authenticated,
2515 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2517 struct smp_ltk *key, *old_key;
2518 u8 role = ltk_role(type);
/* Reuse an existing entry for the same address/role when present. */
2520 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2524 key = kzalloc(sizeof(*key), GFP_KERNEL);
2527 list_add_rcu(&key->list, &hdev->long_term_keys);
2530 bacpy(&key->bdaddr, bdaddr);
2531 key->bdaddr_type = addr_type;
2532 memcpy(key->val, tk, sizeof(key->val));
2533 key->authenticated = authenticated;
2536 key->enc_size = enc_size;
/* Store (or update) an identity resolving key for @bdaddr/@addr_type,
 * remembering the last RPA it resolved. Returns the stored entry
 * (allocation-failure/return lines missing from this chunk).
 */
2542 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2543 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2545 struct smp_irk *irk;
2547 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
/* No existing entry: allocate and link a fresh one. */
2549 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2553 bacpy(&irk->bdaddr, bdaddr);
2554 irk->addr_type = addr_type;
2556 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2559 memcpy(irk->val, val, 16);
2560 bacpy(&irk->rpa, rpa);
/* Remove the stored BR/EDR link key for @bdaddr.
 * Returns 0 on success or -ENOENT (the not-found return line is missing
 * from this chunk).
 */
2565 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2567 struct link_key *key;
2569 key = hci_find_link_key(hdev, bdaddr);
2573 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2575 list_del_rcu(&key->list);
2576 kfree_rcu(key, rcu);
/* Remove all LE long-term keys matching @bdaddr/@bdaddr_type.
 * Returns 0 when at least one entry was removed, -ENOENT otherwise.
 */
2581 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2586 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2587 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2590 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2592 list_del_rcu(&k->list);
2597 return removed ? 0 : -ENOENT;
/* Remove all identity resolving keys matching @bdaddr/@addr_type. */
2600 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2604 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2605 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2608 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2610 list_del_rcu(&k->list);
/* Check whether @bdaddr is a paired device: a stored link key for BR/EDR,
 * or a stored LTK for LE (resolving the address through an IRK first).
 * Returns true/false (the return lines are missing from this chunk).
 */
2615 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2618 struct smp_irk *irk;
2621 if (type == BDADDR_BREDR) {
2622 if (hci_find_link_key(hdev, bdaddr))
2627 /* Convert to HCI addr type which struct smp_ltk uses */
2628 if (type == BDADDR_LE_PUBLIC)
2629 addr_type = ADDR_LE_DEV_PUBLIC;
2631 addr_type = ADDR_LE_DEV_RANDOM;
/* If the address resolves through an IRK, check the identity address. */
2633 irk = hci_get_irk(hdev, bdaddr, addr_type);
2635 bdaddr = &irk->bdaddr;
2636 addr_type = irk->addr_type;
2640 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2641 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2651 /* HCI command timer function */
/* Work handler for the HCI command timer: log which command (if any) timed
 * out, then release the command credit and kick cmd_work so the queue can
 * make progress again.
 */
2652 static void hci_cmd_timeout(struct work_struct *work)
2654 struct hci_dev *hdev = container_of(work, struct hci_dev,
2657 if (hdev->sent_cmd) {
2658 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2659 u16 opcode = __le16_to_cpu(sent->opcode);
2661 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2663 BT_ERR("%s command tx timeout", hdev->name);
/* Free the credit so the next queued command can be sent. */
2666 atomic_set(&hdev->cmd_cnt, 1);
2667 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data by address and address type.
 * Returns the entry or NULL (return lines missing from this chunk).
 */
2670 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2671 bdaddr_t *bdaddr, u8 bdaddr_type)
2673 struct oob_data *data;
2675 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2676 if (bacmp(bdaddr, &data->bdaddr) != 0)
2678 if (data->bdaddr_type != bdaddr_type)
/* Remove the stored remote OOB data for @bdaddr/@bdaddr_type.
 * Returns 0 or -ENOENT (those return lines are missing from this chunk).
 */
2686 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2689 struct oob_data *data;
2691 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2695 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2697 list_del(&data->list);
/* Remove (and free) all stored remote OOB pairing data entries. */
2703 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2705 struct oob_data *data, *n;
2707 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2708 list_del(&data->list);
/* Store (or update) remote OOB pairing data for @bdaddr. The P-192 and
 * P-256 hash/randomizer pairs are each optional; data->present encodes
 * which of them are valid (0x01 = 192 only, 0x02 = 256 only, 0x03 = both).
 */
2713 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2714 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2715 u8 *hash256, u8 *rand256)
2717 struct oob_data *data;
2719 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
/* No existing entry: allocate and link a fresh one. */
2721 data = kmalloc(sizeof(*data), GFP_KERNEL);
2725 bacpy(&data->bdaddr, bdaddr);
2726 data->bdaddr_type = bdaddr_type;
2727 list_add(&data->list, &hdev->remote_oob_data);
/* P-192 values: store when provided, otherwise zero them out. */
2730 if (hash192 && rand192) {
2731 memcpy(data->hash192, hash192, sizeof(data->hash192));
2732 memcpy(data->rand192, rand192, sizeof(data->rand192));
2733 if (hash256 && rand256)
2734 data->present = 0x03;
2736 memset(data->hash192, 0, sizeof(data->hash192));
2737 memset(data->rand192, 0, sizeof(data->rand192));
2738 if (hash256 && rand256)
2739 data->present = 0x02;
2741 data->present = 0x00;
/* P-256 values: same store-or-zero handling. */
2744 if (hash256 && rand256) {
2745 memcpy(data->hash256, hash256, sizeof(data->hash256));
2746 memcpy(data->rand256, rand256, sizeof(data->rand256));
2748 memset(data->hash256, 0, sizeof(data->hash256));
2749 memset(data->rand256, 0, sizeof(data->rand256));
2750 if (hash192 && rand192)
2751 data->present = 0x01;
2754 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2759 /* This function requires the caller holds hdev->lock */
/* Look up an advertising instance by its id; returns the entry or NULL.
 * Caller must hold hdev->lock (the NULL return line is missing here).
 */
2760 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2762 struct adv_info *adv_instance;
2764 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2765 if (adv_instance->instance == instance)
2766 return adv_instance;
2772 /* This function requires the caller holds hdev->lock */
/* Return the advertising instance after @instance, wrapping from the last
 * entry back to the first. Caller must hold hdev->lock.
 * NOTE(review): the lookup-failed (NULL) check is missing from this chunk.
 */
2773 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
2774 struct adv_info *cur_instance;
2776 cur_instance = hci_find_adv_instance(hdev, instance);
/* Wrap around when the current instance is the last in the list. */
2780 if (cur_instance == list_last_entry(&hdev->adv_instances,
2781 struct adv_info, list))
2782 return list_first_entry(&hdev->adv_instances,
2783 struct adv_info, list);
2785 return list_next_entry(cur_instance, list);
2788 /* This function requires the caller holds hdev->lock */
/* Remove one advertising instance, cancelling its expiry timer when it is
 * the currently active one. Caller must hold hdev->lock.
 * Returns 0 or -ENOENT (return lines missing from this chunk).
 */
2789 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2791 struct adv_info *adv_instance;
2793 adv_instance = hci_find_adv_instance(hdev, instance);
2797 BT_DBG("%s removing %dMR", hdev->name, instance);
/* Stop the expiry timer if this instance is the one currently running. */
2799 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2800 cancel_delayed_work(&hdev->adv_instance_expire);
2801 hdev->adv_instance_timeout = 0;
2804 list_del(&adv_instance->list);
2805 kfree(adv_instance);
2807 hdev->adv_instance_cnt--;
2812 /* This function requires the caller holds hdev->lock */
2813 void hci_adv_instances_clear(struct hci_dev *hdev)
2815 struct adv_info *adv_instance, *n;
2817 if (hdev->adv_instance_timeout) {
2818 cancel_delayed_work(&hdev->adv_instance_expire);
2819 hdev->adv_instance_timeout = 0;
2822 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2823 list_del(&adv_instance->list);
2824 kfree(adv_instance);
2827 hdev->adv_instance_cnt = 0;
2830 /* This function requires the caller holds hdev->lock */
/* Add a new advertising instance, or update an existing one in place.
 * Caller must hold hdev->lock. Returns 0 or a negative errno (the error
 * and success return lines are missing from this chunk).
 */
2831 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2832 u16 adv_data_len, u8 *adv_data,
2833 u16 scan_rsp_len, u8 *scan_rsp_data,
2834 u16 timeout, u16 duration)
2836 struct adv_info *adv_instance;
/* Existing instance: wipe its payloads before refilling them below. */
2838 adv_instance = hci_find_adv_instance(hdev, instance);
2840 memset(adv_instance->adv_data, 0,
2841 sizeof(adv_instance->adv_data));
2842 memset(adv_instance->scan_rsp_data, 0,
2843 sizeof(adv_instance->scan_rsp_data));
/* New instance: validate the id and the instance-count limit. */
2845 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2846 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2849 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2853 adv_instance->pending = true;
2854 adv_instance->instance = instance;
2855 list_add(&adv_instance->list, &hdev->adv_instances);
2856 hdev->adv_instance_cnt++;
2859 adv_instance->flags = flags;
2860 adv_instance->adv_data_len = adv_data_len;
2861 adv_instance->scan_rsp_len = scan_rsp_len;
2864 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2867 memcpy(adv_instance->scan_rsp_data,
2868 scan_rsp_data, scan_rsp_len);
2870 adv_instance->timeout = timeout;
2871 adv_instance->remaining_time = timeout;
/* Zero duration means "use the default advertising duration". */
2874 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2876 adv_instance->duration = duration;
2878 BT_DBG("%s for %dMR", hdev->name, instance);
/* Find a bdaddr_list entry matching address and type; returns the entry
 * or NULL (the return lines are missing from this chunk).
 */
2883 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2884 bdaddr_t *bdaddr, u8 type)
2886 struct bdaddr_list *b;
2888 list_for_each_entry(b, bdaddr_list, list) {
2889 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Remove (and free) every entry from a bdaddr list. */
2896 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2898 struct list_head *p, *n;
2900 list_for_each_safe(p, n, bdaddr_list) {
2901 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add @bdaddr/@type to a bdaddr list. Rejects BDADDR_ANY and duplicates.
 * Returns 0 or a negative errno (the error return lines are missing from
 * this chunk).
 */
2908 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2910 struct bdaddr_list *entry;
2912 if (!bacmp(bdaddr, BDADDR_ANY))
2915 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2918 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2922 bacpy(&entry->bdaddr, bdaddr);
2923 entry->bdaddr_type = type;
2925 list_add(&entry->list, list);
/* Remove @bdaddr/@type from @list.  Passing BDADDR_ANY empties the
 * whole list instead of deleting a single entry.
 */
2930 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2932 struct bdaddr_list *entry;
2934 if (!bacmp(bdaddr, BDADDR_ANY)) {
2935 hci_bdaddr_list_clear(list);
2939 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2943 list_del(&entry->list);
2949 /* This function requires the caller holds hdev->lock */
2950 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2951 bdaddr_t *addr, u8 addr_type)
2953 struct hci_conn_params *params;
2955 list_for_each_entry(params, &hdev->le_conn_params, list) {
2956 if (bacmp(¶ms->addr, addr) == 0 &&
2957 params->addr_type == addr_type) {
2965 /* This function requires the caller holds hdev->lock */
2966 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2967 bdaddr_t *addr, u8 addr_type)
2969 struct hci_conn_params *param;
2971 list_for_each_entry(param, list, action) {
2972 if (bacmp(¶m->addr, addr) == 0 &&
2973 param->addr_type == addr_type)
2980 /* This function requires the caller holds hdev->lock */
2981 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2982 bdaddr_t *addr, u8 addr_type)
2984 struct hci_conn_params *params;
2986 params = hci_conn_params_lookup(hdev, addr, addr_type);
2990 params = kzalloc(sizeof(*params), GFP_KERNEL);
2992 BT_ERR("Out of memory");
2996 bacpy(¶ms->addr, addr);
2997 params->addr_type = addr_type;
2999 list_add(¶ms->list, &hdev->le_conn_params);
3000 INIT_LIST_HEAD(¶ms->action);
3002 params->conn_min_interval = hdev->le_conn_min_interval;
3003 params->conn_max_interval = hdev->le_conn_max_interval;
3004 params->conn_latency = hdev->le_conn_latency;
3005 params->supervision_timeout = hdev->le_supv_timeout;
3006 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3008 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3013 static void hci_conn_params_free(struct hci_conn_params *params)
3016 hci_conn_drop(params->conn);
3017 hci_conn_put(params->conn);
3020 list_del(¶ms->action);
3021 list_del(¶ms->list);
3025 /* This function requires the caller holds hdev->lock */
/* Drop the LE connection parameters stored for @addr/@addr_type and
 * re-evaluate background scanning to reflect the removal.
 */
3026 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3028 struct hci_conn_params *params;
3030 params = hci_conn_params_lookup(hdev, addr, addr_type)
3034 hci_conn_params_free(params);
3036 hci_update_background_scan(hdev);
3038 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3041 /* This function requires the caller holds hdev->lock */
3042 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3044 struct hci_conn_params *params, *tmp;
3046 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3047 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3050 /* If trying to estabilish one time connection to disabled
3051 * device, leave the params, but mark them as just once.
3053 if (params->explicit_connect) {
3054 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3058 list_del(¶ms->list);
3062 BT_DBG("All LE disabled connection parameters were removed");
3065 /* This function requires the caller holds hdev->lock */
/* Free every LE connection-parameter entry on the controller, then
 * re-evaluate background scanning.
 */
3066 void hci_conn_params_clear_all(struct hci_dev *hdev)
3068 struct hci_conn_params *params, *tmp;
3070 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3071 hci_conn_params_free(params);
3073 hci_update_background_scan(hdev);
3075 BT_DBG("All LE connection parameters were removed");
/* Request-complete callback for the interleaved inquiry: on failure,
 * log the status and mark discovery stopped.  The matching
 * hci_dev_lock() call is outside this excerpt.
 */
3078 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3081 BT_ERR("Failed to start inquiry: status %d", status);
3084 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3085 hci_dev_unlock(hdev);
/* Completion handler for the "disable LE scan" request.  For LE-only
 * discovery it stops discovery; for interleaved discovery it either
 * relies on the simultaneous-discovery quirk, or flushes the inquiry
 * cache and issues a classic HCI_OP_INQUIRY (GIAC) request.
 */
3090 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3093 /* General inquiry access code (GIAC) */
3094 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3095 struct hci_cp_inquiry cp;
3099 BT_ERR("Failed to disable LE scanning: status %d", status);
/* Scan finished: forget the recorded scan start timestamp. */
3103 hdev->discovery.scan_start = 0;
3105 switch (hdev->discovery.type) {
3106 case DISCOV_TYPE_LE:
3108 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3109 hci_dev_unlock(hdev);
3112 case DISCOV_TYPE_INTERLEAVED:
3115 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3117 /* If we were running LE only scan, change discovery
3118 * state. If we were running both LE and BR/EDR inquiry
3119 * simultaneously, and BR/EDR inquiry is already
3120 * finished, stop discovery, otherwise BR/EDR inquiry
3121 * will stop discovery when finished. If we will resolve
3122 * remote device name, do not change discovery state.
3124 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3125 hdev->discovery.state != DISCOVERY_RESOLVING)
3126 hci_discovery_set_state(hdev,
3129 struct hci_request req;
3131 hci_inquiry_cache_flush(hdev);
3133 hci_req_init(&req, hdev);
/* Build the inquiry command: GIAC LAP, interleaved inquiry length. */
3135 memset(&cp, 0, sizeof(cp));
3136 memcpy(&cp.lap, lap, sizeof(cp.lap));
3137 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3138 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3140 err = hci_req_run(&req, inquiry_complete);
3142 BT_ERR("Inquiry request failed: err %d", err);
3143 hci_discovery_set_state(hdev,
3148 hci_dev_unlock(hdev);
/* Delayed work that turns off LE scanning: cancel any pending
 * scan-restart job first, then submit an LE scan disable request whose
 * completion is handled by le_scan_disable_work_complete().
 */
3153 static void le_scan_disable_work(struct work_struct *work)
3155 struct hci_dev *hdev = container_of(work, struct hci_dev,
3156 le_scan_disable.work);
3157 struct hci_request req;
3160 BT_DBG("%s", hdev->name);
/* A restart racing with this disable would re-enable the scan. */
3162 cancel_delayed_work_sync(&hdev->le_scan_restart);
3164 hci_req_init(&req, hdev);
3166 hci_req_add_le_scan_disable(&req);
3168 err = hci_req_run(&req, le_scan_disable_work_complete);
3170 BT_ERR("Disable LE scanning request failed: err %d", err);
/* Completion handler for an LE scan restart.  When the controller
 * needs strict duplicate filtering and a scan window is in progress,
 * re-queue the le_scan_disable job with the time remaining in the
 * original scan duration so the scan cannot run indefinitely.
 */
3173 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3176 unsigned long timeout, duration, scan_start, now;
3178 BT_DBG("%s", hdev->name);
3181 BT_ERR("Failed to restart LE scan: status %d", status);
3185 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3186 !hdev->discovery.scan_start)
3189 /* When the scan was started, hdev->le_scan_disable has been queued
3190 * after duration from scan_start. During scan restart this job
3191 * has been canceled, and we need to queue it again after proper
3192 * timeout, to make sure that scan does not run indefinitely.
3194 duration = hdev->discovery.scan_duration;
3195 scan_start = hdev->discovery.scan_start;
3197 if (now - scan_start <= duration) {
3200 if (now >= scan_start)
3201 elapsed = now - scan_start;
/* jiffies wrapped around since scan_start was recorded. */
3203 elapsed = ULONG_MAX - scan_start + now;
3205 timeout = duration - elapsed;
3209 queue_delayed_work(hdev->workqueue,
3210 &hdev->le_scan_disable, timeout);
/* Delayed work that restarts an active LE scan (used to flush the
 * controller's duplicate filter): disable scanning and re-enable it
 * with duplicate filtering in a single request.  No-op if the
 * controller is not currently scanning.
 */
3213 static void le_scan_restart_work(struct work_struct *work)
3215 struct hci_dev *hdev = container_of(work, struct hci_dev,
3216 le_scan_restart.work);
3217 struct hci_request req;
3218 struct hci_cp_le_set_scan_enable cp;
3221 BT_DBG("%s", hdev->name);
3223 /* If controller is not scanning we are done. */
3224 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3227 hci_req_init(&req, hdev);
3229 hci_req_add_le_scan_disable(&req);
3231 memset(&cp, 0, sizeof(cp));
3232 cp.enable = LE_SCAN_ENABLE;
3233 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3234 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3236 err = hci_req_run(&req, le_scan_restart_work_complete);
3238 BT_ERR("Restart LE scan request failed: err %d", err);
3241 /* Copy the Identity Address of the controller.
3243 * If the controller has a public BD_ADDR, then by default use that one.
3244 * If this is a LE only controller without a public address, default to
3245 * the static random address.
3247 * For debugging purposes it is possible to force controllers with a
3248 * public address to use the static random address instead.
3250 * In case BR/EDR has been disabled on a dual-mode controller and
3251 * userspace has configured a static address, then that address
3252 * becomes the identity address instead of the public BR/EDR address.
3254 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
/* Static random address wins when: forced, no public address, or
 * BR/EDR disabled with a configured static address.
 */
3257 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3258 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3259 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3260 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3261 bacpy(bdaddr, &hdev->static_addr);
3262 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3264 bacpy(bdaddr, &hdev->bdaddr);
3265 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3269 /* Alloc HCI device */
/* Allocate and initialize a new hci_dev: set protocol defaults,
 * initialize all lists/locks/work items/queues, then sysfs and
 * discovery state.  Caller registers it with hci_register_dev().
 */
3270 struct hci_dev *hci_alloc_dev(void)
3272 struct hci_dev *hdev;
3274 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
/* Baseline BR/EDR capabilities and identity defaults. */
3278 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3279 hdev->esco_type = (ESCO_HV1);
3280 hdev->link_mode = (HCI_LM_ACCEPT);
3281 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3282 hdev->io_capability = 0x03; /* No Input No Output */
3283 hdev->manufacturer = 0xffff; /* Default to internal use */
3284 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3285 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3286 hdev->adv_instance_cnt = 0;
3287 hdev->cur_adv_instance = 0x00;
3288 hdev->adv_instance_timeout = 0;
3290 hdev->sniff_max_interval = 800;
3291 hdev->sniff_min_interval = 80;
/* LE defaults (advertising, scanning, connection, data length). */
3293 hdev->le_adv_channel_map = 0x07;
3294 hdev->le_adv_min_interval = 0x0800;
3295 hdev->le_adv_max_interval = 0x0800;
3296 hdev->le_scan_interval = 0x0060;
3297 hdev->le_scan_window = 0x0030;
3298 hdev->le_conn_min_interval = 0x0028;
3299 hdev->le_conn_max_interval = 0x0038;
3300 hdev->le_conn_latency = 0x0000;
3301 hdev->le_supv_timeout = 0x002a;
3302 hdev->le_def_tx_len = 0x001b;
3303 hdev->le_def_tx_time = 0x0148;
3304 hdev->le_max_tx_len = 0x001b;
3305 hdev->le_max_tx_time = 0x0148;
3306 hdev->le_max_rx_len = 0x001b;
3307 hdev->le_max_rx_time = 0x0148;
3309 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3310 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3311 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3312 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3314 mutex_init(&hdev->lock);
3315 mutex_init(&hdev->req_lock);
/* All per-device object lists start empty. */
3317 INIT_LIST_HEAD(&hdev->mgmt_pending);
3318 INIT_LIST_HEAD(&hdev->blacklist);
3319 INIT_LIST_HEAD(&hdev->whitelist);
3320 INIT_LIST_HEAD(&hdev->uuids);
3321 INIT_LIST_HEAD(&hdev->link_keys);
3322 INIT_LIST_HEAD(&hdev->long_term_keys);
3323 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3324 INIT_LIST_HEAD(&hdev->remote_oob_data);
3325 INIT_LIST_HEAD(&hdev->le_white_list);
3326 INIT_LIST_HEAD(&hdev->le_conn_params);
3327 INIT_LIST_HEAD(&hdev->pend_le_conns);
3328 INIT_LIST_HEAD(&hdev->pend_le_reports);
3329 INIT_LIST_HEAD(&hdev->conn_hash.list);
3330 INIT_LIST_HEAD(&hdev->adv_instances);
/* Immediate and delayed work handlers. */
3332 INIT_WORK(&hdev->rx_work, hci_rx_work);
3333 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3334 INIT_WORK(&hdev->tx_work, hci_tx_work);
3335 INIT_WORK(&hdev->power_on, hci_power_on);
3336 INIT_WORK(&hdev->error_reset, hci_error_reset);
3338 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3339 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3340 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3341 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3342 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3344 skb_queue_head_init(&hdev->rx_q);
3345 skb_queue_head_init(&hdev->cmd_q);
3346 skb_queue_head_init(&hdev->raw_q);
3348 init_waitqueue_head(&hdev->req_wait_q);
3350 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3352 hci_init_sysfs(hdev);
3353 discovery_init(hdev);
3357 EXPORT_SYMBOL(hci_alloc_dev);
3359 /* Free HCI device */
/* Drop the last device reference; the actual memory is released by
 * the device core's release callback, not here.
 */
3360 void hci_free_dev(struct hci_dev *hdev)
3362 /* will free via device release */
3363 put_device(&hdev->dev);
3365 EXPORT_SYMBOL(hci_free_dev);
3367 /* Register HCI device */
/* Register a prepared hci_dev with the core: allocate an index,
 * create work queues, debugfs dir, sysfs device and rfkill switch,
 * add it to hci_dev_list and schedule power-on.  Driver callbacks
 * open/close/send are mandatory.
 */
3368 int hci_register_dev(struct hci_dev *hdev)
3372 if (!hdev->open || !hdev->close || !hdev->send)
3375 /* Do not allow HCI_AMP devices to register at index 0,
3376 * so the index can be used as the AMP controller ID.
3378 switch (hdev->dev_type) {
3380 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3383 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3392 sprintf(hdev->name, "hci%d", id);
3395 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Ordered (max_active = 1) high-priority queues for RX/TX/commands
 * and for serialized requests respectively.
 */
3397 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3398 WQ_MEM_RECLAIM, 1, hdev->name);
3399 if (!hdev->workqueue) {
3404 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3405 WQ_MEM_RECLAIM, 1, hdev->name);
3406 if (!hdev->req_workqueue) {
3407 destroy_workqueue(hdev->workqueue);
3412 if (!IS_ERR_OR_NULL(bt_debugfs))
3413 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3415 dev_set_name(&hdev->dev, "%s", hdev->name);
3417 error = device_add(&hdev->dev);
/* rfkill is best-effort: registration failure just clears the
 * pointer rather than failing device registration.
 */
3421 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3422 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3425 if (rfkill_register(hdev->rfkill) < 0) {
3426 rfkill_destroy(hdev->rfkill);
3427 hdev->rfkill = NULL;
3431 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3432 hci_dev_set_flag(hdev, HCI_RFKILLED);
3434 hci_dev_set_flag(hdev, HCI_SETUP);
3435 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3437 if (hdev->dev_type == HCI_BREDR) {
3438 /* Assume BR/EDR support until proven otherwise (such as
3439 * through reading supported features during init.
3441 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3444 write_lock(&hci_dev_list_lock);
3445 list_add(&hdev->list, &hci_dev_list);
3446 write_unlock(&hci_dev_list_lock);
3448 /* Devices that are marked for raw-only usage are unconfigured
3449 * and should not be included in normal operation.
3451 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3452 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3454 hci_sock_dev_event(hdev, HCI_DEV_REG);
3457 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwinding: tear down in reverse order of setup. */
3462 destroy_workqueue(hdev->workqueue);
3463 destroy_workqueue(hdev->req_workqueue);
3465 ida_simple_remove(&hci_index_ida, hdev->id);
3469 EXPORT_SYMBOL(hci_register_dev);
3471 /* Unregister HCI device */
/* Unregister a device from the core: mark it unregistering, unlink it
 * from hci_dev_list, close it, notify mgmt and sockets, remove rfkill
 * and the sysfs device.  Final cleanup is deferred (see
 * hci_cleanup_dev()).
 */
3472 void hci_unregister_dev(struct hci_dev *hdev)
3474 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3476 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3478 write_lock(&hci_dev_list_lock);
3479 list_del(&hdev->list);
3480 write_unlock(&hci_dev_list_lock);
3482 hci_dev_do_close(hdev);
3484 cancel_work_sync(&hdev->power_on);
/* Only notify mgmt for devices that finished setup/config. */
3486 if (!test_bit(HCI_INIT, &hdev->flags) &&
3487 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3488 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3490 mgmt_index_removed(hdev);
3491 hci_dev_unlock(hdev);
3494 /* mgmt_index_removed should take care of emptying the
3496 BUG_ON(!list_empty(&hdev->mgmt_pending));
3498 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3501 rfkill_unregister(hdev->rfkill);
3502 rfkill_destroy(hdev->rfkill);
3505 device_del(&hdev->dev);
3506 /* Actual cleanup is deferred until hci_cleanup_dev(). */
3509 EXPORT_SYMBOL(hci_unregister_dev);
3511 /* Cleanup HCI device */
/* Final teardown after unregistration: remove debugfs, destroy work
 * queues, empty every per-device list/cache under hdev->lock, then
 * release the device index.
 */
3512 void hci_cleanup_dev(struct hci_dev *hdev)
3514 debugfs_remove_recursive(hdev->debugfs);
3516 destroy_workqueue(hdev->workqueue);
3517 destroy_workqueue(hdev->req_workqueue);
3520 hci_bdaddr_list_clear(&hdev->blacklist);
3521 hci_bdaddr_list_clear(&hdev->whitelist);
3522 hci_uuids_clear(hdev);
3523 hci_link_keys_clear(hdev);
3524 hci_smp_ltks_clear(hdev);
3525 hci_smp_irks_clear(hdev);
3526 hci_remote_oob_data_clear(hdev);
3527 hci_adv_instances_clear(hdev);
3528 hci_bdaddr_list_clear(&hdev->le_white_list);
3529 hci_conn_params_clear_all(hdev);
3530 hci_discovery_filter_clear(hdev);
3531 hci_dev_unlock(hdev);
3533 ida_simple_remove(&hci_index_ida, hdev->id);
3536 /* Suspend HCI device */
/* Notify HCI sockets that the device is suspending. */
3537 int hci_suspend_dev(struct hci_dev *hdev)
3539 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3542 EXPORT_SYMBOL(hci_suspend_dev);
3544 /* Resume HCI device */
/* Notify HCI sockets that the device is resuming. */
3545 int hci_resume_dev(struct hci_dev *hdev)
3547 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3550 EXPORT_SYMBOL(hci_resume_dev);
3552 /* Reset HCI device */
/* Inject a synthetic Hardware Error event (code 0x00) into the RX
 * path so that the upper stack performs its error/reset handling.
 */
3553 int hci_reset_dev(struct hci_dev *hdev)
3555 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3556 struct sk_buff *skb;
3558 skb = bt_skb_alloc(3, GFP_ATOMIC);
3562 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3563 memcpy(skb_put(skb, 3), hw_err, 3);
3565 /* Send Hardware Error to upper stack */
3566 return hci_recv_frame(hdev, skb);
3568 EXPORT_SYMBOL(hci_reset_dev);
3570 /* Receive frame from HCI drivers */
/* Entry point for driver-received packets: validate device state and
 * packet type, mark the skb incoming, timestamp it and hand it to the
 * RX work queue.
 */
3571 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
/* Only accept frames while the device is up or initializing. */
3573 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3574 && !test_bit(HCI_INIT, &hdev->flags))) {
/* Drivers may only inject event, ACL and SCO packets here. */
3579 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3580 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3581 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3587 bt_cb(skb)->incoming = 1;
3590 __net_timestamp(skb);
3592 skb_queue_tail(&hdev->rx_q, skb);
3593 queue_work(hdev->workqueue, &hdev->rx_work);
3597 EXPORT_SYMBOL(hci_recv_frame);
3599 /* Receive diagnostic message from HCI drivers */
/* Like hci_recv_frame() but for vendor diagnostic packets: tag the
 * skb as HCI_DIAG_PKT, timestamp it and queue it for RX processing.
 */
3600 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3602 /* Mark as diagnostic packet */
3603 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3606 __net_timestamp(skb);
3608 skb_queue_tail(&hdev->rx_q, skb);
3609 queue_work(hdev->workqueue, &hdev->rx_work);
3613 EXPORT_SYMBOL(hci_recv_diag);
3615 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback structure on the global
 * hci_cb_list, serialized by hci_cb_list_lock.
 */
3617 int hci_register_cb(struct hci_cb *cb)
3619 BT_DBG("%p name %s", cb, cb->name);
3621 mutex_lock(&hci_cb_list_lock);
3622 list_add_tail(&cb->list, &hci_cb_list);
3623 mutex_unlock(&hci_cb_list_lock);
3627 EXPORT_SYMBOL(hci_register_cb);
/* Remove an upper-protocol callback structure from hci_cb_list. */
3629 int hci_unregister_cb(struct hci_cb *cb)
3631 BT_DBG("%p name %s", cb, cb->name);
3633 mutex_lock(&hci_cb_list_lock);
3634 list_del(&cb->list);
3635 mutex_unlock(&hci_cb_list_lock);
3639 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing packet to the driver: timestamp it, mirror it to
 * the monitor (and to sockets when in promiscuous mode), then call the
 * driver's send() unless the device is not running.
 */
3641 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3645 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3648 __net_timestamp(skb);
3650 /* Send copy to monitor */
3651 hci_send_to_monitor(hdev, skb);
3653 if (atomic_read(&hdev->promisc)) {
3654 /* Send copy to the sockets */
3655 hci_send_to_sock(hdev, skb);
3658 /* Get rid of skb owner, prior to sending to the driver. */
3661 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3666 err = hdev->send(hdev, skb);
3668 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3673 /* Send HCI command */
/* Build an HCI command skb for @opcode/@param, mark it as the start
 * of a single-command request, and queue it on cmd_q for the command
 * worker to send.
 */
3674 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3677 struct sk_buff *skb;
3679 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3681 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3683 BT_ERR("%s no memory for command", hdev->name);
3687 /* Stand-alone HCI commands must be flagged as
3688 * single-command requests.
3690 bt_cb(skb)->hci.req_start = true;
3692 skb_queue_tail(&hdev->cmd_q, skb);
3693 queue_work(hdev->workqueue, &hdev->cmd_work);
3698 /* Get data from the previously sent command */
/* Return a pointer to the parameter payload of the last sent command
 * if its opcode matches @opcode; NULL when nothing was sent or the
 * opcode differs.
 */
3699 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3701 struct hci_command_hdr *hdr;
3703 if (!hdev->sent_cmd)
3706 hdr = (void *) hdev->sent_cmd->data;
/* Opcode is stored little-endian on the wire. */
3708 if (hdr->opcode != cpu_to_le16(opcode))
3711 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3713 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3716 /* Send HCI command and wait for command commplete event */
/* Synchronous command helper: fails with -ENETDOWN when the device is
 * not up, otherwise runs __hci_cmd_sync() under the request lock and
 * returns its skb (or ERR_PTR).
 */
3717 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3718 const void *param, u32 timeout)
3720 struct sk_buff *skb;
3722 if (!test_bit(HCI_UP, &hdev->flags))
3723 return ERR_PTR(-ENETDOWN);
3725 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3728 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3729 hci_req_unlock(hdev);
3733 EXPORT_SYMBOL(hci_cmd_sync);
/* Prepend an ACL data header to @skb, packing the connection @handle
 * and packet-boundary @flags little-endian.
 */
3736 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3738 struct hci_acl_hdr *hdr;
3741 skb_push(skb, HCI_ACL_HDR_SIZE);
3742 skb_reset_transport_header(skb);
3743 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3744 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3745 hdr->dlen = cpu_to_le16(len);
/* Add ACL headers to @skb (including any fragments in its frag_list)
 * and append the whole chain to @queue.  The handle used depends on
 * the device type (connection handle vs AMP channel handle).
 */
3748 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3749 struct sk_buff *skb, __u16 flags)
3751 struct hci_conn *conn = chan->conn;
3752 struct hci_dev *hdev = conn->hdev;
3753 struct sk_buff *list;
3755 skb->len = skb_headlen(skb);
3758 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3760 switch (hdev->dev_type) {
3762 hci_add_acl_hdr(skb, conn->handle, flags);
3765 hci_add_acl_hdr(skb, chan->handle, flags);
3768 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3772 list = skb_shinfo(skb)->frag_list;
3774 /* Non fragmented */
3775 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3777 skb_queue_tail(queue, skb);
3780 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3782 skb_shinfo(skb)->frag_list = NULL;
3784 /* Queue all fragments atomically. We need to use spin_lock_bh
3785 * here because of 6LoWPAN links, as there this function is
3786 * called from softirq and using normal spin lock could cause
3789 spin_lock_bh(&queue->lock);
3791 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag. */
3793 flags &= ~ACL_START;
3796 skb = list; list = list->next;
3798 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3799 hci_add_acl_hdr(skb, conn->handle, flags);
3801 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3803 __skb_queue_tail(queue, skb);
3806 spin_unlock_bh(&queue->lock);
/* Queue an ACL packet on a channel's data queue and kick the TX
 * worker to schedule it.
 */
3810 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3812 struct hci_dev *hdev = chan->conn->hdev;
3814 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3816 hci_queue_acl(chan, &chan->data_q, skb, flags);
3818 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (handle + length) to @skb, tag it as SCO data
 * and queue it on the connection's data queue for the TX worker.
 */
3822 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3824 struct hci_dev *hdev = conn->hdev;
3825 struct hci_sco_hdr hdr;
3827 BT_DBG("%s len %d", hdev->name, skb->len);
3829 hdr.handle = cpu_to_le16(conn->handle);
3830 hdr.dlen = skb->len;
3832 skb_push(skb, HCI_SCO_HDR_SIZE);
3833 skb_reset_transport_header(skb);
3834 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3836 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3838 skb_queue_tail(&conn->data_q, skb);
3839 queue_work(hdev->workqueue, &hdev->tx_work);
3842 /* ---- HCI TX task (outgoing data) ---- */
3844 /* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest
 * in-flight packets (fairness), computing how many packets it may
 * send this round into *quote based on the per-link-type credit pool.
 */
3845 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3848 struct hci_conn_hash *h = &hdev->conn_hash;
3849 struct hci_conn *conn = NULL, *c;
3850 unsigned int num = 0, min = ~0;
3852 /* We don't have to lock device here. Connections are always
3853 * added and removed with TX task disabled. */
3857 list_for_each_entry_rcu(c, &h->list, list) {
3858 if (c->type != type || skb_queue_empty(&c->data_q))
3861 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the least outstanding data. */
3866 if (c->sent < min) {
3871 if (hci_conn_num(hdev, type) == num)
/* Choose the credit pool matching the winner's link type;
 * LE falls back to ACL credits when no LE buffers exist.
 */
3880 switch (conn->type) {
3882 cnt = hdev->acl_cnt;
3886 cnt = hdev->sco_cnt;
3889 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3893 BT_ERR("Unknown link type");
3901 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler for a link type: disconnect every connection of
 * @type that still has unacknowledged packets outstanding.
 */
3905 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3907 struct hci_conn_hash *h = &hdev->conn_hash;
3910 BT_ERR("%s link tx timeout", hdev->name);
3914 /* Kill stalled connections */
3915 list_for_each_entry_rcu(c, &h->list, list) {
3916 if (c->type == type && c->sent) {
3917 BT_ERR("%s killing stalled connection %pMR",
3918 hdev->name, &c->dst);
3919 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among connections of @type, pick the
 * channel holding the highest-priority queued skb, breaking ties by
 * fewest packets in flight, and compute its send quota into *quote
 * from the matching credit pool.
 */
3926 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3929 struct hci_conn_hash *h = &hdev->conn_hash;
3930 struct hci_chan *chan = NULL;
3931 unsigned int num = 0, min = ~0, cur_prio = 0;
3932 struct hci_conn *conn;
3933 int cnt, q, conn_num = 0;
3935 BT_DBG("%s", hdev->name);
3939 list_for_each_entry_rcu(conn, &h->list, list) {
3940 struct hci_chan *tmp;
3942 if (conn->type != type)
3945 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3950 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3951 struct sk_buff *skb;
3953 if (skb_queue_empty(&tmp->data_q))
/* Compare against the head skb's priority only. */
3956 skb = skb_peek(&tmp->data_q);
3957 if (skb->priority < cur_prio)
3960 if (skb->priority > cur_prio) {
3963 cur_prio = skb->priority;
3968 if (conn->sent < min) {
3974 if (hci_conn_num(hdev, type) == conn_num)
/* Select the credit pool for the winning channel's link type. */
3983 switch (chan->conn->type) {
3985 cnt = hdev->acl_cnt;
3988 cnt = hdev->block_cnt;
3992 cnt = hdev->sco_cnt;
3995 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3999 BT_ERR("Unknown link type");
4004 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after a scheduling round, bump the priority
 * of head skbs on lower-priority channels (up to HCI_PRIO_MAX - 1) so
 * they eventually get scheduled.
 */
4008 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4010 struct hci_conn_hash *h = &hdev->conn_hash;
4011 struct hci_conn *conn;
4014 BT_DBG("%s", hdev->name);
4018 list_for_each_entry_rcu(conn, &h->list, list) {
4019 struct hci_chan *chan;
4021 if (conn->type != type)
4024 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4029 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4030 struct sk_buff *skb;
4037 if (skb_queue_empty(&chan->data_q))
4040 skb = skb_peek(&chan->data_q);
/* Already at the promotion ceiling; skip. */
4041 if (skb->priority >= HCI_PRIO_MAX - 1)
4044 skb->priority = HCI_PRIO_MAX - 1;
4046 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4050 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by @skb's payload
 * (excluding the ACL header), rounded up.
 */
4058 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4060 /* Calculate count of blocks used by this packet */
4061 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If all ACL credits are exhausted (@cnt == 0) and nothing has been
 * sent for longer than HCI_ACL_TX_TIMEOUT, treat the ACL link as
 * stalled.  Skipped for unconfigured controllers.
 */
4064 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4066 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4067 /* ACL tx timeout must be longer than maximum
4068 * link supervision timeout (40.9 seconds) */
4069 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4070 HCI_ACL_TX_TIMEOUT))
4071 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while ACL credits remain, drain each
 * selected channel up to its quote, stopping early if a lower-priority
 * skb reaches the queue head.  Recalculate priorities afterwards if
 * anything was sent.
 */
4075 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4077 unsigned int cnt = hdev->acl_cnt;
4078 struct hci_chan *chan;
4079 struct sk_buff *skb;
4082 __check_timeout(hdev, cnt);
4084 while (hdev->acl_cnt &&
4085 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4086 u32 priority = (skb_peek(&chan->data_q))->priority;
4087 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4088 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4089 skb->len, skb->priority);
4091 /* Stop if priority has changed */
4092 if (skb->priority < priority)
4095 skb = skb_dequeue(&chan->data_q);
4097 hci_conn_enter_active_mode(chan->conn,
4098 bt_cb(skb)->force_active);
4100 hci_send_frame(hdev, skb);
4101 hdev->acl_last_tx = jiffies;
/* Something was sent: rebalance channel priorities. */
4109 if (cnt != hdev->acl_cnt)
4110 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (flow control mode BLOCK_BASED): like the
 * packet scheduler but credits are data blocks, so each skb consumes
 * __get_blocks() credits and is skipped if it would overrun the pool.
 * AMP controllers schedule AMP_LINK instead of ACL_LINK.
 */
4113 static void hci_sched_acl_blk(struct hci_dev *hdev)
4115 unsigned int cnt = hdev->block_cnt;
4116 struct hci_chan *chan;
4117 struct sk_buff *skb;
4121 __check_timeout(hdev, cnt);
4123 BT_DBG("%s", hdev->name);
4125 if (hdev->dev_type == HCI_AMP)
4130 while (hdev->block_cnt > 0 &&
4131 (chan = hci_chan_sent(hdev, type, &quote))) {
4132 u32 priority = (skb_peek(&chan->data_q))->priority;
4133 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4136 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4137 skb->len, skb->priority);
4139 /* Stop if priority has changed */
4140 if (skb->priority < priority)
4143 skb = skb_dequeue(&chan->data_q);
/* Skip skbs that need more blocks than remain. */
4145 blocks = __get_blocks(hdev, skb);
4146 if (blocks > hdev->block_cnt)
4149 hci_conn_enter_active_mode(chan->conn,
4150 bt_cb(skb)->force_active);
4152 hci_send_frame(hdev, skb);
4153 hdev->acl_last_tx = jiffies;
4155 hdev->block_cnt -= blocks;
4158 chan->sent += blocks;
4159 chan->conn->sent += blocks;
4163 if (cnt != hdev->block_cnt)
4164 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet- or block-based scheduler
 * according to the controller's flow control mode, skipping early if
 * the device has no links of the relevant type.
 */
4167 static void hci_sched_acl(struct hci_dev *hdev)
4169 BT_DBG("%s", hdev->name);
4171 /* No ACL link over BR/EDR controller */
4172 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4175 /* No AMP link over AMP controller */
4176 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4179 switch (hdev->flow_ctl_mode) {
4180 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4181 hci_sched_acl_pkt(hdev);
4184 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4185 hci_sched_acl_blk(hdev);
/* SCO scheduler: while SCO credits remain, drain the selected SCO
 * connection's queue up to its quote.
 */
4191 static void hci_sched_sco(struct hci_dev *hdev)
4193 struct hci_conn *conn;
4194 struct sk_buff *skb;
4197 BT_DBG("%s", hdev->name);
4199 if (!hci_conn_num(hdev, SCO_LINK))
4202 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4203 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4204 BT_DBG("skb %p len %d", skb, skb->len);
4205 hci_send_frame(hdev, skb);
/* Guard the sent counter against wrap-around. */
4208 if (conn->sent == ~0)
/* eSCO scheduler: identical to hci_sched_sco() but for ESCO_LINK
 * connections; both share the sco_cnt credit pool.
 */
4214 static void hci_sched_esco(struct hci_dev *hdev)
4216 struct hci_conn *conn;
4217 struct sk_buff *skb;
4220 BT_DBG("%s", hdev->name);
4222 if (!hci_conn_num(hdev, ESCO_LINK))
4225 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4227 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4228 BT_DBG("skb %p len %d", skb, skb->len);
4229 hci_send_frame(hdev, skb);
/* Guard the sent counter against wrap-around. */
4232 if (conn->sent == ~0)
/* LE scheduler: drain LE channels by priority using the LE credit
 * pool, or the shared ACL pool on controllers without dedicated LE
 * buffers.  Applies a 45 s stall timeout like the ACL path.
 */
4238 static void hci_sched_le(struct hci_dev *hdev)
4240 struct hci_chan *chan;
4241 struct sk_buff *skb;
4242 int quote, cnt, tmp;
4244 BT_DBG("%s", hdev->name);
4246 if (!hci_conn_num(hdev, LE_LINK))
4249 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4250 /* LE tx timeout must be longer than maximum
4251 * link supervision timeout (40.9 seconds) */
4252 if (!hdev->le_cnt && hdev->le_pkts &&
4253 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4254 hci_link_tx_to(hdev, LE_LINK);
/* No LE buffers => borrow ACL credits. */
4257 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4259 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4260 u32 priority = (skb_peek(&chan->data_q))->priority;
4261 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4262 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4263 skb->len, skb->priority);
4265 /* Stop if priority has changed */
4266 if (skb->priority < priority)
4269 skb = skb_dequeue(&chan->data_q);
4271 hci_send_frame(hdev, skb);
4272 hdev->le_last_tx = jiffies;
/* Write back the remaining credits to the pool we borrowed from. */
4283 hdev->acl_cnt = cnt;
4286 hci_prio_recalculate(hdev, LE_LINK);
/* TX work handler: run every per-link-type scheduler (unless the
 * device is in user-channel mode, where userspace owns the traffic),
 * then flush any raw packets straight to the driver.
 */
4289 static void hci_tx_work(struct work_struct *work)
4291 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4292 struct sk_buff *skb;
4294 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4295 hdev->sco_cnt, hdev->le_cnt);
4297 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4298 /* Schedule queues and send stuff to HCI driver */
4299 hci_sched_acl(hdev);
4300 hci_sched_sco(hdev);
4301 hci_sched_esco(hdev);
4305 /* Send next queued raw (unknown type) packet */
4306 while ((skb = skb_dequeue(&hdev->raw_q)))
4307 hci_send_frame(hdev, skb);
4310 /* ----- HCI RX task (incoming data processing) ----- */
4312 /* ACL data packet */
/* RX path for ACL data: unpack handle/flags from the header, look up
 * the connection, and pass the payload to L2CAP; log and drop packets
 * for unknown handles.
 */
4313 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4315 struct hci_acl_hdr *hdr = (void *) skb->data;
4316 struct hci_conn *conn;
4317 __u16 handle, flags;
4319 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* The 16-bit field carries both the handle and the PB/BC flags. */
4321 handle = __le16_to_cpu(hdr->handle);
4322 flags = hci_flags(handle);
4323 handle = hci_handle(handle);
4325 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4328 hdev->stat.acl_rx++;
4331 conn = hci_conn_hash_lookup_handle(hdev, handle);
4332 hci_dev_unlock(hdev);
4335 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4337 /* Send to upper protocol */
4338 l2cap_recv_acldata(conn, skb, flags);
4341 BT_ERR("%s ACL packet for unknown connection handle %d",
4342 hdev->name, handle);
4348 /* SCO data packet */
/* RX path for SCO data: look up the connection by handle and pass the
 * payload to the SCO layer; log and drop packets for unknown handles.
 */
4349 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4351 struct hci_sco_hdr *hdr = (void *) skb->data;
4352 struct hci_conn *conn;
4355 skb_pull(skb, HCI_SCO_HDR_SIZE);
4357 handle = __le16_to_cpu(hdr->handle);
4359 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4361 hdev->stat.sco_rx++;
4364 conn = hci_conn_hash_lookup_handle(hdev, handle);
4365 hci_dev_unlock(hdev);
4368 /* Send to upper protocol */
4369 sco_recv_scodata(conn, skb);
4372 BT_ERR("%s SCO packet for unknown connection handle %d",
4373 hdev->name, handle);
/* A request is complete when the next queued command is flagged as
 * the start of a new request (i.e. nothing of the current request
 * remains on cmd_q).
 */
4379 static bool hci_req_is_complete(struct hci_dev *hdev)
4381 struct sk_buff *skb;
4383 skb = skb_peek(&hdev->cmd_q);
4387 return bt_cb(skb)->hci.req_start;
4390 static void hci_resend_last(struct hci_dev *hdev)
/* Re-queue a clone of the last command that was sent (hdev->sent_cmd)
 * at the HEAD of cmd_q and kick the command worker, so it is retried
 * before anything else. Used by hci_req_cmd_complete() when a
 * spontaneous reset-complete event orphaned the pending command.
 * An HCI_OP_RESET itself is deliberately never resent.
 *
 * NOTE(review): the `opcode` declaration, the early `return` statements
 * after the two guards, the skb NULL check after skb_clone() and the
 * braces are not visible in this extract — confirm against the full
 * file.
 */
4392 struct hci_command_hdr *sent;
4393 struct sk_buff *skb;
4396 if (!hdev->sent_cmd)
4399 sent = (void *) hdev->sent_cmd->data;
4400 opcode = __le16_to_cpu(sent->opcode);
4401 if (opcode == HCI_OP_RESET)
/* Clone rather than move: sent_cmd must stay valid so the eventual
 * completion event can still be matched against it. */
4404 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4408 skb_queue_head(&hdev->cmd_q, skb);
4409 queue_work(hdev->workqueue, &hdev->cmd_work);
4412 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4413 hci_req_complete_t *req_complete,
4414 hci_req_complete_skb_t *req_complete_skb)
/* Decide whether the HCI request that `opcode` belonged to is now
 * finished and, if so, hand back its completion callback through
 * *req_complete or *req_complete_skb (whichever variant the request
 * registered). On failure (`status` != 0) the whole remainder of the
 * request is aborted and its still-queued commands are flushed from
 * cmd_q under the queue lock.
 *
 * NOTE(review): this extract is missing lines (braces, several `return`
 * statements after the guards, and the per-iteration kfree_skb in the
 * flush loop) — confirm the exact control flow against the full file.
 */
4416 struct sk_buff *skb;
4417 unsigned long flags;
4419 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4421 /* If the completed command doesn't match the last one that was
4422 * sent we need to do special handling of it.
4424 if (!hci_sent_cmd_data(hdev, opcode)) {
4425 /* Some CSR based controllers generate a spontaneous
4426 * reset complete event during init and any pending
4427 * command will never be completed. In such a case we
4428 * need to resend whatever was the last sent
4431 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4432 hci_resend_last(hdev);
4437 /* If the command succeeded and there's still more commands in
4438 * this request the request is not yet complete.
4440 if (!status && !hci_req_is_complete(hdev))
4443 /* If this was the last command in a request the complete
4444 * callback would be found in hdev->sent_cmd instead of the
4445 * command queue (hdev->cmd_q).
4447 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4448 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4452 if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
4453 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4457 /* Remove all pending commands belonging to this request */
4458 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4459 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Hit the start of the NEXT request: push it back and stop — it does
 * not belong to the request being aborted. */
4460 if (bt_cb(skb)->hci.req_start) {
4461 __skb_queue_head(&hdev->cmd_q, skb);
/* The aborted request's callback may ride on any of its flushed
 * commands; keep the last one seen. */
4465 *req_complete = bt_cb(skb)->hci.req_complete;
4466 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4469 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4472 static void hci_rx_work(struct work_struct *work)
/* RX worker: drain hdev->rx_q and dispatch each packet by type —
 * events to hci_event_packet(), ACL data to hci_acldata_packet(),
 * SCO data to hci_scodata_packet(). Every packet is first mirrored to
 * the monitor channel and, in promiscuous mode, to raw sockets.
 *
 * NOTE(review): several lines are missing from this extract — the
 * HCI_EVENT_PKT case label, the `break`s / `continue`s, the `default:
 * kfree_skb(skb)` arm and closing braces. Confirm the dispatch
 * structure against the full file.
 */
4474 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4475 struct sk_buff *skb;
4477 BT_DBG("%s", hdev->name);
4479 while ((skb = skb_dequeue(&hdev->rx_q))) {
4480 /* Send copy to monitor */
4481 hci_send_to_monitor(hdev, skb);
4483 if (atomic_read(&hdev->promisc)) {
4484 /* Send copy to the sockets */
4485 hci_send_to_sock(hdev, skb);
4488 /* If the device has been opened in HCI_USER_CHANNEL,
4489 * the userspace has exclusive access to device.
4490 * When device is HCI_INIT, we still need to process
4491 * the data packets to the driver in order
4492 * to complete its setup().
4494 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4495 !test_bit(HCI_INIT, &hdev->flags)) {
4500 if (test_bit(HCI_INIT, &hdev->flags)) {
4501 /* Don't process data packets in this states. */
4502 switch (bt_cb(skb)->pkt_type) {
4503 case HCI_ACLDATA_PKT:
4504 case HCI_SCODATA_PKT:
/* Normal dispatch by packet type. */
4511 switch (bt_cb(skb)->pkt_type) {
4513 BT_DBG("%s Event packet", hdev->name);
4514 hci_event_packet(hdev, skb);
4517 case HCI_ACLDATA_PKT:
4518 BT_DBG("%s ACL data packet", hdev->name);
4519 hci_acldata_packet(hdev, skb);
4522 case HCI_SCODATA_PKT:
4523 BT_DBG("%s SCO data packet", hdev->name);
4524 hci_scodata_packet(hdev, skb);
4534 static void hci_cmd_work(struct work_struct *work)
4536 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4537 struct sk_buff *skb;
4539 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4540 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4542 /* Send queued commands */
4543 if (atomic_read(&hdev->cmd_cnt)) {
4544 skb = skb_dequeue(&hdev->cmd_q);
4548 kfree_skb(hdev->sent_cmd);
4550 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4551 if (hdev->sent_cmd) {
4552 atomic_dec(&hdev->cmd_cnt);
4553 hci_send_frame(hdev, skb);
4554 if (test_bit(HCI_RESET, &hdev->flags))
4555 cancel_delayed_work(&hdev->cmd_timer);
4557 schedule_delayed_work(&hdev->cmd_timer,
4560 skb_queue_head(&hdev->cmd_q, skb);
4561 queue_work(hdev->workqueue, &hdev->cmd_work);