2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
45 static void hci_rx_work(struct work_struct *work);
46 static void hci_cmd_work(struct work_struct *work);
47 static void hci_tx_work(struct work_struct *work);
50 LIST_HEAD(hci_dev_list);
51 DEFINE_RWLOCK(hci_dev_list_lock);
53 /* HCI callback list */
54 LIST_HEAD(hci_cb_list);
55 DEFINE_MUTEX(hci_cb_list_lock);
57 /* HCI ID Numbering */
58 static DEFINE_IDA(hci_index_ida);
60 /* ---- HCI debugfs entries ---- */
/* debugfs read handler for "dut_mode": reports the current state of the
 * HCI_DUT_MODE device flag as a single 'Y'/'N' character.
 */
62 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
65 struct hci_dev *hdev = file->private_data;
/* 'Y' when Device Under Test mode is enabled, 'N' otherwise. */
68 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
/* Copy flag character (plus trailing byte) to userspace. */
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* debugfs write handler for "dut_mode": parses a boolean from userspace and
 * enables/disables Device Under Test mode via the corresponding HCI command.
 */
74 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
75 size_t count, loff_t *ppos)
77 struct hci_dev *hdev = file->private_data;
/* Clamp the copy so the buffer keeps room for a terminating NUL. */
80 size_t buf_size = min(count, (sizeof(buf)-1));
/* DUT mode can only be toggled while the controller is up. */
83 if (!test_bit(HCI_UP, &hdev->flags))
86 if (copy_from_user(buf, user_buf, buf_size))
/* strtobool() accepts "0"/"1"/"y"/"n" style input. */
90 if (strtobool(buf, &enable))
/* No-op if the requested state matches the current flag. */
93 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
/* Serialize against other synchronous HCI requests. */
96 hci_req_sync_lock(hdev);
/* Enable uses HCI_OP_ENABLE_DUT_MODE; disable falls back to HCI reset. */
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
103 hci_req_sync_unlock(hdev);
/* Toggle the flag only after the command path completed. */
110 hci_dev_change_flag(hdev, HCI_DUT_MODE);
/* File operations for the "dut_mode" debugfs entry. */
115 static const struct file_operations dut_mode_fops = {
117 .read = dut_mode_read,
118 .write = dut_mode_write,
119 .llseek = default_llseek,
/* debugfs read handler for "vendor_diag": reports the HCI_VENDOR_DIAG
 * device flag as a single 'Y'/'N' character.
 */
122 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
125 struct hci_dev *hdev = file->private_data;
128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* debugfs write handler for "vendor_diag": parses a boolean and toggles the
 * vendor diagnostic mode through the driver's set_diag() callback, updating
 * the HCI_VENDOR_DIAG flag accordingly.
 */
134 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135 size_t count, loff_t *ppos)
137 struct hci_dev *hdev = file->private_data;
/* Leave room for the NUL terminator added below. */
139 size_t buf_size = min(count, (sizeof(buf)-1));
143 if (copy_from_user(buf, user_buf, buf_size))
146 buf[buf_size] = '\0';
147 if (strtobool(buf, &enable))
150 /* When the diagnostic flags are not persistent and the transport
151 * is not active or in user channel operation, then there is no need
152 * for the vendor callback. Instead just store the desired value and
153 * the setting will be programmed when the controller gets powered on.
155 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
156 (!test_bit(HCI_RUNNING, &hdev->flags) ||
157 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
/* Invoke the vendor callback under the request-sync lock. */
160 hci_req_sync_lock(hdev);
161 err = hdev->set_diag(hdev, enable);
162 hci_req_sync_unlock(hdev);
/* Mirror the requested state into the device flag. */
169 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
171 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
/* File operations for the "vendor_diag" debugfs entry. */
176 static const struct file_operations vendor_diag_fops = {
178 .read = vendor_diag_read,
179 .write = vendor_diag_write,
180 .llseek = default_llseek,
/* Create the basic per-controller debugfs entries ("dut_mode" and
 * "vendor_diag") under the device's debugfs directory.
 */
183 static void hci_debugfs_create_basic(struct hci_dev *hdev)
185 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
189 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
/* Request builder: mark the controller as resetting and queue an HCI_Reset
 * command onto the request.
 */
193 static int hci_reset_req(struct hci_request *req, unsigned long opt)
195 BT_DBG("%s %ld", req->hdev->name, opt);
/* HCI_RESET is cleared again when the reset completes (handled elsewhere). */
198 set_bit(HCI_RESET, &req->hdev->flags);
199 hci_req_add(req, HCI_OP_RESET, 0, NULL);
/* Stage-1 init for BR/EDR controllers: packet-based flow control and the
 * mandatory identity reads (features, version, BD address).
 */
203 static void bredr_init(struct hci_request *req)
205 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
207 /* Read Local Supported Features */
208 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
210 /* Read Local Version */
211 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
213 /* Read BD Address */
214 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific information reads.
 */
217 static void amp_init1(struct hci_request *req)
219 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
221 /* Read Local Version */
222 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
224 /* Read Local Supported Commands */
225 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
227 /* Read Local AMP Info */
228 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
230 /* Read Data Blk size */
231 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
233 /* Read Flow Control Mode */
234 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
236 /* Read Location Data */
237 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
/* Stage-2 init for AMP controllers: conditionally read local features, since
 * not every AMP controller supports the command (checked via commands[14]).
 */
240 static int amp_init2(struct hci_request *req)
242 /* Read Local Supported Features. Not all AMP controllers
243 * support this so it's placed conditionally in the second
246 if (req->hdev->commands[14] & 0x20)
247 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
/* Stage-1 init request: optionally reset the controller, then dispatch to
 * the type-specific init (BR/EDR vs AMP) based on hdev->dev_type.
 */
252 static int hci_init1_req(struct hci_request *req, unsigned long opt)
254 struct hci_dev *hdev = req->hdev;
256 BT_DBG("%s %ld", hdev->name, opt);
/* Skip the reset when the quirk says close already reset the device. */
259 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
260 hci_reset_req(req, 0);
262 switch (hdev->dev_type) {
270 BT_ERR("Unknown device type %d", hdev->dev_type);
/* BR/EDR stage-2 setup: queue the standard controller information reads,
 * clear all event filters, and program the connection-accept timeout.
 * (Fixes a mis-encoded "&param" that had been mangled into "¶m".)
 */
277 static void bredr_setup(struct hci_request *req)
282 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
283 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
285 /* Read Class of Device */
286 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
288 /* Read Local Name */
289 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
291 /* Read Voice Setting */
292 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
294 /* Read Number of Supported IAC */
295 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
297 /* Read Current IAC LAP */
298 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
300 /* Clear Event Filters */
301 flt_type = HCI_FLT_CLEAR_ALL;
302 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
304 /* Connection accept timeout ~20 secs */
305 param = cpu_to_le16(0x7d00);
306 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
/* LE stage-2 setup: read LE buffer size, local LE features and supported
 * states; LE-only controllers get HCI_LE_ENABLED set implicitly.
 */
309 static void le_setup(struct hci_request *req)
311 struct hci_dev *hdev = req->hdev;
313 /* Read LE Buffer Size */
314 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
316 /* Read LE Local Supported Features */
317 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
319 /* Read LE Supported States */
320 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
322 /* LE-only controllers have LE implicitly enabled */
323 if (!lmp_bredr_capable(hdev))
324 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
/* Build and queue the HCI Set Event Mask command. Starts from a BR/EDR
 * default mask (or a minimal mask for LE-only controllers) and then enables
 * additional event bits based on the controller's LMP features and the
 * supported-commands bitmap.
 */
327 static void hci_setup_event_mask(struct hci_request *req)
329 struct hci_dev *hdev = req->hdev;
331 /* The second byte is 0xff instead of 0x9f (two reserved bits
332 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
335 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
337 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
338 * any event mask for pre 1.2 devices.
340 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
343 if (lmp_bredr_capable(hdev)) {
344 events[4] |= 0x01; /* Flow Specification Complete */
346 /* Use a different default for LE-only devices */
347 memset(events, 0, sizeof(events));
348 events[1] |= 0x20; /* Command Complete */
349 events[1] |= 0x40; /* Command Status */
350 events[1] |= 0x80; /* Hardware Error */
352 /* If the controller supports the Disconnect command, enable
353 * the corresponding event. In addition enable packet flow
354 * control related events.
356 if (hdev->commands[0] & 0x20) {
357 events[0] |= 0x10; /* Disconnection Complete */
358 events[2] |= 0x04; /* Number of Completed Packets */
359 events[3] |= 0x02; /* Data Buffer Overflow */
362 /* If the controller supports the Read Remote Version
363 * Information command, enable the corresponding event.
365 if (hdev->commands[2] & 0x80)
366 events[1] |= 0x08; /* Read Remote Version Information
370 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
371 events[0] |= 0x80; /* Encryption Change */
372 events[5] |= 0x80; /* Encryption Key Refresh Complete */
/* Remaining bits only apply to BR/EDR-capable controllers. */
376 if (lmp_inq_rssi_capable(hdev) ||
377 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
378 events[4] |= 0x02; /* Inquiry Result with RSSI */
380 if (lmp_ext_feat_capable(hdev))
381 events[4] |= 0x04; /* Read Remote Extended Features Complete */
383 if (lmp_esco_capable(hdev)) {
384 events[5] |= 0x08; /* Synchronous Connection Complete */
385 events[5] |= 0x10; /* Synchronous Connection Changed */
388 if (lmp_sniffsubr_capable(hdev))
389 events[5] |= 0x20; /* Sniff Subrating */
391 if (lmp_pause_enc_capable(hdev))
392 events[5] |= 0x80; /* Encryption Key Refresh Complete */
394 if (lmp_ext_inq_capable(hdev))
395 events[5] |= 0x40; /* Extended Inquiry Result */
397 if (lmp_no_flush_capable(hdev))
398 events[7] |= 0x01; /* Enhanced Flush Complete */
400 if (lmp_lsto_capable(hdev))
401 events[6] |= 0x80; /* Link Supervision Timeout Changed */
403 if (lmp_ssp_capable(hdev)) {
404 events[6] |= 0x01; /* IO Capability Request */
405 events[6] |= 0x02; /* IO Capability Response */
406 events[6] |= 0x04; /* User Confirmation Request */
407 events[6] |= 0x08; /* User Passkey Request */
408 events[6] |= 0x10; /* Remote OOB Data Request */
409 events[6] |= 0x20; /* Simple Pairing Complete */
410 events[7] |= 0x04; /* User Passkey Notification */
411 events[7] |= 0x08; /* Keypress Notification */
412 events[7] |= 0x10; /* Remote Host Supported
413 * Features Notification
417 if (lmp_le_capable(hdev))
418 events[7] |= 0x20; /* LE Meta-Event */
420 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Stage-2 init request: dispatch AMP controllers to amp_init2(), run the
 * BR/EDR and/or LE setup paths, and queue the feature-dependent commands
 * (local commands read, SSP mode/EIR, inquiry mode, extended features,
 * authentication enable).
 */
423 static int hci_init2_req(struct hci_request *req, unsigned long opt)
425 struct hci_dev *hdev = req->hdev;
427 if (hdev->dev_type == HCI_AMP)
428 return amp_init2(req);
430 if (lmp_bredr_capable(hdev))
/* Not BR/EDR capable: make sure the BREDR_ENABLED flag is off. */
433 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
435 if (lmp_le_capable(hdev))
438 /* All Bluetooth 1.2 and later controllers should support the
439 * HCI command for reading the local supported commands.
441 * Unfortunately some controllers indicate Bluetooth 1.2 support,
442 * but do not have support for this command. If that is the case,
443 * the driver can quirk the behavior and skip reading the local
444 * supported commands.
446 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
447 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
448 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
450 if (lmp_ssp_capable(hdev)) {
451 /* When SSP is available, then the host features page
452 * should also be available as well. However some
453 * controllers list the max_page as 0 as long as SSP
454 * has not been enabled. To achieve proper debugging
455 * output, force the minimum max_page to 1 at least.
457 hdev->max_page = 0x01;
459 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
462 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
463 sizeof(mode), &mode);
/* SSP not enabled by the host: clear any stale EIR data. */
465 struct hci_cp_write_eir cp;
467 memset(hdev->eir, 0, sizeof(hdev->eir));
468 memset(&cp, 0, sizeof(cp));
470 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
474 if (lmp_inq_rssi_capable(hdev) ||
475 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
478 /* If Extended Inquiry Result events are supported, then
479 * they are clearly preferred over Inquiry Result with RSSI
482 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
484 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
487 if (lmp_inq_tx_pwr_capable(hdev))
488 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
490 if (lmp_ext_feat_capable(hdev)) {
491 struct hci_cp_read_local_ext_features cp;
494 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
498 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
500 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link policy from the controller's LMP capabilities
 * (role switch, hold, sniff, park) and queue Write Default Link Policy.
 */
507 static void hci_setup_link_policy(struct hci_request *req)
509 struct hci_dev *hdev = req->hdev;
510 struct hci_cp_write_def_link_policy cp;
513 if (lmp_rswitch_capable(hdev))
514 link_policy |= HCI_LP_RSWITCH;
515 if (lmp_hold_capable(hdev))
516 link_policy |= HCI_LP_HOLD;
517 if (lmp_sniff_capable(hdev))
518 link_policy |= HCI_LP_SNIFF;
519 if (lmp_park_capable(hdev))
520 link_policy |= HCI_LP_PARK;
522 cp.policy = cpu_to_le16(link_policy);
523 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported on dual-mode controllers so the host LE
 * support bit matches the HCI_LE_ENABLED flag; skipped on LE-only devices.
 */
526 static void hci_set_le_support(struct hci_request *req)
528 struct hci_dev *hdev = req->hdev;
529 struct hci_cp_write_le_host_supported cp;
531 /* LE-only devices do not support explicit enablement */
532 if (!lmp_bredr_capable(hdev))
535 memset(&cp, 0, sizeof(cp));
537 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
/* Only send the command when the desired value differs. */
542 if (cp.le != lmp_host_le_capable(hdev))
543 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and, only when a change is actually needed, queue the Set Event
 * Mask Page 2 command, enabling CSB master/slave and Authenticated Payload
 * Timeout events when the controller supports them.
 */
547 static void hci_set_event_mask_page_2(struct hci_request *req)
549 struct hci_dev *hdev = req->hdev;
550 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
551 bool changed = false;
553 /* If Connectionless Slave Broadcast master role is supported
554 * enable all necessary events for it.
556 if (lmp_csb_master_capable(hdev)) {
557 events[1] |= 0x40; /* Triggered Clock Capture */
558 events[1] |= 0x80; /* Synchronization Train Complete */
559 events[2] |= 0x10; /* Slave Page Response Timeout */
560 events[2] |= 0x20; /* CSB Channel Map Change */
564 /* If Connectionless Slave Broadcast slave role is supported
565 * enable all necessary events for it.
567 if (lmp_csb_slave_capable(hdev)) {
568 events[2] |= 0x01; /* Synchronization Train Received */
569 events[2] |= 0x02; /* CSB Receive */
570 events[2] |= 0x04; /* CSB Timeout */
571 events[2] |= 0x08; /* Truncated Page Complete */
575 /* Enable Authenticated Payload Timeout Expired event if supported */
576 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
581 /* Some Broadcom based controllers indicate support for Set Event
582 * Mask Page 2 command, but then actually do not support it. Since
583 * the default value is all bits set to zero, the command is only
584 * required if the event mask has to be changed. In case no change
585 * to the event mask is needed, skip this command.
588 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
589 sizeof(events), events);
/* Stage-3 init request: program the event mask, stored-link-key and page
 * scan reads, then the full LE configuration (LE event mask built from
 * le_features/commands bits, advertising TX power, white list, data length,
 * LE host support) and finally read the extended feature pages beyond 1.
 */
592 static int hci_init3_req(struct hci_request *req, unsigned long opt)
594 struct hci_dev *hdev = req->hdev;
597 hci_setup_event_mask(req);
/* Read Stored Link Key only if supported and not quirked broken. */
599 if (hdev->commands[6] & 0x20 &&
600 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
601 struct hci_cp_read_stored_link_key cp;
603 bacpy(&cp.bdaddr, BDADDR_ANY);
605 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
608 if (hdev->commands[5] & 0x10)
609 hci_setup_link_policy(req);
611 if (hdev->commands[8] & 0x01)
612 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
614 /* Some older Broadcom based Bluetooth 1.2 controllers do not
615 * support the Read Page Scan Type command. Check support for
616 * this command in the bit mask of supported commands.
618 if (hdev->commands[13] & 0x01)
619 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
/* LE-capable controllers: build the LE event mask bit by bit. */
621 if (lmp_le_capable(hdev)) {
624 memset(events, 0, sizeof(events));
626 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
627 events[0] |= 0x10; /* LE Long Term Key Request */
629 /* If controller supports the Connection Parameters Request
630 * Link Layer Procedure, enable the corresponding event.
632 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
633 events[0] |= 0x20; /* LE Remote Connection
637 /* If the controller supports the Data Length Extension
638 * feature, enable the corresponding event.
640 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
641 events[0] |= 0x40; /* LE Data Length Change */
643 /* If the controller supports Extended Scanner Filter
644 * Policies, enable the correspondig event.
646 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
647 events[1] |= 0x04; /* LE Direct Advertising
651 /* If the controller supports Channel Selection Algorithm #2
652 * feature, enable the corresponding event.
654 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
655 events[2] |= 0x08; /* LE Channel Selection
659 /* If the controller supports the LE Set Scan Enable command,
660 * enable the corresponding advertising report event.
662 if (hdev->commands[26] & 0x08)
663 events[0] |= 0x02; /* LE Advertising Report */
665 /* If the controller supports the LE Create Connection
666 * command, enable the corresponding event.
668 if (hdev->commands[26] & 0x10)
669 events[0] |= 0x01; /* LE Connection Complete */
671 /* If the controller supports the LE Connection Update
672 * command, enable the corresponding event.
674 if (hdev->commands[27] & 0x04)
675 events[0] |= 0x04; /* LE Connection Update
679 /* If the controller supports the LE Read Remote Used Features
680 * command, enable the corresponding event.
682 if (hdev->commands[27] & 0x20)
683 events[0] |= 0x08; /* LE Read Remote Used
687 /* If the controller supports the LE Read Local P-256
688 * Public Key command, enable the corresponding event.
690 if (hdev->commands[34] & 0x02)
691 events[0] |= 0x80; /* LE Read Local P-256
692 * Public Key Complete
695 /* If the controller supports the LE Generate DHKey
696 * command, enable the corresponding event.
698 if (hdev->commands[34] & 0x04)
699 events[1] |= 0x01; /* LE Generate DHKey Complete */
701 /* If the controller supports the LE Set Default PHY or
702 * LE Set PHY commands, enable the corresponding event.
704 if (hdev->commands[35] & (0x20 | 0x40))
705 events[1] |= 0x08; /* LE PHY Update Complete */
707 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
710 if (hdev->commands[25] & 0x40) {
711 /* Read LE Advertising Channel TX Power */
712 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
715 if (hdev->commands[26] & 0x40) {
716 /* Read LE White List Size */
717 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
721 if (hdev->commands[26] & 0x80) {
722 /* Clear LE White List */
723 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
726 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
727 /* Read LE Maximum Data Length */
728 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
730 /* Read LE Suggested Default Data Length */
731 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
734 hci_set_le_support(req);
737 /* Read features beyond page 1 if available */
738 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
739 struct hci_cp_read_local_ext_features cp;
742 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage-4 init request: delete stored link keys (when actually supported),
 * set event mask page 2, read codecs/MWS config/sync-train parameters, and
 * program Secure Connections, default LE data length and default PHY when
 * the respective features are available.
 */
749 static int hci_init4_req(struct hci_request *req, unsigned long opt)
751 struct hci_dev *hdev = req->hdev;
753 /* Some Broadcom based Bluetooth controllers do not support the
754 * Delete Stored Link Key command. They are clearly indicating its
755 * absence in the bit mask of supported commands.
757 * Check the supported commands and only if the the command is marked
758 * as supported send it. If not supported assume that the controller
759 * does not have actual support for stored link keys which makes this
760 * command redundant anyway.
762 * Some controllers indicate that they support handling deleting
763 * stored link keys, but they don't. The quirk lets a driver
764 * just disable this command.
766 if (hdev->commands[6] & 0x80 &&
767 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
768 struct hci_cp_delete_stored_link_key cp;
770 bacpy(&cp.bdaddr, BDADDR_ANY);
771 cp.delete_all = 0x01;
772 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
776 /* Set event mask page 2 if the HCI command for it is supported */
777 if (hdev->commands[22] & 0x04)
778 hci_set_event_mask_page_2(req);
780 /* Read local codec list if the HCI command is supported */
781 if (hdev->commands[29] & 0x20)
782 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
784 /* Get MWS transport configuration if the HCI command is supported */
785 if (hdev->commands[30] & 0x08)
786 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
788 /* Check for Synchronization Train support */
789 if (lmp_sync_train_capable(hdev))
790 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
792 /* Enable Secure Connections if supported and configured */
793 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
794 bredr_sc_enabled(hdev)) {
797 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
798 sizeof(support), &support);
801 /* Set Suggested Default Data Length to maximum if supported */
802 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
803 struct hci_cp_le_write_def_data_len cp;
805 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
806 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
807 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
810 /* Set Default PHY parameters if command is supported */
811 if (hdev->commands[35] & 0x20) {
812 struct hci_cp_le_set_default_phy cp;
814 /* No transmitter PHY or receiver PHY preferences */
819 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
/* Run the full synchronous init sequence for a configured controller:
 * stages 1-2 for all device types, stages 3-4 only for HCI_PRIMARY, then
 * create the debugfs entries when in setup or config phase.
 */
825 static int __hci_init(struct hci_dev *hdev)
829 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
/* Basic debugfs entries are only created during initial setup. */
833 if (hci_dev_test_flag(hdev, HCI_SETUP))
834 hci_debugfs_create_basic(hdev);
836 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
840 /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
841 * BR/EDR/LE type controllers. AMP controllers only need the
842 * first two stages of init.
844 if (hdev->dev_type != HCI_PRIMARY)
847 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
851 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
855 /* This function is only called when the controller is actually in
856 * configured state. When the controller is marked as unconfigured,
857 * this initialization procedure is not run.
859 * It means that it is possible that a controller runs through its
860 * setup phase and then discovers missing settings. If that is the
861 * case, then this function will not be called. It then will only
862 * be called during the config phase.
864 * So only when in setup phase or config phase, create the debugfs
865 * entries and register the SMP channels.
867 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
868 !hci_dev_test_flag(hdev, HCI_CONFIG))
871 hci_debugfs_create_common(hdev);
873 if (lmp_bredr_capable(hdev))
874 hci_debugfs_create_bredr(hdev);
876 if (lmp_le_capable(hdev))
877 hci_debugfs_create_le(hdev);
/* Minimal init request for unconfigured controllers: optional reset, read
 * local version, and read the BD address only when the driver provides a
 * set_bdaddr callback.
 */
882 static int hci_init0_req(struct hci_request *req, unsigned long opt)
884 struct hci_dev *hdev = req->hdev;
886 BT_DBG("%s %ld", hdev->name, opt);
889 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
890 hci_reset_req(req, 0);
892 /* Read Local Version */
893 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
895 /* Read BD Address */
896 if (hdev->set_bdaddr)
897 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Init path for unconfigured controllers: skip raw devices entirely, run
 * hci_init0_req synchronously, and create the basic debugfs entries during
 * the setup phase.
 */
902 static int __hci_unconf_init(struct hci_dev *hdev)
906 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
909 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
913 if (hci_dev_test_flag(hdev, HCI_SETUP))
914 hci_debugfs_create_basic(hdev);
/* Request builder: queue Write Scan Enable with the scan mode carried in
 * 'opt' (inquiry and/or page scan).
 */
919 static int hci_scan_req(struct hci_request *req, unsigned long opt)
923 BT_DBG("%s %x", req->hdev->name, scan);
925 /* Inquiry and Page scans */
926 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder: queue Write Authentication Enable with the value from
 * 'opt'.
 */
930 static int hci_auth_req(struct hci_request *req, unsigned long opt)
934 BT_DBG("%s %x", req->hdev->name, auth);
937 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder: queue Write Encryption Mode with the value from 'opt'. */
941 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
945 BT_DBG("%s %x", req->hdev->name, encrypt);
948 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder: queue Write Default Link Policy with the little-endian
 * policy value taken from 'opt'.
 */
952 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
954 __le16 policy = cpu_to_le16(opt);
956 BT_DBG("%s %x", req->hdev->name, policy);
958 /* Default link policy */
959 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
963 /* Get HCI device by index.
964 * Device is held on return. */
/* Walk hci_dev_list under the read lock; the caller must drop the
 * reference with hci_dev_put() when done.
 */
965 struct hci_dev *hci_dev_get(int index)
967 struct hci_dev *hdev = NULL, *d;
974 read_lock(&hci_dev_list_lock);
975 list_for_each_entry(d, &hci_dev_list, list) {
976 if (d->id == index) {
977 hdev = hci_dev_hold(d);
981 read_unlock(&hci_dev_list_lock);
985 /* ---- Inquiry support ---- */
/* Return true while discovery is in an active phase (finding devices or
 * resolving names).
 */
987 bool hci_discovery_active(struct hci_dev *hdev)
989 struct discovery_state *discov = &hdev->discovery;
991 switch (discov->state) {
992 case DISCOVERY_FINDING:
993 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" notifications; a no-op when the state is unchanged.
 */
1001 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1003 int old_state = hdev->discovery.state;
1005 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1007 if (old_state == state)
1010 hdev->discovery.state = state;
1013 case DISCOVERY_STOPPED:
/* Discovery ended: background scanning may need re-evaluation. */
1014 hci_update_background_scan(hdev);
1016 if (old_state != DISCOVERY_STARTING)
1017 mgmt_discovering(hdev, 0);
1019 case DISCOVERY_STARTING:
1021 case DISCOVERY_FINDING:
1022 mgmt_discovering(hdev, 1);
1024 case DISCOVERY_RESOLVING:
1026 case DISCOVERY_STOPPING:
/* Discard every entry in the inquiry cache and reinitialize the unknown
 * and resolve sub-lists.
 */
1031 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1033 struct discovery_state *cache = &hdev->discovery;
1034 struct inquiry_entry *p, *n;
1036 list_for_each_entry_safe(p, n, &cache->all, all) {
1041 INIT_LIST_HEAD(&cache->unknown);
1042 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry cache entry by Bluetooth address in the full ("all")
 * list; returns the entry or falls through when not found.
 */
1045 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1048 struct discovery_state *cache = &hdev->discovery;
1049 struct inquiry_entry *e;
1051 BT_DBG("cache %p, %pMR", cache, bdaddr);
1053 list_for_each_entry(e, &cache->all, all) {
1054 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry by address on the "unknown" (name not yet known) list. */
1061 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1064 struct discovery_state *cache = &hdev->discovery;
1065 struct inquiry_entry *e;
1067 BT_DBG("cache %p, %pMR", cache, bdaddr);
1069 list_for_each_entry(e, &cache->unknown, list) {
1070 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry on the "resolve" list: with BDADDR_ANY the first entry in
 * the given name_state matches, otherwise match on the exact address.
 */
1077 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1081 struct discovery_state *cache = &hdev->discovery;
1082 struct inquiry_entry *e;
1084 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1086 list_for_each_entry(e, &cache->resolve, list) {
1087 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1089 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert an entry into the "resolve" list keeping it ordered: entries
 * with pending names first, then by descending |RSSI|.
 */
1096 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1097 struct inquiry_entry *ie)
1099 struct discovery_state *cache = &hdev->discovery;
1100 struct list_head *pos = &cache->resolve;
1101 struct inquiry_entry *p;
/* Unlink first, then find the new ordered position. */
1103 list_del(&ie->list);
1105 list_for_each_entry(p, &cache->resolve, list) {
1106 if (p->name_state != NAME_PENDING &&
1107 abs(p->data.rssi) >= abs(ie->data.rssi))
1112 list_add(&ie->list, pos);
/* Insert or refresh an inquiry cache entry for the given inquiry data and
 * return MGMT_DEV_FOUND_* flags describing the result (legacy pairing,
 * confirm-name needed).
 */
1115 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1118 struct discovery_state *cache = &hdev->discovery;
1119 struct inquiry_entry *ie;
1122 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* Fresh inquiry data invalidates any stored remote OOB data. */
1124 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1126 if (!data->ssp_mode)
1127 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1129 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1131 if (!ie->data.ssp_mode)
1132 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* Updated RSSI on a name-needed entry re-sorts the resolve list. */
1134 if (ie->name_state == NAME_NEEDED &&
1135 data->rssi != ie->data.rssi) {
1136 ie->data.rssi = data->rssi;
1137 hci_inquiry_cache_update_resolve(hdev, ie);
1143 /* Entry not in the cache. Add new one. */
1144 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1146 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1150 list_add(&ie->all, &cache->all);
1153 ie->name_state = NAME_KNOWN;
1155 ie->name_state = NAME_NOT_KNOWN;
1156 list_add(&ie->list, &cache->unknown);
/* Name just became known: promote the entry off its sub-list. */
1160 if (name_known && ie->name_state != NAME_KNOWN &&
1161 ie->name_state != NAME_PENDING) {
1162 ie->name_state = NAME_KNOWN;
1163 list_del(&ie->list);
1166 memcpy(&ie->data, data, sizeof(*data));
1167 ie->timestamp = jiffies;
1168 cache->timestamp = jiffies;
1170 if (ie->name_state == NAME_NOT_KNOWN)
1171 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to 'num' cached inquiry entries into 'buf' as an array of
 * struct inquiry_info and return the number copied.
 */
1177 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1179 struct discovery_state *cache = &hdev->discovery;
1180 struct inquiry_info *info = (struct inquiry_info *) buf;
1181 struct inquiry_entry *e;
1184 list_for_each_entry(e, &cache->all, all) {
1185 struct inquiry_data *data = &e->data;
1190 bacpy(&info->bdaddr, &data->bdaddr);
1191 info->pscan_rep_mode = data->pscan_rep_mode;
1192 info->pscan_period_mode = data->pscan_period_mode;
1193 info->pscan_mode = data->pscan_mode;
1194 memcpy(info->dev_class, data->dev_class, 3);
1195 info->clock_offset = data->clock_offset;
1201 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder: queue an HCI Inquiry command from the parameters in the
 * hci_inquiry_req passed via 'opt'; skipped if an inquiry is already
 * in progress.
 */
1205 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1207 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1208 struct hci_dev *hdev = req->hdev;
1209 struct hci_cp_inquiry cp;
1211 BT_DBG("%s", hdev->name);
1213 if (test_bit(HCI_INQUIRY, &hdev->flags))
/* Start Inquiry with the requested LAP, length and response limit. */
1217 memcpy(&cp.lap, &ir->lap, 3);
1218 cp.length = ir->length;
1219 cp.num_rsp = ir->num_rsp;
1220 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl backend: validate the request, optionally flush the
 * inquiry cache and run a new inquiry, wait for it to finish, then copy
 * the cached results back to userspace.
 */
1225 int hci_inquiry(void __user *arg)
1227 __u8 __user *ptr = arg;
1228 struct hci_inquiry_req ir;
1229 struct hci_dev *hdev;
1230 int err = 0, do_inquiry = 0, max_rsp;
1234 if (copy_from_user(&ir, ptr, sizeof(ir)))
1237 hdev = hci_dev_get(ir.dev_id);
/* Reject devices in states where inquiry is not allowed. */
1241 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1246 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1251 if (hdev->dev_type != HCI_PRIMARY) {
1256 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1261 /* Restrict maximum inquiry length to 60 seconds */
1262 if (ir.length > 60) {
/* Start a fresh inquiry if the cache is stale/empty or a flush
 * was explicitly requested.
 */
1268 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1269 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1270 hci_inquiry_cache_flush(hdev);
1273 hci_dev_unlock(hdev);
/* ir.length is in 1.28 s units; 2000 ms is a safe upper bound per unit. */
1275 timeo = ir.length * msecs_to_jiffies(2000);
1278 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1283 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1284 * cleared). If it is interrupted by a signal, return -EINTR.
1286 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1287 TASK_INTERRUPTIBLE)) {
1293 /* for unlimited number of responses we will use buffer with
1296 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1298 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1299 * copy it to the user space.
1301 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1308 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1309 hci_dev_unlock(hdev);
1311 BT_DBG("num_rsp %d", ir.num_rsp);
1313 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1315 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1328 static int hci_dev_do_open(struct hci_dev *hdev)
1332 BT_DBG("%s %p", hdev->name, hdev);
1334 hci_req_sync_lock(hdev);
1336 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1341 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1342 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1343 /* Check for rfkill but allow the HCI setup stage to
1344 * proceed (which in itself doesn't cause any RF activity).
1346 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1351 /* Check for valid public address or a configured static
1352 * random adddress, but let the HCI setup proceed to
1353 * be able to determine if there is a public address
1356 * In case of user channel usage, it is not important
1357 * if a public address or static random address is
1360 * This check is only valid for BR/EDR controllers
1361 * since AMP controllers do not have an address.
1363 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1364 hdev->dev_type == HCI_PRIMARY &&
1365 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1366 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1367 ret = -EADDRNOTAVAIL;
1372 if (test_bit(HCI_UP, &hdev->flags)) {
1377 if (hdev->open(hdev)) {
1382 set_bit(HCI_RUNNING, &hdev->flags);
1383 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1385 atomic_set(&hdev->cmd_cnt, 1);
1386 set_bit(HCI_INIT, &hdev->flags);
1388 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1389 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1392 ret = hdev->setup(hdev);
1394 /* The transport driver can set these quirks before
1395 * creating the HCI device or in its setup callback.
1397 * In case any of them is set, the controller has to
1398 * start up as unconfigured.
1400 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1401 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1402 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1404 /* For an unconfigured controller it is required to
1405 * read at least the version information provided by
1406 * the Read Local Version Information command.
1408 * If the set_bdaddr driver callback is provided, then
1409 * also the original Bluetooth public device address
1410 * will be read using the Read BD Address command.
1412 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1413 ret = __hci_unconf_init(hdev);
1416 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1417 /* If public address change is configured, ensure that
1418 * the address gets programmed. If the driver does not
1419 * support changing the public address, fail the power
1422 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1424 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1426 ret = -EADDRNOTAVAIL;
1430 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1431 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1432 ret = __hci_init(hdev);
1433 if (!ret && hdev->post_init)
1434 ret = hdev->post_init(hdev);
1438 /* If the HCI Reset command is clearing all diagnostic settings,
1439 * then they need to be reprogrammed after the init procedure
1442 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1443 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1444 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1445 ret = hdev->set_diag(hdev, true);
1447 clear_bit(HCI_INIT, &hdev->flags);
1451 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1452 set_bit(HCI_UP, &hdev->flags);
1453 hci_sock_dev_event(hdev, HCI_DEV_UP);
1454 hci_leds_update_powered(hdev, true);
1455 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1456 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1457 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1458 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1459 hci_dev_test_flag(hdev, HCI_MGMT) &&
1460 hdev->dev_type == HCI_PRIMARY) {
1461 ret = __hci_req_hci_power_on(hdev);
1462 mgmt_power_on(hdev, ret);
1465 /* Init failed, cleanup */
1466 flush_work(&hdev->tx_work);
1468 /* Since hci_rx_work() is possible to awake new cmd_work
1469 * it should be flushed first to avoid unexpected call of
1472 flush_work(&hdev->rx_work);
1473 flush_work(&hdev->cmd_work);
1475 skb_queue_purge(&hdev->cmd_q);
1476 skb_queue_purge(&hdev->rx_q);
1481 if (hdev->sent_cmd) {
1482 kfree_skb(hdev->sent_cmd);
1483 hdev->sent_cmd = NULL;
1486 clear_bit(HCI_RUNNING, &hdev->flags);
1487 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1490 hdev->flags &= BIT(HCI_RAW);
1494 hci_req_sync_unlock(hdev);
1498 /* ---- HCI ioctl helpers ---- */
1500 int hci_dev_open(__u16 dev)
1502 struct hci_dev *hdev;
1505 hdev = hci_dev_get(dev);
1509 /* Devices that are marked as unconfigured can only be powered
1510 * up as user channel. Trying to bring them up as normal devices
1511 * will result into a failure. Only user channel operation is
1514 * When this function is called for a user channel, the flag
1515 * HCI_USER_CHANNEL will be set first before attempting to
1518 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1519 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1524 /* We need to ensure that no other power on/off work is pending
1525 * before proceeding to call hci_dev_do_open. This is
1526 * particularly important if the setup procedure has not yet
1529 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1530 cancel_delayed_work(&hdev->power_off);
1532 /* After this call it is guaranteed that the setup procedure
1533 * has finished. This means that error conditions like RFKILL
1534 * or no valid public or static random address apply.
1536 flush_workqueue(hdev->req_workqueue);
1538 /* For controllers not using the management interface and that
1539 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1540 * so that pairing works for them. Once the management interface
1541 * is in use this bit will be cleared again and userspace has
1542 * to explicitly enable it.
1544 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1545 !hci_dev_test_flag(hdev, HCI_MGMT))
1546 hci_dev_set_flag(hdev, HCI_BONDABLE);
1548 err = hci_dev_do_open(hdev);
1555 /* This function requires the caller holds hdev->lock */
1556 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1558 struct hci_conn_params *p;
1560 list_for_each_entry(p, &hdev->le_conn_params, list) {
1562 hci_conn_drop(p->conn);
1563 hci_conn_put(p->conn);
1566 list_del_init(&p->action);
1569 BT_DBG("All LE pending actions cleared");
1572 int hci_dev_do_close(struct hci_dev *hdev)
1576 BT_DBG("%s %p", hdev->name, hdev);
1578 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1579 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1580 test_bit(HCI_UP, &hdev->flags)) {
1581 /* Execute vendor specific shutdown routine */
1583 hdev->shutdown(hdev);
1586 cancel_delayed_work(&hdev->power_off);
1588 hci_request_cancel_all(hdev);
1589 hci_req_sync_lock(hdev);
1591 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1592 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1593 test_bit(HCI_UP, &hdev->flags)) {
1594 /* Execute vendor specific shutdown routine */
1596 hdev->shutdown(hdev);
1599 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1600 cancel_delayed_work_sync(&hdev->cmd_timer);
1601 hci_req_sync_unlock(hdev);
1605 hci_leds_update_powered(hdev, false);
1607 /* Flush RX and TX works */
1608 flush_work(&hdev->tx_work);
1609 flush_work(&hdev->rx_work);
1611 if (hdev->discov_timeout > 0) {
1612 hdev->discov_timeout = 0;
1613 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1614 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1617 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1618 cancel_delayed_work(&hdev->service_cache);
1620 if (hci_dev_test_flag(hdev, HCI_MGMT))
1621 cancel_delayed_work_sync(&hdev->rpa_expired);
1623 /* Avoid potential lockdep warnings from the *_flush() calls by
1624 * ensuring the workqueue is empty up front.
1626 drain_workqueue(hdev->workqueue);
1630 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1632 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1634 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1635 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1636 hci_dev_test_flag(hdev, HCI_MGMT))
1637 __mgmt_power_off(hdev);
1639 hci_inquiry_cache_flush(hdev);
1640 hci_pend_le_actions_clear(hdev);
1641 hci_conn_hash_flush(hdev);
1642 hci_dev_unlock(hdev);
1644 smp_unregister(hdev);
1646 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1652 skb_queue_purge(&hdev->cmd_q);
1653 atomic_set(&hdev->cmd_cnt, 1);
1654 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1655 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1656 set_bit(HCI_INIT, &hdev->flags);
1657 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1658 clear_bit(HCI_INIT, &hdev->flags);
1661 /* flush cmd work */
1662 flush_work(&hdev->cmd_work);
1665 skb_queue_purge(&hdev->rx_q);
1666 skb_queue_purge(&hdev->cmd_q);
1667 skb_queue_purge(&hdev->raw_q);
1669 /* Drop last sent command */
1670 if (hdev->sent_cmd) {
1671 cancel_delayed_work_sync(&hdev->cmd_timer);
1672 kfree_skb(hdev->sent_cmd);
1673 hdev->sent_cmd = NULL;
1676 clear_bit(HCI_RUNNING, &hdev->flags);
1677 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1679 /* After this point our queues are empty
1680 * and no tasks are scheduled. */
1684 hdev->flags &= BIT(HCI_RAW);
1685 hci_dev_clear_volatile_flags(hdev);
1687 /* Controller radio is available but is currently powered down */
1688 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1690 memset(hdev->eir, 0, sizeof(hdev->eir));
1691 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1692 bacpy(&hdev->random_addr, BDADDR_ANY);
1694 hci_req_sync_unlock(hdev);
1700 int hci_dev_close(__u16 dev)
1702 struct hci_dev *hdev;
1705 hdev = hci_dev_get(dev);
1709 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1714 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1715 cancel_delayed_work(&hdev->power_off);
1717 err = hci_dev_do_close(hdev);
1724 static int hci_dev_do_reset(struct hci_dev *hdev)
1728 BT_DBG("%s %p", hdev->name, hdev);
1730 hci_req_sync_lock(hdev);
1733 skb_queue_purge(&hdev->rx_q);
1734 skb_queue_purge(&hdev->cmd_q);
1736 /* Avoid potential lockdep warnings from the *_flush() calls by
1737 * ensuring the workqueue is empty up front.
1739 drain_workqueue(hdev->workqueue);
1742 hci_inquiry_cache_flush(hdev);
1743 hci_conn_hash_flush(hdev);
1744 hci_dev_unlock(hdev);
1749 atomic_set(&hdev->cmd_cnt, 1);
1750 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1752 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1754 hci_req_sync_unlock(hdev);
1758 int hci_dev_reset(__u16 dev)
1760 struct hci_dev *hdev;
1763 hdev = hci_dev_get(dev);
1767 if (!test_bit(HCI_UP, &hdev->flags)) {
1772 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1777 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1782 err = hci_dev_do_reset(hdev);
1789 int hci_dev_reset_stat(__u16 dev)
1791 struct hci_dev *hdev;
1794 hdev = hci_dev_get(dev);
1798 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1803 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1808 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1815 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1817 bool conn_changed, discov_changed;
1819 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1821 if ((scan & SCAN_PAGE))
1822 conn_changed = !hci_dev_test_and_set_flag(hdev,
1825 conn_changed = hci_dev_test_and_clear_flag(hdev,
1828 if ((scan & SCAN_INQUIRY)) {
1829 discov_changed = !hci_dev_test_and_set_flag(hdev,
1832 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1833 discov_changed = hci_dev_test_and_clear_flag(hdev,
1837 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1840 if (conn_changed || discov_changed) {
1841 /* In case this was disabled through mgmt */
1842 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1844 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1845 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1847 mgmt_new_settings(hdev);
1851 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1853 struct hci_dev *hdev;
1854 struct hci_dev_req dr;
1857 if (copy_from_user(&dr, arg, sizeof(dr)))
1860 hdev = hci_dev_get(dr.dev_id);
1864 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1869 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1874 if (hdev->dev_type != HCI_PRIMARY) {
1879 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1886 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1887 HCI_INIT_TIMEOUT, NULL);
1891 if (!lmp_encrypt_capable(hdev)) {
1896 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1897 /* Auth must be enabled first */
1898 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1899 HCI_INIT_TIMEOUT, NULL);
1904 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1905 HCI_INIT_TIMEOUT, NULL);
1909 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1910 HCI_INIT_TIMEOUT, NULL);
1912 /* Ensure that the connectable and discoverable states
1913 * get correctly modified as this was a non-mgmt change.
1916 hci_update_scan_state(hdev, dr.dev_opt);
1920 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1921 HCI_INIT_TIMEOUT, NULL);
1924 case HCISETLINKMODE:
1925 hdev->link_mode = ((__u16) dr.dev_opt) &
1926 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1930 hdev->pkt_type = (__u16) dr.dev_opt;
1934 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1935 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1939 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1940 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1953 int hci_get_dev_list(void __user *arg)
1955 struct hci_dev *hdev;
1956 struct hci_dev_list_req *dl;
1957 struct hci_dev_req *dr;
1958 int n = 0, size, err;
1961 if (get_user(dev_num, (__u16 __user *) arg))
1964 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1967 size = sizeof(*dl) + dev_num * sizeof(*dr);
1969 dl = kzalloc(size, GFP_KERNEL);
1975 read_lock(&hci_dev_list_lock);
1976 list_for_each_entry(hdev, &hci_dev_list, list) {
1977 unsigned long flags = hdev->flags;
1979 /* When the auto-off is configured it means the transport
1980 * is running, but in that case still indicate that the
1981 * device is actually down.
1983 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1984 flags &= ~BIT(HCI_UP);
1986 (dr + n)->dev_id = hdev->id;
1987 (dr + n)->dev_opt = flags;
1992 read_unlock(&hci_dev_list_lock);
1995 size = sizeof(*dl) + n * sizeof(*dr);
1997 err = copy_to_user(arg, dl, size);
2000 return err ? -EFAULT : 0;
2003 int hci_get_dev_info(void __user *arg)
2005 struct hci_dev *hdev;
2006 struct hci_dev_info di;
2007 unsigned long flags;
2010 if (copy_from_user(&di, arg, sizeof(di)))
2013 hdev = hci_dev_get(di.dev_id);
2017 /* When the auto-off is configured it means the transport
2018 * is running, but in that case still indicate that the
2019 * device is actually down.
2021 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2022 flags = hdev->flags & ~BIT(HCI_UP);
2024 flags = hdev->flags;
2026 strcpy(di.name, hdev->name);
2027 di.bdaddr = hdev->bdaddr;
2028 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2030 di.pkt_type = hdev->pkt_type;
2031 if (lmp_bredr_capable(hdev)) {
2032 di.acl_mtu = hdev->acl_mtu;
2033 di.acl_pkts = hdev->acl_pkts;
2034 di.sco_mtu = hdev->sco_mtu;
2035 di.sco_pkts = hdev->sco_pkts;
2037 di.acl_mtu = hdev->le_mtu;
2038 di.acl_pkts = hdev->le_pkts;
2042 di.link_policy = hdev->link_policy;
2043 di.link_mode = hdev->link_mode;
2045 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2046 memcpy(&di.features, &hdev->features, sizeof(di.features));
2048 if (copy_to_user(arg, &di, sizeof(di)))
2056 /* ---- Interface to HCI drivers ---- */
2058 static int hci_rfkill_set_block(void *data, bool blocked)
2060 struct hci_dev *hdev = data;
2062 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2064 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2068 hci_dev_set_flag(hdev, HCI_RFKILLED);
2069 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2070 !hci_dev_test_flag(hdev, HCI_CONFIG))
2071 hci_dev_do_close(hdev);
2073 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2079 static const struct rfkill_ops hci_rfkill_ops = {
2080 .set_block = hci_rfkill_set_block,
2083 static void hci_power_on(struct work_struct *work)
2085 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2088 BT_DBG("%s", hdev->name);
2090 if (test_bit(HCI_UP, &hdev->flags) &&
2091 hci_dev_test_flag(hdev, HCI_MGMT) &&
2092 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2093 cancel_delayed_work(&hdev->power_off);
2094 hci_req_sync_lock(hdev);
2095 err = __hci_req_hci_power_on(hdev);
2096 hci_req_sync_unlock(hdev);
2097 mgmt_power_on(hdev, err);
2101 err = hci_dev_do_open(hdev);
2104 mgmt_set_powered_failed(hdev, err);
2105 hci_dev_unlock(hdev);
2109 /* During the HCI setup phase, a few error conditions are
2110 * ignored and they need to be checked now. If they are still
2111 * valid, it is important to turn the device back off.
2113 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2114 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2115 (hdev->dev_type == HCI_PRIMARY &&
2116 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2117 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2118 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2119 hci_dev_do_close(hdev);
2120 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2121 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2122 HCI_AUTO_OFF_TIMEOUT);
2125 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2126 /* For unconfigured devices, set the HCI_RAW flag
2127 * so that userspace can easily identify them.
2129 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2130 set_bit(HCI_RAW, &hdev->flags);
2132 /* For fully configured devices, this will send
2133 * the Index Added event. For unconfigured devices,
2134 * it will send Unconfigued Index Added event.
2136 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2137 * and no event will be send.
2139 mgmt_index_added(hdev);
2140 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2141 /* When the controller is now configured, then it
2142 * is important to clear the HCI_RAW flag.
2144 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2145 clear_bit(HCI_RAW, &hdev->flags);
2147 /* Powering on the controller with HCI_CONFIG set only
2148 * happens with the transition from unconfigured to
2149 * configured. This will send the Index Added event.
2151 mgmt_index_added(hdev);
2155 static void hci_power_off(struct work_struct *work)
2157 struct hci_dev *hdev = container_of(work, struct hci_dev,
2160 BT_DBG("%s", hdev->name);
2162 hci_dev_do_close(hdev);
2165 static void hci_error_reset(struct work_struct *work)
2167 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2169 BT_DBG("%s", hdev->name);
2172 hdev->hw_error(hdev, hdev->hw_error_code);
2174 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2175 hdev->hw_error_code);
2177 if (hci_dev_do_close(hdev))
2180 hci_dev_do_open(hdev);
2183 void hci_uuids_clear(struct hci_dev *hdev)
2185 struct bt_uuid *uuid, *tmp;
2187 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2188 list_del(&uuid->list);
2193 void hci_link_keys_clear(struct hci_dev *hdev)
2195 struct link_key *key;
2197 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2198 list_del_rcu(&key->list);
2199 kfree_rcu(key, rcu);
2203 void hci_smp_ltks_clear(struct hci_dev *hdev)
2207 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2208 list_del_rcu(&k->list);
2213 void hci_smp_irks_clear(struct hci_dev *hdev)
2217 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2218 list_del_rcu(&k->list);
2223 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2228 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2229 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2239 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2240 u8 key_type, u8 old_key_type)
2243 if (key_type < 0x03)
2246 /* Debug keys are insecure so don't store them persistently */
2247 if (key_type == HCI_LK_DEBUG_COMBINATION)
2250 /* Changed combination key and there's no previous one */
2251 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2254 /* Security mode 3 case */
2258 /* BR/EDR key derived using SC from an LE link */
2259 if (conn->type == LE_LINK)
2262 /* Neither local nor remote side had no-bonding as requirement */
2263 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2266 /* Local side had dedicated bonding as requirement */
2267 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2270 /* Remote side had dedicated bonding as requirement */
2271 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2274 /* If none of the above criteria match, then don't store the key
2279 static u8 ltk_role(u8 type)
2281 if (type == SMP_LTK)
2282 return HCI_ROLE_MASTER;
2284 return HCI_ROLE_SLAVE;
2287 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2288 u8 addr_type, u8 role)
2293 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2294 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2297 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2307 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2309 struct smp_irk *irk;
2312 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2313 if (!bacmp(&irk->rpa, rpa)) {
2319 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2320 if (smp_irk_matches(hdev, irk->val, rpa)) {
2321 bacpy(&irk->rpa, rpa);
2331 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2334 struct smp_irk *irk;
2336 /* Identity Address must be public or static random */
2337 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2341 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2342 if (addr_type == irk->addr_type &&
2343 bacmp(bdaddr, &irk->bdaddr) == 0) {
2353 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2354 bdaddr_t *bdaddr, u8 *val, u8 type,
2355 u8 pin_len, bool *persistent)
2357 struct link_key *key, *old_key;
2360 old_key = hci_find_link_key(hdev, bdaddr);
2362 old_key_type = old_key->type;
2365 old_key_type = conn ? conn->key_type : 0xff;
2366 key = kzalloc(sizeof(*key), GFP_KERNEL);
2369 list_add_rcu(&key->list, &hdev->link_keys);
2372 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2374 /* Some buggy controller combinations generate a changed
2375 * combination key for legacy pairing even when there's no
2377 if (type == HCI_LK_CHANGED_COMBINATION &&
2378 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2379 type = HCI_LK_COMBINATION;
2381 conn->key_type = type;
2384 bacpy(&key->bdaddr, bdaddr);
2385 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2386 key->pin_len = pin_len;
2388 if (type == HCI_LK_CHANGED_COMBINATION)
2389 key->type = old_key_type;
2394 *persistent = hci_persistent_key(hdev, conn, type,
2400 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2401 u8 addr_type, u8 type, u8 authenticated,
2402 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2404 struct smp_ltk *key, *old_key;
2405 u8 role = ltk_role(type);
2407 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2411 key = kzalloc(sizeof(*key), GFP_KERNEL);
2414 list_add_rcu(&key->list, &hdev->long_term_keys);
2417 bacpy(&key->bdaddr, bdaddr);
2418 key->bdaddr_type = addr_type;
2419 memcpy(key->val, tk, sizeof(key->val));
2420 key->authenticated = authenticated;
2423 key->enc_size = enc_size;
2429 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2430 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2432 struct smp_irk *irk;
2434 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2436 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2440 bacpy(&irk->bdaddr, bdaddr);
2441 irk->addr_type = addr_type;
2443 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2446 memcpy(irk->val, val, 16);
2447 bacpy(&irk->rpa, rpa);
2452 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2454 struct link_key *key;
2456 key = hci_find_link_key(hdev, bdaddr);
2460 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2462 list_del_rcu(&key->list);
2463 kfree_rcu(key, rcu);
2468 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2473 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2474 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2477 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2479 list_del_rcu(&k->list);
2484 return removed ? 0 : -ENOENT;
2487 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2491 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2492 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2495 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2497 list_del_rcu(&k->list);
2502 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2505 struct smp_irk *irk;
2508 if (type == BDADDR_BREDR) {
2509 if (hci_find_link_key(hdev, bdaddr))
2514 /* Convert to HCI addr type which struct smp_ltk uses */
2515 if (type == BDADDR_LE_PUBLIC)
2516 addr_type = ADDR_LE_DEV_PUBLIC;
2518 addr_type = ADDR_LE_DEV_RANDOM;
2520 irk = hci_get_irk(hdev, bdaddr, addr_type);
2522 bdaddr = &irk->bdaddr;
2523 addr_type = irk->addr_type;
2527 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2528 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2538 /* HCI command timer function */
2539 static void hci_cmd_timeout(struct work_struct *work)
2541 struct hci_dev *hdev = container_of(work, struct hci_dev,
2544 if (hdev->sent_cmd) {
2545 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2546 u16 opcode = __le16_to_cpu(sent->opcode);
2548 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2550 BT_ERR("%s command tx timeout", hdev->name);
2553 atomic_set(&hdev->cmd_cnt, 1);
2554 queue_work(hdev->workqueue, &hdev->cmd_work);
2557 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2558 bdaddr_t *bdaddr, u8 bdaddr_type)
2560 struct oob_data *data;
2562 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2563 if (bacmp(bdaddr, &data->bdaddr) != 0)
2565 if (data->bdaddr_type != bdaddr_type)
2573 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2576 struct oob_data *data;
2578 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2582 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2584 list_del(&data->list);
2590 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2592 struct oob_data *data, *n;
2594 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2595 list_del(&data->list);
2600 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2601 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2602 u8 *hash256, u8 *rand256)
2604 struct oob_data *data;
2606 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2608 data = kmalloc(sizeof(*data), GFP_KERNEL);
2612 bacpy(&data->bdaddr, bdaddr);
2613 data->bdaddr_type = bdaddr_type;
2614 list_add(&data->list, &hdev->remote_oob_data);
2617 if (hash192 && rand192) {
2618 memcpy(data->hash192, hash192, sizeof(data->hash192));
2619 memcpy(data->rand192, rand192, sizeof(data->rand192));
2620 if (hash256 && rand256)
2621 data->present = 0x03;
2623 memset(data->hash192, 0, sizeof(data->hash192));
2624 memset(data->rand192, 0, sizeof(data->rand192));
2625 if (hash256 && rand256)
2626 data->present = 0x02;
2628 data->present = 0x00;
2631 if (hash256 && rand256) {
2632 memcpy(data->hash256, hash256, sizeof(data->hash256));
2633 memcpy(data->rand256, rand256, sizeof(data->rand256));
2635 memset(data->hash256, 0, sizeof(data->hash256));
2636 memset(data->rand256, 0, sizeof(data->rand256));
2637 if (hash192 && rand192)
2638 data->present = 0x01;
2641 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2646 /* This function requires the caller holds hdev->lock */
2647 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2649 struct adv_info *adv_instance;
2651 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2652 if (adv_instance->instance == instance)
2653 return adv_instance;
2659 /* This function requires the caller holds hdev->lock */
2660 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2662 struct adv_info *cur_instance;
2664 cur_instance = hci_find_adv_instance(hdev, instance);
2668 if (cur_instance == list_last_entry(&hdev->adv_instances,
2669 struct adv_info, list))
2670 return list_first_entry(&hdev->adv_instances,
2671 struct adv_info, list);
2673 return list_next_entry(cur_instance, list);
2676 /* This function requires the caller holds hdev->lock */
2677 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2679 struct adv_info *adv_instance;
2681 adv_instance = hci_find_adv_instance(hdev, instance);
2685 BT_DBG("%s removing %dMR", hdev->name, instance);
2687 if (hdev->cur_adv_instance == instance) {
2688 if (hdev->adv_instance_timeout) {
2689 cancel_delayed_work(&hdev->adv_instance_expire);
2690 hdev->adv_instance_timeout = 0;
2692 hdev->cur_adv_instance = 0x00;
2695 list_del(&adv_instance->list);
2696 kfree(adv_instance);
2698 hdev->adv_instance_cnt--;
2703 /* This function requires the caller holds hdev->lock */
2704 void hci_adv_instances_clear(struct hci_dev *hdev)
2706 struct adv_info *adv_instance, *n;
2708 if (hdev->adv_instance_timeout) {
2709 cancel_delayed_work(&hdev->adv_instance_expire);
2710 hdev->adv_instance_timeout = 0;
2713 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2714 list_del(&adv_instance->list);
2715 kfree(adv_instance);
2718 hdev->adv_instance_cnt = 0;
2719 hdev->cur_adv_instance = 0x00;
2722 /* This function requires the caller holds hdev->lock */
2723 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2724 u16 adv_data_len, u8 *adv_data,
2725 u16 scan_rsp_len, u8 *scan_rsp_data,
2726 u16 timeout, u16 duration)
2728 struct adv_info *adv_instance;
2730 adv_instance = hci_find_adv_instance(hdev, instance);
2732 memset(adv_instance->adv_data, 0,
2733 sizeof(adv_instance->adv_data));
2734 memset(adv_instance->scan_rsp_data, 0,
2735 sizeof(adv_instance->scan_rsp_data));
2737 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2738 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2741 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2745 adv_instance->pending = true;
2746 adv_instance->instance = instance;
2747 list_add(&adv_instance->list, &hdev->adv_instances);
2748 hdev->adv_instance_cnt++;
2751 adv_instance->flags = flags;
2752 adv_instance->adv_data_len = adv_data_len;
2753 adv_instance->scan_rsp_len = scan_rsp_len;
2756 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2759 memcpy(adv_instance->scan_rsp_data,
2760 scan_rsp_data, scan_rsp_len);
2762 adv_instance->timeout = timeout;
2763 adv_instance->remaining_time = timeout;
2766 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2768 adv_instance->duration = duration;
2770 BT_DBG("%s for %dMR", hdev->name, instance);
2775 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2776 bdaddr_t *bdaddr, u8 type)
2778 struct bdaddr_list *b;
2780 list_for_each_entry(b, bdaddr_list, list) {
2781 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2788 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2790 struct bdaddr_list *b, *n;
2792 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2798 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2800 struct bdaddr_list *entry;
2802 if (!bacmp(bdaddr, BDADDR_ANY))
2805 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2808 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2812 bacpy(&entry->bdaddr, bdaddr);
2813 entry->bdaddr_type = type;
2815 list_add(&entry->list, list);
2820 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2822 struct bdaddr_list *entry;
2824 if (!bacmp(bdaddr, BDADDR_ANY)) {
2825 hci_bdaddr_list_clear(list);
2829 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2833 list_del(&entry->list);
2839 /* This function requires the caller holds hdev->lock */
2840 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2841 bdaddr_t *addr, u8 addr_type)
2843 struct hci_conn_params *params;
2845 list_for_each_entry(params, &hdev->le_conn_params, list) {
2846 if (bacmp(¶ms->addr, addr) == 0 &&
2847 params->addr_type == addr_type) {
2855 /* This function requires the caller holds hdev->lock */
/* Find @addr/@addr_type in a pend_le_* list; note iteration is via the
 * params' "action" list head, not the main "list" member.
 */
2856 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2857 bdaddr_t *addr, u8 addr_type)
2859 struct hci_conn_params *param;
2861 list_for_each_entry(param, list, action) {
/* Mojibake fix: mis-encoded entity restored to "&param". */
2862 if (bacmp(&param->addr, addr) == 0 &&
2863 param->addr_type == addr_type)
2870 /* This function requires the caller holds hdev->lock */
/* Look up existing LE conn params for @addr/@addr_type, or allocate a
 * new entry seeded from the controller defaults with auto-connect
 * disabled.
 */
2871 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2872 bdaddr_t *addr, u8 addr_type)
2874 struct hci_conn_params *params;
/* Reuse an existing entry rather than adding a duplicate. */
2876 params = hci_conn_params_lookup(hdev, addr, addr_type);
2880 params = kzalloc(sizeof(*params), GFP_KERNEL);
2882 BT_ERR("Out of memory");
/* Mojibake fix below: mis-encoded entities restored to "&params". */
2886 bacpy(&params->addr, addr);
2887 params->addr_type = addr_type;
2889 list_add(&params->list, &hdev->le_conn_params);
2890 INIT_LIST_HEAD(&params->action);
/* Seed connection parameters from the per-controller defaults. */
2892 params->conn_min_interval = hdev->le_conn_min_interval;
2893 params->conn_max_interval = hdev->le_conn_max_interval;
2894 params->conn_latency = hdev->le_conn_latency;
2895 params->supervision_timeout = hdev->le_supv_timeout;
2896 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2898 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2903 static void hci_conn_params_free(struct hci_conn_params *params)
/* Drop and release any hci_conn still referenced by the params, then
 * unlink the entry from both the action list and the main list.
 * (Mojibake fix: mis-encoded entities restored to "&params".)
 */
2906 hci_conn_drop(params->conn);
2907 hci_conn_put(params->conn);
2910 list_del(&params->action);
2911 list_del(&params->list);
2915 /* This function requires the caller holds hdev->lock */
/* Remove the LE conn params for @addr/@addr_type and re-evaluate the
 * background scan now that the set of tracked devices changed.
 */
2916 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2918 struct hci_conn_params *params;
2920 params = hci_conn_params_lookup(hdev, addr, addr_type);
2924 hci_conn_params_free(params);
2926 hci_update_background_scan(hdev);
2928 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2931 /* This function requires the caller holds hdev->lock */
/* Drop all LE conn param entries whose auto_connect is DISABLED, except
 * that entries with a pending explicit connect are kept and downgraded
 * to one-shot (HCI_AUTO_CONN_EXPLICIT).
 */
2932 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2934 struct hci_conn_params *params, *tmp;
2936 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2937 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2940 /* If trying to establish one time connection to disabled
2941 * device, leave the params, but mark them as just once.
2943 if (params->explicit_connect) {
2944 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
/* Mojibake fix: mis-encoded entity restored to "&params". */
2948 list_del(&params->list);
2952 BT_DBG("All LE disabled connection parameters were removed");
2955 /* This function requires the caller holds hdev->lock */
/* Free every LE connection parameter entry on @hdev (device teardown). */
2956 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2958 struct hci_conn_params *params, *tmp;
2960 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2961 hci_conn_params_free(params);
2963 BT_DBG("All LE connection parameters were removed");
2966 /* Copy the Identity Address of the controller.
2968 * If the controller has a public BD_ADDR, then by default use that one.
2969 * If this is a LE only controller without a public address, default to
2970 * the static random address.
2972 * For debugging purposes it is possible to force controllers with a
2973 * public address to use the static random address instead.
2975 * In case BR/EDR has been disabled on a dual-mode controller and
2976 * userspace has configured a static address, then that address
2977 * becomes the identity address instead of the public BR/EDR address.
2979 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
/* Static random address wins when forced, when no public address exists,
 * or when BR/EDR is disabled and a static address has been configured.
 */
2982 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2983 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2984 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2985 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2986 bacpy(bdaddr, &hdev->static_addr);
2987 *bdaddr_type = ADDR_LE_DEV_RANDOM;
/* Otherwise the public BD_ADDR is the identity address. */
2989 bacpy(bdaddr, &hdev->bdaddr);
2990 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2994 /* Alloc HCI device */
/* Allocate a zeroed hci_dev and set up defaults, bookkeeping lists,
 * work items and packet queues.  The caller later registers it via
 * hci_register_dev().
 */
2995 struct hci_dev *hci_alloc_dev(void)
2997 struct hci_dev *hdev;
2999 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
/* BR/EDR packet/link defaults and identification values. */
3003 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3004 hdev->esco_type = (ESCO_HV1);
3005 hdev->link_mode = (HCI_LM_ACCEPT);
3006 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3007 hdev->io_capability = 0x03; /* No Input No Output */
3008 hdev->manufacturer = 0xffff; /* Default to internal use */
3009 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3010 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3011 hdev->adv_instance_cnt = 0;
3012 hdev->cur_adv_instance = 0x00;
3013 hdev->adv_instance_timeout = 0;
3015 hdev->sniff_max_interval = 800;
3016 hdev->sniff_min_interval = 80;
/* LE defaults: advertising, scanning, connection and data-length
 * parameters, expressed in raw controller units.
 */
3018 hdev->le_adv_channel_map = 0x07;
3019 hdev->le_adv_min_interval = 0x0800;
3020 hdev->le_adv_max_interval = 0x0800;
3021 hdev->le_scan_interval = 0x0060;
3022 hdev->le_scan_window = 0x0030;
3023 hdev->le_conn_min_interval = 0x0018;
3024 hdev->le_conn_max_interval = 0x0028;
3025 hdev->le_conn_latency = 0x0000;
3026 hdev->le_supv_timeout = 0x002a;
3027 hdev->le_def_tx_len = 0x001b;
3028 hdev->le_def_tx_time = 0x0148;
3029 hdev->le_max_tx_len = 0x001b;
3030 hdev->le_max_tx_time = 0x0148;
3031 hdev->le_max_rx_len = 0x001b;
3032 hdev->le_max_rx_time = 0x0148;
3034 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3035 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3036 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3037 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3039 mutex_init(&hdev->lock);
3040 mutex_init(&hdev->req_lock);
/* Per-device bookkeeping lists (keys, filters, connections, ...). */
3042 INIT_LIST_HEAD(&hdev->mgmt_pending);
3043 INIT_LIST_HEAD(&hdev->blacklist);
3044 INIT_LIST_HEAD(&hdev->whitelist);
3045 INIT_LIST_HEAD(&hdev->uuids);
3046 INIT_LIST_HEAD(&hdev->link_keys);
3047 INIT_LIST_HEAD(&hdev->long_term_keys);
3048 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3049 INIT_LIST_HEAD(&hdev->remote_oob_data);
3050 INIT_LIST_HEAD(&hdev->le_white_list);
3051 INIT_LIST_HEAD(&hdev->le_conn_params);
3052 INIT_LIST_HEAD(&hdev->pend_le_conns);
3053 INIT_LIST_HEAD(&hdev->pend_le_reports);
3054 INIT_LIST_HEAD(&hdev->conn_hash.list);
3055 INIT_LIST_HEAD(&hdev->adv_instances);
/* Work items driving the RX/TX/command/power state machines. */
3057 INIT_WORK(&hdev->rx_work, hci_rx_work);
3058 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3059 INIT_WORK(&hdev->tx_work, hci_tx_work);
3060 INIT_WORK(&hdev->power_on, hci_power_on);
3061 INIT_WORK(&hdev->error_reset, hci_error_reset);
3063 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3065 skb_queue_head_init(&hdev->rx_q);
3066 skb_queue_head_init(&hdev->cmd_q);
3067 skb_queue_head_init(&hdev->raw_q);
3069 init_waitqueue_head(&hdev->req_wait_q);
3071 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3073 hci_request_setup(hdev);
3075 hci_init_sysfs(hdev);
3076 discovery_init(hdev);
3080 EXPORT_SYMBOL(hci_alloc_dev);
3082 /* Free HCI device */
/* Drop the device reference; the memory is freed by the device core's
 * release callback once the last reference goes away.
 */
3083 void hci_free_dev(struct hci_dev *hdev)
3085 /* will free via device release */
3086 put_device(&hdev->dev);
3088 EXPORT_SYMBOL(hci_free_dev);
3090 /* Register HCI device */
/* Allocate an index, create workqueues, sysfs/debugfs entries and an
 * rfkill switch, add the device to hci_dev_list and queue the initial
 * power-on work.
 */
3091 int hci_register_dev(struct hci_dev *hdev)
/* open/close/send driver callbacks are mandatory. */
3095 if (!hdev->open || !hdev->close || !hdev->send)
3098 /* Do not allow HCI_AMP devices to register at index 0,
3099 * so the index can be used as the AMP controller ID.
3101 switch (hdev->dev_type) {
3103 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3106 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3115 sprintf(hdev->name, "hci%d", id);
3118 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Ordered high-priority queues keep per-device work serialized. */
3120 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3121 if (!hdev->workqueue) {
3126 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3128 if (!hdev->req_workqueue) {
3129 destroy_workqueue(hdev->workqueue);
3134 if (!IS_ERR_OR_NULL(bt_debugfs))
3135 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3137 dev_set_name(&hdev->dev, "%s", hdev->name);
3139 error = device_add(&hdev->dev);
3143 hci_leds_init(hdev);
/* rfkill failures are non-fatal; the device works without a switch. */
3145 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3146 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3149 if (rfkill_register(hdev->rfkill) < 0) {
3150 rfkill_destroy(hdev->rfkill);
3151 hdev->rfkill = NULL;
3155 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3156 hci_dev_set_flag(hdev, HCI_RFKILLED);
3158 hci_dev_set_flag(hdev, HCI_SETUP);
3159 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3161 if (hdev->dev_type == HCI_PRIMARY) {
3162 /* Assume BR/EDR support until proven otherwise (such as
3163 * through reading supported features during init.
3165 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3168 write_lock(&hci_dev_list_lock);
3169 list_add(&hdev->list, &hci_dev_list);
3170 write_unlock(&hci_dev_list_lock);
3172 /* Devices that are marked for raw-only usage are unconfigured
3173 * and should not be included in normal operation.
3175 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3176 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3178 hci_sock_dev_event(hdev, HCI_DEV_REG);
3181 queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind: tear down in reverse order of creation. */
3186 destroy_workqueue(hdev->workqueue);
3187 destroy_workqueue(hdev->req_workqueue);
3189 ida_simple_remove(&hci_index_ida, hdev->id);
3193 EXPORT_SYMBOL(hci_register_dev);
3195 /* Unregister HCI device */
/* Detach @hdev from the global list, close it, notify mgmt and HCI
 * sockets, and remove the sysfs device.  Memory cleanup happens later
 * in hci_cleanup_dev() via the device release path.
 */
3196 void hci_unregister_dev(struct hci_dev *hdev)
3198 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3200 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3202 write_lock(&hci_dev_list_lock);
3203 list_del(&hdev->list);
3204 write_unlock(&hci_dev_list_lock);
3206 cancel_work_sync(&hdev->power_on);
3208 hci_dev_do_close(hdev);
/* Only announce the removal to mgmt once setup/config have finished. */
3210 if (!test_bit(HCI_INIT, &hdev->flags) &&
3211 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3212 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3214 mgmt_index_removed(hdev);
3215 hci_dev_unlock(hdev);
3218 /* mgmt_index_removed should take care of emptying the
3220 BUG_ON(!list_empty(&hdev->mgmt_pending));
3222 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3225 rfkill_unregister(hdev->rfkill);
3226 rfkill_destroy(hdev->rfkill);
3229 device_del(&hdev->dev);
3230 /* Actual cleanup is deferred until hci_cleanup_dev(). */
3233 EXPORT_SYMBOL(hci_unregister_dev);
3235 /* Cleanup HCI device */
/* Final teardown: free debugfs, info strings and workqueues, clear every
 * cached list/key store, and return the index to the IDA.
 */
3236 void hci_cleanup_dev(struct hci_dev *hdev)
3238 debugfs_remove_recursive(hdev->debugfs);
3239 kfree_const(hdev->hw_info);
3240 kfree_const(hdev->fw_info);
3242 destroy_workqueue(hdev->workqueue);
3243 destroy_workqueue(hdev->req_workqueue);
/* Empty all per-device state under hdev->lock. */
3246 hci_bdaddr_list_clear(&hdev->blacklist);
3247 hci_bdaddr_list_clear(&hdev->whitelist);
3248 hci_uuids_clear(hdev);
3249 hci_link_keys_clear(hdev);
3250 hci_smp_ltks_clear(hdev);
3251 hci_smp_irks_clear(hdev);
3252 hci_remote_oob_data_clear(hdev);
3253 hci_adv_instances_clear(hdev);
3254 hci_bdaddr_list_clear(&hdev->le_white_list);
3255 hci_conn_params_clear_all(hdev);
3256 hci_discovery_filter_clear(hdev);
3257 hci_dev_unlock(hdev);
3259 ida_simple_remove(&hci_index_ida, hdev->id);
3262 /* Suspend HCI device */
/* Notify HCI sockets that the device is suspending. */
3263 int hci_suspend_dev(struct hci_dev *hdev)
3265 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3268 EXPORT_SYMBOL(hci_suspend_dev);
3270 /* Resume HCI device */
/* Notify HCI sockets that the device is resuming. */
3271 int hci_resume_dev(struct hci_dev *hdev)
3273 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3276 EXPORT_SYMBOL(hci_resume_dev);
3278 /* Reset HCI device */
/* Inject a synthetic Hardware Error event into the RX path so the upper
 * stack performs its error handling for this controller.
 */
3279 int hci_reset_dev(struct hci_dev *hdev)
3281 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3282 struct sk_buff *skb;
/* GFP_ATOMIC: callers may be in non-sleepable (e.g. IRQ) context. */
3284 skb = bt_skb_alloc(3, GFP_ATOMIC);
3288 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3289 skb_put_data(skb, hw_err, 3);
3291 /* Send Hardware Error to upper stack */
3292 return hci_recv_frame(hdev, skb);
3294 EXPORT_SYMBOL(hci_reset_dev);
3296 /* Receive frame from HCI drivers */
/* Driver entry point for received packets: validate device state and
 * packet type, timestamp the skb, then queue it on rx_q for
 * hci_rx_work().
 */
3297 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
/* Only accept frames while the device is up or still initialising. */
3299 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3300 && !test_bit(HCI_INIT, &hdev->flags))) {
/* Only event, ACL and SCO packets may enter through this path. */
3305 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3306 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3307 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3313 bt_cb(skb)->incoming = 1;
3316 __net_timestamp(skb);
3318 skb_queue_tail(&hdev->rx_q, skb);
3319 queue_work(hdev->workqueue, &hdev->rx_work);
3323 EXPORT_SYMBOL(hci_recv_frame);
3325 /* Receive diagnostic message from HCI drivers */
/* Queue a driver diagnostic packet on the regular RX path, tagged with
 * HCI_DIAG_PKT so hci_rx_work() can tell it apart from protocol data.
 */
3326 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3328 /* Mark as diagnostic packet */
3329 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3332 __net_timestamp(skb);
3334 skb_queue_tail(&hdev->rx_q, skb);
3335 queue_work(hdev->workqueue, &hdev->rx_work);
3339 EXPORT_SYMBOL(hci_recv_diag);
/* Replace hdev->hw_info with a printf-formatted hardware description. */
3341 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3345 va_start(vargs, fmt);
/* Free any previous string before storing the new one. */
3346 kfree_const(hdev->hw_info);
3347 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3350 EXPORT_SYMBOL(hci_set_hw_info);
/* Replace hdev->fw_info with a printf-formatted firmware description. */
3352 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3356 va_start(vargs, fmt);
/* Free any previous string before storing the new one. */
3357 kfree_const(hdev->fw_info);
3358 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3361 EXPORT_SYMBOL(hci_set_fw_info);
3363 /* ---- Interface to upper protocols ---- */
/* Add an upper-protocol callback set to the global hci_cb_list. */
3365 int hci_register_cb(struct hci_cb *cb)
3367 BT_DBG("%p name %s", cb, cb->name);
3369 mutex_lock(&hci_cb_list_lock);
3370 list_add_tail(&cb->list, &hci_cb_list);
3371 mutex_unlock(&hci_cb_list_lock);
3375 EXPORT_SYMBOL(hci_register_cb);
/* Remove an upper-protocol callback set from the global hci_cb_list. */
3377 int hci_unregister_cb(struct hci_cb *cb)
3379 BT_DBG("%p name %s", cb, cb->name);
3381 mutex_lock(&hci_cb_list_lock);
3382 list_del(&cb->list);
3383 mutex_unlock(&hci_cb_list_lock);
3387 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one packet to the driver: timestamp it, mirror it to the monitor
 * (and to sockets in promiscuous mode), then call hdev->send().
 */
3389 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3393 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3397 __net_timestamp(skb);
3399 /* Send copy to monitor */
3400 hci_send_to_monitor(hdev, skb);
3402 if (atomic_read(&hdev->promisc)) {
3403 /* Send copy to the sockets */
3404 hci_send_to_sock(hdev, skb);
3407 /* Get rid of skb owner, prior to sending to the driver. */
/* Packets are dropped (not sent) once the driver is no longer running. */
3410 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3415 err = hdev->send(hdev, skb);
3417 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3422 /* Send HCI command */
/* Build a command skb for @opcode/@param and queue it on cmd_q; the
 * command work item performs the actual transmission.
 */
3423 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3426 struct sk_buff *skb;
3428 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3430 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3432 BT_ERR("%s no memory for command", hdev->name);
3436 /* Stand-alone HCI commands must be flagged as
3437 * single-command requests.
3439 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3441 skb_queue_tail(&hdev->cmd_q, skb);
3442 queue_work(hdev->workqueue, &hdev->cmd_work);
3447 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command, but
 * only if its opcode matches @opcode; checks hdev->sent_cmd first.
 */
3448 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3450 struct hci_command_hdr *hdr;
3452 if (!hdev->sent_cmd)
3455 hdr = (void *) hdev->sent_cmd->data;
/* Opcode is stored little-endian in the command header. */
3457 if (hdr->opcode != cpu_to_le16(opcode))
3460 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3462 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3465 /* Send HCI command and wait for command complete event */
/* Synchronous command helper: serialize via the request lock and run
 * __hci_cmd_sync(); fails fast with -ENETDOWN when the device is down.
 */
3466 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3467 const void *param, u32 timeout)
3469 struct sk_buff *skb;
3471 if (!test_bit(HCI_UP, &hdev->flags))
3472 return ERR_PTR(-ENETDOWN);
3474 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3476 hci_req_sync_lock(hdev);
3477 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3478 hci_req_sync_unlock(hdev);
3482 EXPORT_SYMBOL(hci_cmd_sync);
/* Prepend an HCI ACL header (handle+flags, data length) to @skb. */
3485 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3487 struct hci_acl_hdr *hdr;
3490 skb_push(skb, HCI_ACL_HDR_SIZE);
3491 skb_reset_transport_header(skb);
3492 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
/* Handle and packet-boundary flags share one little-endian 16-bit field. */
3493 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3494 hdr->dlen = cpu_to_le16(len);
/* Add ACL headers to @skb (and to every fragment on its frag_list) and
 * queue everything on @queue.  AMP controllers use the channel handle,
 * other device types the connection handle.
 */
3497 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3498 struct sk_buff *skb, __u16 flags)
3500 struct hci_conn *conn = chan->conn;
3501 struct hci_dev *hdev = conn->hdev;
3502 struct sk_buff *list;
3504 skb->len = skb_headlen(skb);
3507 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3509 switch (hdev->dev_type) {
3511 hci_add_acl_hdr(skb, conn->handle, flags);
3514 hci_add_acl_hdr(skb, chan->handle, flags);
3517 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3521 list = skb_shinfo(skb)->frag_list;
3523 /* Non fragmented */
3524 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3526 skb_queue_tail(queue, skb);
3529 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3531 skb_shinfo(skb)->frag_list = NULL;
3533 /* Queue all fragments atomically. We need to use spin_lock_bh
3534 * here because of 6LoWPAN links, as there this function is
3535 * called from softirq and using normal spin lock could cause
3538 spin_lock_bh(&queue->lock);
3540 __skb_queue_tail(queue, skb);
/* Continuation fragments are queued without the ACL_START flag. */
3542 flags &= ~ACL_START;
3545 skb = list; list = list->next;
3547 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3548 hci_add_acl_hdr(skb, conn->handle, flags);
3550 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3552 __skb_queue_tail(queue, skb);
3555 spin_unlock_bh(&queue->lock);
/* Queue ACL data on the channel and schedule the TX work item. */
3559 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3561 struct hci_dev *hdev = chan->conn->hdev;
3563 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3565 hci_queue_acl(chan, &chan->data_q, skb, flags);
3567 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (connection handle + length) and queue the
 * packet for transmission by the TX work item.
 */
3571 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3573 struct hci_dev *hdev = conn->hdev;
3574 struct hci_sco_hdr hdr;
3576 BT_DBG("%s len %d", hdev->name, skb->len);
3578 hdr.handle = cpu_to_le16(conn->handle);
3579 hdr.dlen = skb->len;
3581 skb_push(skb, HCI_SCO_HDR_SIZE);
3582 skb_reset_transport_header(skb);
3583 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3585 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3587 skb_queue_tail(&conn->data_q, skb);
3588 queue_work(hdev->workqueue, &hdev->tx_work);
3591 /* ---- HCI TX task (outgoing data) ---- */
3593 /* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest packets
 * in flight (fairness), and derive its send quota from the matching
 * controller buffer count.
 */
3594 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3597 struct hci_conn_hash *h = &hdev->conn_hash;
3598 struct hci_conn *conn = NULL, *c;
3599 unsigned int num = 0, min = ~0;
3601 /* We don't have to lock device here. Connections are always
3602 * added and removed with TX task disabled. */
3606 list_for_each_entry_rcu(c, &h->list, list) {
3607 if (c->type != type || skb_queue_empty(&c->data_q))
3610 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the candidate with the least outstanding data. */
3615 if (c->sent < min) {
3620 if (hci_conn_num(hdev, type) == num)
3629 switch (conn->type) {
3631 cnt = hdev->acl_cnt;
3635 cnt = hdev->sco_cnt;
/* LE shares the ACL buffer pool when no dedicated LE buffers exist. */
3638 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3642 BT_ERR("Unknown link type");
3650 BT_DBG("conn %p quote %d", conn, *quote);
/* TX watchdog: a link of @type timed out, so disconnect every
 * connection of that type that still has un-acked packets.
 */
3654 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3656 struct hci_conn_hash *h = &hdev->conn_hash;
3659 BT_ERR("%s link tx timeout", hdev->name);
3663 /* Kill stalled connections */
3664 list_for_each_entry_rcu(c, &h->list, list) {
3665 if (c->type == type && c->sent) {
3666 BT_ERR("%s killing stalled connection %pMR",
3667 hdev->name, &c->dst);
3668 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel scheduler: choose the channel of @type whose head packet has
 * the highest priority; among equals, prefer the connection with the
 * fewest packets in flight.  Also computes the send quota for it.
 */
3675 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3678 struct hci_conn_hash *h = &hdev->conn_hash;
3679 struct hci_chan *chan = NULL;
3680 unsigned int num = 0, min = ~0, cur_prio = 0;
3681 struct hci_conn *conn;
3682 int cnt, q, conn_num = 0;
3684 BT_DBG("%s", hdev->name);
3688 list_for_each_entry_rcu(conn, &h->list, list) {
3689 struct hci_chan *tmp;
3691 if (conn->type != type)
3694 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3699 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3700 struct sk_buff *skb;
3702 if (skb_queue_empty(&tmp->data_q))
3705 skb = skb_peek(&tmp->data_q);
/* Skip channels whose head packet ranks below the current best. */
3706 if (skb->priority < cur_prio)
3709 if (skb->priority > cur_prio) {
3712 cur_prio = skb->priority;
3717 if (conn->sent < min) {
3723 if (hci_conn_num(hdev, type) == conn_num)
/* Derive the quota from the buffer pool of the chosen link type. */
3732 switch (chan->conn->type) {
3734 cnt = hdev->acl_cnt;
3737 cnt = hdev->block_cnt;
3741 cnt = hdev->sco_cnt;
3744 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3748 BT_ERR("Unknown link type");
3753 BT_DBG("chan %p quote %d", chan, *quote);
/* Priority ageing: after traffic was sent, raise the head-packet
 * priority of waiting channels toward HCI_PRIO_MAX - 1 so that
 * lower-priority queues are eventually scheduled.
 */
3757 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3759 struct hci_conn_hash *h = &hdev->conn_hash;
3760 struct hci_conn *conn;
3763 BT_DBG("%s", hdev->name);
3767 list_for_each_entry_rcu(conn, &h->list, list) {
3768 struct hci_chan *chan;
3770 if (conn->type != type)
3773 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3778 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3779 struct sk_buff *skb;
3786 if (skb_queue_empty(&chan->data_q))
3789 skb = skb_peek(&chan->data_q);
/* Never promote beyond the cap of HCI_PRIO_MAX - 1. */
3790 if (skb->priority >= HCI_PRIO_MAX - 1)
3793 skb->priority = HCI_PRIO_MAX - 1;
3795 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3799 if (hci_conn_num(hdev, type) == num)
/* Number of controller buffer blocks consumed by @skb under block-based
 * flow control, rounding up and excluding the ACL header.
 */
3807 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3809 /* Calculate count of blocks used by this packet */
3810 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* Fire the ACL TX watchdog when no credits (@cnt == 0) remain and
 * nothing was sent since acl_last_tx + HCI_ACL_TX_TIMEOUT.  Skipped
 * entirely for unconfigured devices.
 */
3813 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3815 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3816 /* ACL tx timeout must be longer than maximum
3817 * link supervision timeout (40.9 seconds) */
3818 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3819 HCI_ACL_TX_TIMEOUT))
3820 hci_link_tx_to(hdev, ACL_LINK);
3824 static void hci_sched_acl_pkt(struct hci_dev *hdev)
/* Packet-based ACL scheduler: drain channel queues while ACL credits
 * remain, highest-priority channel first, then rebalance priorities.
 */
3826 unsigned int cnt = hdev->acl_cnt;
3827 struct hci_chan *chan;
3828 struct sk_buff *skb;
3831 __check_timeout(hdev, cnt);
/* Mojibake fix: mis-encoded entity restored to "&quote". */
3833 while (hdev->acl_cnt &&
3834 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3835 u32 priority = (skb_peek(&chan->data_q))->priority;
3836 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3837 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3838 skb->len, skb->priority);
3840 /* Stop if priority has changed */
3841 if (skb->priority < priority)
3844 skb = skb_dequeue(&chan->data_q);
3846 hci_conn_enter_active_mode(chan->conn,
3847 bt_cb(skb)->force_active);
3849 hci_send_frame(hdev, skb);
3850 hdev->acl_last_tx = jiffies;
/* If any packets went out, age channel priorities. */
3858 if (cnt != hdev->acl_cnt)
3859 hci_prio_recalculate(hdev, ACL_LINK);
3862 static void hci_sched_acl_blk(struct hci_dev *hdev)
/* Block-based ACL scheduler: like hci_sched_acl_pkt() but accounting in
 * controller buffer blocks (__get_blocks) instead of whole packets.
 */
3864 unsigned int cnt = hdev->block_cnt;
3865 struct hci_chan *chan;
3866 struct sk_buff *skb;
3870 __check_timeout(hdev, cnt);
3872 BT_DBG("%s", hdev->name);
3874 if (hdev->dev_type == HCI_AMP)
/* Mojibake fix: mis-encoded entity restored to "&quote". */
3879 while (hdev->block_cnt > 0 &&
3880 (chan = hci_chan_sent(hdev, type, &quote))) {
3881 u32 priority = (skb_peek(&chan->data_q))->priority;
3882 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3885 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3886 skb->len, skb->priority);
3888 /* Stop if priority has changed */
3889 if (skb->priority < priority)
3892 skb = skb_dequeue(&chan->data_q);
/* A packet larger than the remaining block budget cannot be sent. */
3894 blocks = __get_blocks(hdev, skb);
3895 if (blocks > hdev->block_cnt)
3898 hci_conn_enter_active_mode(chan->conn,
3899 bt_cb(skb)->force_active);
3901 hci_send_frame(hdev, skb);
3902 hdev->acl_last_tx = jiffies;
3904 hdev->block_cnt -= blocks;
3907 chan->sent += blocks;
3908 chan->conn->sent += blocks;
3912 if (cnt != hdev->block_cnt)
3913 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet- or block-based variant,
 * according to the controller's flow control mode.
 */
3916 static void hci_sched_acl(struct hci_dev *hdev)
3918 BT_DBG("%s", hdev->name);
3920 /* No ACL link over BR/EDR controller */
3921 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3924 /* No AMP link over AMP controller */
3925 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3928 switch (hdev->flow_ctl_mode) {
3929 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3930 hci_sched_acl_pkt(hdev);
3933 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3934 hci_sched_acl_blk(hdev);
3940 static void hci_sched_sco(struct hci_dev *hdev)
/* Drain SCO connection queues while sco_cnt credits remain. */
3942 struct hci_conn *conn;
3943 struct sk_buff *skb;
3946 BT_DBG("%s", hdev->name);
3948 if (!hci_conn_num(hdev, SCO_LINK))
/* Mojibake fix: mis-encoded entity restored to "&quote". */
3951 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3952 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3953 BT_DBG("skb %p len %d", skb, skb->len);
3954 hci_send_frame(hdev, skb);
3957 if (conn->sent == ~0)
/* eSCO counterpart of hci_sched_sco(): drain eSCO queues while credits
 * remain (shares the sco_cnt pool).
 */
3963 static void hci_sched_esco(struct hci_dev *hdev)
3965 struct hci_conn *conn;
3966 struct sk_buff *skb;
3969 BT_DBG("%s", hdev->name);
3971 if (!hci_conn_num(hdev, ESCO_LINK))
3974 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3976 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3977 BT_DBG("skb %p len %d", skb, skb->len);
3978 hci_send_frame(hdev, skb);
3981 if (conn->sent == ~0)
3987 static void hci_sched_le(struct hci_dev *hdev)
/* LE scheduler: like the ACL packet scheduler but with an LE-specific
 * watchdog and a credit pool that falls back to the ACL pool when the
 * controller has no dedicated LE buffers.
 */
3989 struct hci_chan *chan;
3990 struct sk_buff *skb;
3991 int quote, cnt, tmp;
3993 BT_DBG("%s", hdev->name);
3995 if (!hci_conn_num(hdev, LE_LINK))
3998 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3999 /* LE tx timeout must be longer than maximum
4000 * link supervision timeout (40.9 seconds) */
4001 if (!hdev->le_cnt && hdev->le_pkts &&
4002 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4003 hci_link_tx_to(hdev, LE_LINK);
4006 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
/* Mojibake fix: mis-encoded entity restored to "&quote". */
4008 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4009 u32 priority = (skb_peek(&chan->data_q))->priority;
4010 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4011 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4012 skb->len, skb->priority);
4014 /* Stop if priority has changed */
4015 if (skb->priority < priority)
4018 skb = skb_dequeue(&chan->data_q);
4020 hci_send_frame(hdev, skb);
4021 hdev->le_last_tx = jiffies;
4032 hdev->acl_cnt = cnt;
4035 hci_prio_recalculate(hdev, LE_LINK);
/* TX work item: run the per-link-type schedulers unless userspace holds
 * the device exclusively, then flush any raw-queued packets.
 */
4038 static void hci_tx_work(struct work_struct *work)
4040 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4041 struct sk_buff *skb;
4043 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4044 hdev->sco_cnt, hdev->le_cnt);
4046 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4047 /* Schedule queues and send stuff to HCI driver */
4048 hci_sched_acl(hdev);
4049 hci_sched_sco(hdev);
4050 hci_sched_esco(hdev);
4054 /* Send next queued raw (unknown type) packet */
4055 while ((skb = skb_dequeue(&hdev->raw_q)))
4056 hci_send_frame(hdev, skb);
4059 /* ----- HCI RX task (incoming data processing) ----- */
4061 /* ACL data packet */
/* Parse an incoming ACL packet's handle/flags, look up the owning
 * connection and pass the payload to L2CAP; unknown handles are logged.
 */
4062 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4064 struct hci_acl_hdr *hdr = (void *) skb->data;
4065 struct hci_conn *conn;
4066 __u16 handle, flags;
4068 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* The 16-bit field packs both the handle and the boundary flags. */
4070 handle = __le16_to_cpu(hdr->handle);
4071 flags = hci_flags(handle);
4072 handle = hci_handle(handle);
4074 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4077 hdev->stat.acl_rx++;
4080 conn = hci_conn_hash_lookup_handle(hdev, handle);
4081 hci_dev_unlock(hdev);
4084 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4086 /* Send to upper protocol */
4087 l2cap_recv_acldata(conn, skb, flags);
4090 BT_ERR("%s ACL packet for unknown connection handle %d",
4091 hdev->name, handle);
4097 /* SCO data packet */
/* Parse an incoming SCO packet's handle, look up the connection and
 * pass the payload to the SCO layer; unknown handles are logged.
 */
4098 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4100 struct hci_sco_hdr *hdr = (void *) skb->data;
4101 struct hci_conn *conn;
4104 skb_pull(skb, HCI_SCO_HDR_SIZE);
4106 handle = __le16_to_cpu(hdr->handle);
4108 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4110 hdev->stat.sco_rx++;
4113 conn = hci_conn_hash_lookup_handle(hdev, handle);
4114 hci_dev_unlock(hdev);
4117 /* Send to upper protocol */
4118 sco_recv_scodata(conn, skb);
4121 BT_ERR("%s SCO packet for unknown connection handle %d",
4122 hdev->name, handle);
/* A request is complete when the next queued command starts a new
 * request (HCI_REQ_START set on the head of cmd_q).
 */
4128 static bool hci_req_is_complete(struct hci_dev *hdev)
4130 struct sk_buff *skb;
4132 skb = skb_peek(&hdev->cmd_q);
4136 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
/* Re-queue a clone of the last sent command at the head of cmd_q - used
 * when a controller's spontaneous reset swallowed the response.
 */
4139 static void hci_resend_last(struct hci_dev *hdev)
4141 struct hci_command_hdr *sent;
4142 struct sk_buff *skb;
4145 if (!hdev->sent_cmd)
4148 sent = (void *) hdev->sent_cmd->data;
4149 opcode = __le16_to_cpu(sent->opcode);
/* HCI_Reset itself is deliberately never resent. */
4150 if (opcode == HCI_OP_RESET)
4153 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4157 skb_queue_head(&hdev->cmd_q, skb);
4158 queue_work(hdev->workqueue, &hdev->cmd_work);
/* On command completion, locate the request-level completion callbacks
 * (either on hdev->sent_cmd or further down cmd_q) and, when a command
 * failed, purge the remaining commands of the aborted request.
 */
4161 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4162 hci_req_complete_t *req_complete,
4163 hci_req_complete_skb_t *req_complete_skb)
4165 struct sk_buff *skb;
4166 unsigned long flags;
4168 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4170 /* If the completed command doesn't match the last one that was
4171 * sent we need to do special handling of it.
4173 if (!hci_sent_cmd_data(hdev, opcode)) {
4174 /* Some CSR based controllers generate a spontaneous
4175 * reset complete event during init and any pending
4176 * command will never be completed. In such a case we
4177 * need to resend whatever was the last sent
4180 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4181 hci_resend_last(hdev);
4186 /* If the command succeeded and there's still more commands in
4187 * this request the request is not yet complete.
4189 if (!status && !hci_req_is_complete(hdev))
4192 /* If this was the last command in a request the complete
4193 * callback would be found in hdev->sent_cmd instead of the
4194 * command queue (hdev->cmd_q).
4196 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4197 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4201 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4202 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4206 /* Remove all pending commands belonging to this request */
4207 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4208 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Stop at the start of the next request and put that command back. */
4209 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4210 __skb_queue_head(&hdev->cmd_q, skb);
4214 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4215 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4217 *req_complete = bt_cb(skb)->hci.req_complete;
4220 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
/* RX work item: drain rx_q, mirroring each packet to the monitor (and
 * sockets in promiscuous mode) before dispatching by packet type.
 */
4223 static void hci_rx_work(struct work_struct *work)
4225 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4226 struct sk_buff *skb;
4228 BT_DBG("%s", hdev->name);
4230 while ((skb = skb_dequeue(&hdev->rx_q))) {
4231 /* Send copy to monitor */
4232 hci_send_to_monitor(hdev, skb);
4234 if (atomic_read(&hdev->promisc)) {
4235 /* Send copy to the sockets */
4236 hci_send_to_sock(hdev, skb);
4239 /* If the device has been opened in HCI_USER_CHANNEL,
4240 * the userspace has exclusive access to device.
4241 * When device is HCI_INIT, we still need to process
4242 * the data packets to the driver in order
4243 * to complete its setup().
4245 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4246 !test_bit(HCI_INIT, &hdev->flags)) {
4251 if (test_bit(HCI_INIT, &hdev->flags)) {
4252 /* Don't process data packets in this states. */
4253 switch (hci_skb_pkt_type(skb)) {
4254 case HCI_ACLDATA_PKT:
4255 case HCI_SCODATA_PKT:
/* Dispatch accepted packets to the matching protocol handler. */
4262 switch (hci_skb_pkt_type(skb)) {
4264 BT_DBG("%s Event packet", hdev->name);
4265 hci_event_packet(hdev, skb);
4268 case HCI_ACLDATA_PKT:
4269 BT_DBG("%s ACL data packet", hdev->name);
4270 hci_acldata_packet(hdev, skb);
4273 case HCI_SCODATA_PKT:
4274 BT_DBG("%s SCO data packet", hdev->name);
4275 hci_scodata_packet(hdev, skb);
4285 static void hci_cmd_work(struct work_struct *work)
4287 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4288 struct sk_buff *skb;
4290 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4291 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4293 /* Send queued commands */
4294 if (atomic_read(&hdev->cmd_cnt)) {
4295 skb = skb_dequeue(&hdev->cmd_q);
4299 kfree_skb(hdev->sent_cmd);
4301 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4302 if (hdev->sent_cmd) {
4303 atomic_dec(&hdev->cmd_cnt);
4304 hci_send_frame(hdev, skb);
4305 if (test_bit(HCI_RESET, &hdev->flags))
4306 cancel_delayed_work(&hdev->cmd_timer);
4308 schedule_delayed_work(&hdev->cmd_timer,
4311 skb_queue_head(&hdev->cmd_q, skb);
4312 queue_work(hdev->workqueue, &hdev->cmd_work);