2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/property.h>
33 #include <linux/suspend.h>
34 #include <linux/wait.h>
35 #include <asm/unaligned.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 #include <net/bluetooth/mgmt.h>
42 #include "hci_request.h"
43 #include "hci_debugfs.h"
49 static void hci_rx_work(struct work_struct *work);
50 static void hci_cmd_work(struct work_struct *work);
51 static void hci_tx_work(struct work_struct *work);
54 LIST_HEAD(hci_dev_list);
55 DEFINE_RWLOCK(hci_dev_list_lock);
57 /* HCI callback list */
58 LIST_HEAD(hci_cb_list);
59 DEFINE_MUTEX(hci_cb_list_lock);
61 /* HCI ID Numbering */
62 static DEFINE_IDA(hci_index_ida);
64 /* ---- HCI debugfs entries ---- */
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
69 struct hci_dev *hdev = file->private_data;
72 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
81 struct hci_dev *hdev = file->private_data;
86 if (!test_bit(HCI_UP, &hdev->flags))
89 err = kstrtobool_from_user(user_buf, count, &enable);
93 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
96 hci_req_sync_lock(hdev);
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
103 hci_req_sync_unlock(hdev);
110 hci_dev_change_flag(hdev, HCI_DUT_MODE);
115 static const struct file_operations dut_mode_fops = {
117 .read = dut_mode_read,
118 .write = dut_mode_write,
119 .llseek = default_llseek,
122 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
125 struct hci_dev *hdev = file->private_data;
128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
134 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135 size_t count, loff_t *ppos)
137 struct hci_dev *hdev = file->private_data;
141 err = kstrtobool_from_user(user_buf, count, &enable);
145 /* When the diagnostic flags are not persistent and the transport
146 * is not active or in user channel operation, then there is no need
147 * for the vendor callback. Instead just store the desired value and
148 * the setting will be programmed when the controller gets powered on.
150 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
151 (!test_bit(HCI_RUNNING, &hdev->flags) ||
152 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
155 hci_req_sync_lock(hdev);
156 err = hdev->set_diag(hdev, enable);
157 hci_req_sync_unlock(hdev);
164 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
166 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
171 static const struct file_operations vendor_diag_fops = {
173 .read = vendor_diag_read,
174 .write = vendor_diag_write,
175 .llseek = default_llseek,
178 static void hci_debugfs_create_basic(struct hci_dev *hdev)
180 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
184 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
188 static int hci_reset_req(struct hci_request *req, unsigned long opt)
190 BT_DBG("%s %ld", req->hdev->name, opt);
193 set_bit(HCI_RESET, &req->hdev->flags);
194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
198 static void bredr_init(struct hci_request *req)
200 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
202 /* Read Local Supported Features */
203 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
205 /* Read Local Version */
206 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
208 /* Read BD Address */
209 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
212 static void amp_init1(struct hci_request *req)
214 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
216 /* Read Local Version */
217 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
219 /* Read Local Supported Commands */
220 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
222 /* Read Local AMP Info */
223 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
225 /* Read Data Blk size */
226 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
228 /* Read Flow Control Mode */
229 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
231 /* Read Location Data */
232 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
235 static int amp_init2(struct hci_request *req)
237 /* Read Local Supported Features. Not all AMP controllers
238 * support this so it's placed conditionally in the second
241 if (req->hdev->commands[14] & 0x20)
242 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
247 static int hci_init1_req(struct hci_request *req, unsigned long opt)
249 struct hci_dev *hdev = req->hdev;
251 BT_DBG("%s %ld", hdev->name, opt);
254 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
255 hci_reset_req(req, 0);
257 switch (hdev->dev_type) {
265 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
272 static void bredr_setup(struct hci_request *req)
277 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
278 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
280 /* Read Class of Device */
281 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
283 /* Read Local Name */
284 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
286 /* Read Voice Setting */
287 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
289 /* Read Number of Supported IAC */
290 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
292 /* Read Current IAC LAP */
293 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
295 /* Clear Event Filters */
296 flt_type = HCI_FLT_CLEAR_ALL;
297 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
299 /* Connection accept timeout ~20 secs */
300 param = cpu_to_le16(0x7d00);
301 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
304 static void le_setup(struct hci_request *req)
306 struct hci_dev *hdev = req->hdev;
308 /* Read LE Buffer Size */
309 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
311 /* Read LE Local Supported Features */
312 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
314 /* Read LE Supported States */
315 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
317 /* LE-only controllers have LE implicitly enabled */
318 if (!lmp_bredr_capable(hdev))
319 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
322 static void hci_setup_event_mask(struct hci_request *req)
324 struct hci_dev *hdev = req->hdev;
326 /* The second byte is 0xff instead of 0x9f (two reserved bits
327 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
330 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
332 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
333 * any event mask for pre 1.2 devices.
335 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
338 if (lmp_bredr_capable(hdev)) {
339 events[4] |= 0x01; /* Flow Specification Complete */
341 /* Use a different default for LE-only devices */
342 memset(events, 0, sizeof(events));
343 events[1] |= 0x20; /* Command Complete */
344 events[1] |= 0x40; /* Command Status */
345 events[1] |= 0x80; /* Hardware Error */
347 /* If the controller supports the Disconnect command, enable
348 * the corresponding event. In addition enable packet flow
349 * control related events.
351 if (hdev->commands[0] & 0x20) {
352 events[0] |= 0x10; /* Disconnection Complete */
353 events[2] |= 0x04; /* Number of Completed Packets */
354 events[3] |= 0x02; /* Data Buffer Overflow */
357 /* If the controller supports the Read Remote Version
358 * Information command, enable the corresponding event.
360 if (hdev->commands[2] & 0x80)
361 events[1] |= 0x08; /* Read Remote Version Information
365 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
366 events[0] |= 0x80; /* Encryption Change */
367 events[5] |= 0x80; /* Encryption Key Refresh Complete */
371 if (lmp_inq_rssi_capable(hdev) ||
372 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
373 events[4] |= 0x02; /* Inquiry Result with RSSI */
375 if (lmp_ext_feat_capable(hdev))
376 events[4] |= 0x04; /* Read Remote Extended Features Complete */
378 if (lmp_esco_capable(hdev)) {
379 events[5] |= 0x08; /* Synchronous Connection Complete */
380 events[5] |= 0x10; /* Synchronous Connection Changed */
383 if (lmp_sniffsubr_capable(hdev))
384 events[5] |= 0x20; /* Sniff Subrating */
386 if (lmp_pause_enc_capable(hdev))
387 events[5] |= 0x80; /* Encryption Key Refresh Complete */
389 if (lmp_ext_inq_capable(hdev))
390 events[5] |= 0x40; /* Extended Inquiry Result */
392 if (lmp_no_flush_capable(hdev))
393 events[7] |= 0x01; /* Enhanced Flush Complete */
395 if (lmp_lsto_capable(hdev))
396 events[6] |= 0x80; /* Link Supervision Timeout Changed */
398 if (lmp_ssp_capable(hdev)) {
399 events[6] |= 0x01; /* IO Capability Request */
400 events[6] |= 0x02; /* IO Capability Response */
401 events[6] |= 0x04; /* User Confirmation Request */
402 events[6] |= 0x08; /* User Passkey Request */
403 events[6] |= 0x10; /* Remote OOB Data Request */
404 events[6] |= 0x20; /* Simple Pairing Complete */
405 events[7] |= 0x04; /* User Passkey Notification */
406 events[7] |= 0x08; /* Keypress Notification */
407 events[7] |= 0x10; /* Remote Host Supported
408 * Features Notification
412 if (lmp_le_capable(hdev))
413 events[7] |= 0x20; /* LE Meta-Event */
415 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
418 static int hci_init2_req(struct hci_request *req, unsigned long opt)
420 struct hci_dev *hdev = req->hdev;
422 if (hdev->dev_type == HCI_AMP)
423 return amp_init2(req);
425 if (lmp_bredr_capable(hdev))
428 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
430 if (lmp_le_capable(hdev))
433 /* All Bluetooth 1.2 and later controllers should support the
434 * HCI command for reading the local supported commands.
436 * Unfortunately some controllers indicate Bluetooth 1.2 support,
437 * but do not have support for this command. If that is the case,
438 * the driver can quirk the behavior and skip reading the local
439 * supported commands.
441 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
442 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
443 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
445 if (lmp_ssp_capable(hdev)) {
446 /* When SSP is available, then the host features page
447 * should also be available as well. However some
448 * controllers list the max_page as 0 as long as SSP
449 * has not been enabled. To achieve proper debugging
450 * output, force the minimum max_page to 1 at least.
452 hdev->max_page = 0x01;
454 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
457 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
458 sizeof(mode), &mode);
460 struct hci_cp_write_eir cp;
462 memset(hdev->eir, 0, sizeof(hdev->eir));
463 memset(&cp, 0, sizeof(cp));
465 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
469 if (lmp_inq_rssi_capable(hdev) ||
470 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
473 /* If Extended Inquiry Result events are supported, then
474 * they are clearly preferred over Inquiry Result with RSSI
477 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
479 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
482 if (lmp_inq_tx_pwr_capable(hdev))
483 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
485 if (lmp_ext_feat_capable(hdev)) {
486 struct hci_cp_read_local_ext_features cp;
489 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
493 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
495 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
502 static void hci_setup_link_policy(struct hci_request *req)
504 struct hci_dev *hdev = req->hdev;
505 struct hci_cp_write_def_link_policy cp;
508 if (lmp_rswitch_capable(hdev))
509 link_policy |= HCI_LP_RSWITCH;
510 if (lmp_hold_capable(hdev))
511 link_policy |= HCI_LP_HOLD;
512 if (lmp_sniff_capable(hdev))
513 link_policy |= HCI_LP_SNIFF;
514 if (lmp_park_capable(hdev))
515 link_policy |= HCI_LP_PARK;
517 cp.policy = cpu_to_le16(link_policy);
518 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
521 static void hci_set_le_support(struct hci_request *req)
523 struct hci_dev *hdev = req->hdev;
524 struct hci_cp_write_le_host_supported cp;
526 /* LE-only devices do not support explicit enablement */
527 if (!lmp_bredr_capable(hdev))
530 memset(&cp, 0, sizeof(cp));
532 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
537 if (cp.le != lmp_host_le_capable(hdev))
538 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
542 static void hci_set_event_mask_page_2(struct hci_request *req)
544 struct hci_dev *hdev = req->hdev;
545 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
546 bool changed = false;
548 /* If Connectionless Peripheral Broadcast central role is supported
549 * enable all necessary events for it.
551 if (lmp_cpb_central_capable(hdev)) {
552 events[1] |= 0x40; /* Triggered Clock Capture */
553 events[1] |= 0x80; /* Synchronization Train Complete */
554 events[2] |= 0x10; /* Peripheral Page Response Timeout */
555 events[2] |= 0x20; /* CPB Channel Map Change */
559 /* If Connectionless Peripheral Broadcast peripheral role is supported
560 * enable all necessary events for it.
562 if (lmp_cpb_peripheral_capable(hdev)) {
563 events[2] |= 0x01; /* Synchronization Train Received */
564 events[2] |= 0x02; /* CPB Receive */
565 events[2] |= 0x04; /* CPB Timeout */
566 events[2] |= 0x08; /* Truncated Page Complete */
570 /* Enable Authenticated Payload Timeout Expired event if supported */
571 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
576 /* Some Broadcom based controllers indicate support for Set Event
577 * Mask Page 2 command, but then actually do not support it. Since
578 * the default value is all bits set to zero, the command is only
579 * required if the event mask has to be changed. In case no change
580 * to the event mask is needed, skip this command.
583 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
584 sizeof(events), events);
587 static int hci_init3_req(struct hci_request *req, unsigned long opt)
589 struct hci_dev *hdev = req->hdev;
592 hci_setup_event_mask(req);
594 if (hdev->commands[6] & 0x20 &&
595 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
596 struct hci_cp_read_stored_link_key cp;
598 bacpy(&cp.bdaddr, BDADDR_ANY);
600 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
603 if (hdev->commands[5] & 0x10)
604 hci_setup_link_policy(req);
606 if (hdev->commands[8] & 0x01)
607 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
609 if (hdev->commands[18] & 0x04 &&
610 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
611 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
613 /* Some older Broadcom based Bluetooth 1.2 controllers do not
614 * support the Read Page Scan Type command. Check support for
615 * this command in the bit mask of supported commands.
617 if (hdev->commands[13] & 0x01)
618 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
620 if (lmp_le_capable(hdev)) {
623 memset(events, 0, sizeof(events));
625 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
626 events[0] |= 0x10; /* LE Long Term Key Request */
628 /* If controller supports the Connection Parameters Request
629 * Link Layer Procedure, enable the corresponding event.
631 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
632 events[0] |= 0x20; /* LE Remote Connection
636 /* If the controller supports the Data Length Extension
637 * feature, enable the corresponding event.
639 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
640 events[0] |= 0x40; /* LE Data Length Change */
642 /* If the controller supports LL Privacy feature, enable
643 * the corresponding event.
645 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
646 events[1] |= 0x02; /* LE Enhanced Connection
650 /* If the controller supports Extended Scanner Filter
651 * Policies, enable the corresponding event.
653 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
654 events[1] |= 0x04; /* LE Direct Advertising
658 /* If the controller supports Channel Selection Algorithm #2
659 * feature, enable the corresponding event.
661 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
662 events[2] |= 0x08; /* LE Channel Selection
666 /* If the controller supports the LE Set Scan Enable command,
667 * enable the corresponding advertising report event.
669 if (hdev->commands[26] & 0x08)
670 events[0] |= 0x02; /* LE Advertising Report */
672 /* If the controller supports the LE Create Connection
673 * command, enable the corresponding event.
675 if (hdev->commands[26] & 0x10)
676 events[0] |= 0x01; /* LE Connection Complete */
678 /* If the controller supports the LE Connection Update
679 * command, enable the corresponding event.
681 if (hdev->commands[27] & 0x04)
682 events[0] |= 0x04; /* LE Connection Update
686 /* If the controller supports the LE Read Remote Used Features
687 * command, enable the corresponding event.
689 if (hdev->commands[27] & 0x20)
690 events[0] |= 0x08; /* LE Read Remote Used
694 /* If the controller supports the LE Read Local P-256
695 * Public Key command, enable the corresponding event.
697 if (hdev->commands[34] & 0x02)
698 events[0] |= 0x80; /* LE Read Local P-256
699 * Public Key Complete
702 /* If the controller supports the LE Generate DHKey
703 * command, enable the corresponding event.
705 if (hdev->commands[34] & 0x04)
706 events[1] |= 0x01; /* LE Generate DHKey Complete */
708 /* If the controller supports the LE Set Default PHY or
709 * LE Set PHY commands, enable the corresponding event.
711 if (hdev->commands[35] & (0x20 | 0x40))
712 events[1] |= 0x08; /* LE PHY Update Complete */
714 /* If the controller supports LE Set Extended Scan Parameters
715 * and LE Set Extended Scan Enable commands, enable the
716 * corresponding event.
718 if (use_ext_scan(hdev))
719 events[1] |= 0x10; /* LE Extended Advertising
723 /* If the controller supports the LE Extended Advertising
724 * command, enable the corresponding event.
726 if (ext_adv_capable(hdev))
727 events[2] |= 0x02; /* LE Advertising Set
731 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
734 /* Read LE Advertising Channel TX Power */
735 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
736 /* HCI TS spec forbids mixing of legacy and extended
737 * advertising commands wherein READ_ADV_TX_POWER is
738 * also included. So do not call it if extended adv
739 * is supported otherwise controller will return
740 * COMMAND_DISALLOWED for extended commands.
742 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
745 if ((hdev->commands[38] & 0x80) &&
746 !test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) {
747 /* Read LE Min/Max Tx Power*/
748 hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
752 if (hdev->commands[26] & 0x40) {
753 /* Read LE Accept List Size */
754 hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
758 if (hdev->commands[26] & 0x80) {
759 /* Clear LE Accept List */
760 hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
763 if (hdev->commands[34] & 0x40) {
764 /* Read LE Resolving List Size */
765 hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
769 if (hdev->commands[34] & 0x20) {
770 /* Clear LE Resolving List */
771 hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
774 if (hdev->commands[35] & 0x04) {
775 __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
777 /* Set RPA timeout */
778 hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
782 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
783 /* Read LE Maximum Data Length */
784 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
786 /* Read LE Suggested Default Data Length */
787 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
790 if (ext_adv_capable(hdev)) {
791 /* Read LE Number of Supported Advertising Sets */
792 hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
796 hci_set_le_support(req);
799 /* Read features beyond page 1 if available */
800 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
801 struct hci_cp_read_local_ext_features cp;
804 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
811 static int hci_init4_req(struct hci_request *req, unsigned long opt)
813 struct hci_dev *hdev = req->hdev;
815 /* Some Broadcom based Bluetooth controllers do not support the
816 * Delete Stored Link Key command. They are clearly indicating its
817 * absence in the bit mask of supported commands.
819 * Check the supported commands and only if the command is marked
820 * as supported send it. If not supported assume that the controller
821 * does not have actual support for stored link keys which makes this
822 * command redundant anyway.
824 * Some controllers indicate that they support handling deleting
825 * stored link keys, but they don't. The quirk lets a driver
826 * just disable this command.
828 if (hdev->commands[6] & 0x80 &&
829 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
830 struct hci_cp_delete_stored_link_key cp;
832 bacpy(&cp.bdaddr, BDADDR_ANY);
833 cp.delete_all = 0x01;
834 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
838 /* Set event mask page 2 if the HCI command for it is supported */
839 if (hdev->commands[22] & 0x04)
840 hci_set_event_mask_page_2(req);
842 /* Read local codec list if the HCI command is supported */
843 if (hdev->commands[29] & 0x20)
844 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
846 /* Read local pairing options if the HCI command is supported */
847 if (hdev->commands[41] & 0x08)
848 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
850 /* Get MWS transport configuration if the HCI command is supported */
851 if (hdev->commands[30] & 0x08)
852 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
854 /* Check for Synchronization Train support */
855 if (lmp_sync_train_capable(hdev))
856 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
858 /* Enable Secure Connections if supported and configured */
859 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
860 bredr_sc_enabled(hdev)) {
863 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
864 sizeof(support), &support);
867 /* Set erroneous data reporting if supported to the wideband speech
870 if (hdev->commands[18] & 0x08 &&
871 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
872 bool enabled = hci_dev_test_flag(hdev,
873 HCI_WIDEBAND_SPEECH_ENABLED);
876 (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
877 struct hci_cp_write_def_err_data_reporting cp;
879 cp.err_data_reporting = enabled ?
880 ERR_DATA_REPORTING_ENABLED :
881 ERR_DATA_REPORTING_DISABLED;
883 hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
888 /* Set Suggested Default Data Length to maximum if supported */
889 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
890 struct hci_cp_le_write_def_data_len cp;
892 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
893 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
894 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
897 /* Set Default PHY parameters if command is supported */
898 if (hdev->commands[35] & 0x20) {
899 struct hci_cp_le_set_default_phy cp;
902 cp.tx_phys = hdev->le_tx_def_phys;
903 cp.rx_phys = hdev->le_rx_def_phys;
905 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
911 static int __hci_init(struct hci_dev *hdev)
915 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
919 if (hci_dev_test_flag(hdev, HCI_SETUP))
920 hci_debugfs_create_basic(hdev);
922 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
926 /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
927 * BR/EDR/LE type controllers. AMP controllers only need the
928 * first two stages of init.
930 if (hdev->dev_type != HCI_PRIMARY)
933 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
937 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
941 /* This function is only called when the controller is actually in
942 * configured state. When the controller is marked as unconfigured,
943 * this initialization procedure is not run.
945 * It means that it is possible that a controller runs through its
946 * setup phase and then discovers missing settings. If that is the
947 * case, then this function will not be called. It then will only
948 * be called during the config phase.
950 * So only when in setup phase or config phase, create the debugfs
951 * entries and register the SMP channels.
953 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
954 !hci_dev_test_flag(hdev, HCI_CONFIG))
957 hci_debugfs_create_common(hdev);
959 if (lmp_bredr_capable(hdev))
960 hci_debugfs_create_bredr(hdev);
962 if (lmp_le_capable(hdev))
963 hci_debugfs_create_le(hdev);
968 static int hci_init0_req(struct hci_request *req, unsigned long opt)
970 struct hci_dev *hdev = req->hdev;
972 BT_DBG("%s %ld", hdev->name, opt);
975 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
976 hci_reset_req(req, 0);
978 /* Read Local Version */
979 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
981 /* Read BD Address */
982 if (hdev->set_bdaddr)
983 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
988 static int __hci_unconf_init(struct hci_dev *hdev)
992 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
995 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
999 if (hci_dev_test_flag(hdev, HCI_SETUP))
1000 hci_debugfs_create_basic(hdev);
1005 static int hci_scan_req(struct hci_request *req, unsigned long opt)
1009 BT_DBG("%s %x", req->hdev->name, scan);
1011 /* Inquiry and Page scans */
1012 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1016 static int hci_auth_req(struct hci_request *req, unsigned long opt)
1020 BT_DBG("%s %x", req->hdev->name, auth);
1022 /* Authentication */
1023 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1027 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1031 BT_DBG("%s %x", req->hdev->name, encrypt);
1034 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1038 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
1040 __le16 policy = cpu_to_le16(opt);
1042 BT_DBG("%s %x", req->hdev->name, policy);
1044 /* Default link policy */
1045 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1049 /* Get HCI device by index.
1050 * Device is held on return. */
1051 struct hci_dev *hci_dev_get(int index)
1053 struct hci_dev *hdev = NULL, *d;
1055 BT_DBG("%d", index);
1060 read_lock(&hci_dev_list_lock);
1061 list_for_each_entry(d, &hci_dev_list, list) {
1062 if (d->id == index) {
1063 hdev = hci_dev_hold(d);
1067 read_unlock(&hci_dev_list_lock);
1071 /* ---- Inquiry support ---- */
1073 bool hci_discovery_active(struct hci_dev *hdev)
1075 struct discovery_state *discov = &hdev->discovery;
1077 switch (discov->state) {
1078 case DISCOVERY_FINDING:
1079 case DISCOVERY_RESOLVING:
1087 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1089 int old_state = hdev->discovery.state;
1091 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1093 if (old_state == state)
1096 hdev->discovery.state = state;
1099 case DISCOVERY_STOPPED:
1100 hci_update_background_scan(hdev);
1102 if (old_state != DISCOVERY_STARTING)
1103 mgmt_discovering(hdev, 0);
1105 case DISCOVERY_STARTING:
1107 case DISCOVERY_FINDING:
1108 mgmt_discovering(hdev, 1);
1110 case DISCOVERY_RESOLVING:
1112 case DISCOVERY_STOPPING:
1117 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1119 struct discovery_state *cache = &hdev->discovery;
1120 struct inquiry_entry *p, *n;
1122 list_for_each_entry_safe(p, n, &cache->all, all) {
1127 INIT_LIST_HEAD(&cache->unknown);
1128 INIT_LIST_HEAD(&cache->resolve);
1131 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1134 struct discovery_state *cache = &hdev->discovery;
1135 struct inquiry_entry *e;
1137 BT_DBG("cache %p, %pMR", cache, bdaddr);
1139 list_for_each_entry(e, &cache->all, all) {
1140 if (!bacmp(&e->data.bdaddr, bdaddr))
1147 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1150 struct discovery_state *cache = &hdev->discovery;
1151 struct inquiry_entry *e;
1153 BT_DBG("cache %p, %pMR", cache, bdaddr);
1155 list_for_each_entry(e, &cache->unknown, list) {
1156 if (!bacmp(&e->data.bdaddr, bdaddr))
1163 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1167 struct discovery_state *cache = &hdev->discovery;
1168 struct inquiry_entry *e;
1170 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1172 list_for_each_entry(e, &cache->resolve, list) {
1173 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1175 if (!bacmp(&e->data.bdaddr, bdaddr))
1182 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1183 struct inquiry_entry *ie)
1185 struct discovery_state *cache = &hdev->discovery;
1186 struct list_head *pos = &cache->resolve;
1187 struct inquiry_entry *p;
1189 list_del(&ie->list);
1191 list_for_each_entry(p, &cache->resolve, list) {
1192 if (p->name_state != NAME_PENDING &&
1193 abs(p->data.rssi) >= abs(ie->data.rssi))
1198 list_add(&ie->list, pos);
1201 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1204 struct discovery_state *cache = &hdev->discovery;
1205 struct inquiry_entry *ie;
1208 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1210 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1212 if (!data->ssp_mode)
1213 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1215 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1217 if (!ie->data.ssp_mode)
1218 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1220 if (ie->name_state == NAME_NEEDED &&
1221 data->rssi != ie->data.rssi) {
1222 ie->data.rssi = data->rssi;
1223 hci_inquiry_cache_update_resolve(hdev, ie);
1229 /* Entry not in the cache. Add new one. */
1230 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1232 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1236 list_add(&ie->all, &cache->all);
1239 ie->name_state = NAME_KNOWN;
1241 ie->name_state = NAME_NOT_KNOWN;
1242 list_add(&ie->list, &cache->unknown);
1246 if (name_known && ie->name_state != NAME_KNOWN &&
1247 ie->name_state != NAME_PENDING) {
1248 ie->name_state = NAME_KNOWN;
1249 list_del(&ie->list);
1252 memcpy(&ie->data, data, sizeof(*data));
1253 ie->timestamp = jiffies;
1254 cache->timestamp = jiffies;
1256 if (ie->name_state == NAME_NOT_KNOWN)
1257 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Serialize up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info, returning the number of entries copied.
 * NOTE(review): the "copied" counter declaration and the loop bound
 * check are not visible in this extract.
 */
1263 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1265 struct discovery_state *cache = &hdev->discovery;
1266 struct inquiry_info *info = (struct inquiry_info *) buf;
1267 struct inquiry_entry *e;
1270 list_for_each_entry(e, &cache->all, all) {
1271 struct inquiry_data *data = &e->data;
/* Flatten the cached entry into the wire-format inquiry_info. */
1276 bacpy(&info->bdaddr, &data->bdaddr);
1277 info->pscan_rep_mode = data->pscan_rep_mode;
1278 info->pscan_period_mode = data->pscan_period_mode;
1279 info->pscan_mode = data->pscan_mode;
1280 memcpy(info->dev_class, data->dev_class, 3);
1281 info->clock_offset = data->clock_offset;
1287 BT_DBG("cache %p, copied %d", cache, copied);
/* hci_request callback: queue an HCI Inquiry command built from the
 * struct hci_inquiry_req smuggled through @opt. Does nothing when an
 * inquiry is already running (HCI_INQUIRY set).
 */
1291 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1293 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1294 struct hci_dev *hdev = req->hdev;
1295 struct hci_cp_inquiry cp;
1297 BT_DBG("%s", hdev->name);
1299 if (test_bit(HCI_INQUIRY, &hdev->flags))
/* LAP is a 3-byte inquiry access code per the HCI spec. */
1303 memcpy(&cp.lap, &ir->lap, 3);
1304 cp.length = ir->length;
1305 cp.num_rsp = ir->num_rsp;
1306 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl: run (or reuse a fresh cache of) a BR/EDR inquiry
 * and copy results back to userspace. Rejects user-channel,
 * unconfigured, non-primary and BR/EDR-disabled controllers.
 * NOTE(review): error labels, hci_dev_lock() calls and several
 * declarations (timeo, buf) are missing from this extract.
 */
1311 int hci_inquiry(void __user *arg)
1313 __u8 __user *ptr = arg;
1314 struct hci_inquiry_req ir;
1315 struct hci_dev *hdev;
1316 int err = 0, do_inquiry = 0, max_rsp;
1320 if (copy_from_user(&ir, ptr, sizeof(ir)))
1323 hdev = hci_dev_get(ir.dev_id);
1327 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1332 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1337 if (hdev->dev_type != HCI_PRIMARY) {
1342 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1347 /* Restrict maximum inquiry length to 60 seconds */
1348 if (ir.length > 60) {
/* Flush and redo the inquiry when the cache is too old, empty, or
 * the caller explicitly asked for a flush.
 */
1354 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1355 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1356 hci_inquiry_cache_flush(hdev);
1359 hci_dev_unlock(hdev);
/* ir.length is in 1.28s baseband units; 2000ms jiffies approximates it. */
1361 timeo = ir.length * msecs_to_jiffies(2000);
1364 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1369 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1370 * cleared). If it is interrupted by a signal, return -EINTR.
1372 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1373 TASK_INTERRUPTIBLE)) {
1379 /* for unlimited number of responses we will use buffer with
1382 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1384 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1385 * copy it to the user space.
1387 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1394 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1395 hci_dev_unlock(hdev);
1397 BT_DBG("num_rsp %d", ir.num_rsp);
/* Copy the header back first, then the result array behind it. */
1399 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1401 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1415 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1416 * (BD_ADDR) for a HCI device from
1417 * a firmware node property.
1418 * @hdev: The HCI device
1420 * Search the firmware node for 'local-bd-address'.
1422 * All-zero BD addresses are rejected, because those could be properties
1423 * that exist in the firmware tables, but were not updated by the firmware. For
1424 * example, the DTS could define 'local-bd-address', with zero BD addresses.
/* See the kernel-doc block above; declarations of 'ba' and 'ret' are
 * not visible in this extract.
 */
1426 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1428 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1432 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1433 (u8 *)&ba, sizeof(ba));
/* Reject read failures and all-zero addresses (placeholder DT data). */
1434 if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
1437 bacpy(&hdev->public_addr, &ba);
/* Power on and initialize a controller: driver open, vendor setup,
 * address configuration, HCI init sequence, and the HCI_DEV_UP
 * notification; on any init failure the transport is torn back down.
 * Runs under hci_req_sync_lock(). Returns 0 or a negative errno.
 * NOTE(review): this extract is missing many lines (goto labels,
 * braces, several conditions) — ordering below is load-bearing, do not
 * rearrange without the full file.
 */
1440 static int hci_dev_do_open(struct hci_dev *hdev)
1444 BT_DBG("%s %p", hdev->name, hdev);
1446 hci_req_sync_lock(hdev);
1448 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1453 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1454 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1455 /* Check for rfkill but allow the HCI setup stage to
1456 * proceed (which in itself doesn't cause any RF activity).
1458 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1463 /* Check for valid public address or a configured static
1464 * random address, but let the HCI setup proceed to
1465 * be able to determine if there is a public address
1468 * In case of user channel usage, it is not important
1469 * if a public address or static random address is
1472 * This check is only valid for BR/EDR controllers
1473 * since AMP controllers do not have an address.
1475 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1476 hdev->dev_type == HCI_PRIMARY &&
1477 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1478 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1479 ret = -EADDRNOTAVAIL;
/* Already up: nothing to do. */
1484 if (test_bit(HCI_UP, &hdev->flags)) {
/* Driver-level open of the transport. */
1489 if (hdev->open(hdev)) {
1494 set_bit(HCI_RUNNING, &hdev->flags);
1495 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1497 atomic_set(&hdev->cmd_cnt, 1);
1498 set_bit(HCI_INIT, &hdev->flags);
/* Vendor setup runs on first power-on, or every time for
 * controllers that lose their setup across power cycles.
 */
1500 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1501 test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1502 bool invalid_bdaddr;
1504 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1507 ret = hdev->setup(hdev);
1509 /* The transport driver can set the quirk to mark the
1510 * BD_ADDR invalid before creating the HCI device or in
1511 * its setup callback.
1513 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1519 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1520 if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1521 hci_dev_get_bd_addr_from_property(hdev);
1523 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1525 ret = hdev->set_bdaddr(hdev,
1526 &hdev->public_addr);
1528 /* If setting of the BD_ADDR from the device
1529 * property succeeds, then treat the address
1530 * as valid even if the invalid BD_ADDR
1531 * quirk indicates otherwise.
1534 invalid_bdaddr = false;
1539 /* The transport driver can set these quirks before
1540 * creating the HCI device or in its setup callback.
1542 * For the invalid BD_ADDR quirk it is possible that
1543 * it becomes a valid address if the bootloader does
1544 * provide it (see above).
1546 * In case any of them is set, the controller has to
1547 * start up as unconfigured.
1549 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1551 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1553 /* For an unconfigured controller it is required to
1554 * read at least the version information provided by
1555 * the Read Local Version Information command.
1557 * If the set_bdaddr driver callback is provided, then
1558 * also the original Bluetooth public device address
1559 * will be read using the Read BD Address command.
1561 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1562 ret = __hci_unconf_init(hdev);
1565 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1566 /* If public address change is configured, ensure that
1567 * the address gets programmed. If the driver does not
1568 * support changing the public address, fail the power
1571 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1573 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1575 ret = -EADDRNOTAVAIL;
/* Full HCI init (plus optional driver post_init hook) only for
 * configured, non-user-channel controllers.
 */
1579 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1580 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1581 ret = __hci_init(hdev);
1582 if (!ret && hdev->post_init)
1583 ret = hdev->post_init(hdev);
1587 /* If the HCI Reset command is clearing all diagnostic settings,
1588 * then they need to be reprogrammed after the init procedure
1591 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1592 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1593 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1594 ret = hdev->set_diag(hdev, true);
1599 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark device up and notify listeners. */
1603 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1604 hci_adv_instances_set_rpa_expired(hdev, true);
1605 set_bit(HCI_UP, &hdev->flags);
1606 hci_sock_dev_event(hdev, HCI_DEV_UP);
1607 hci_leds_update_powered(hdev, true);
1608 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1609 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1610 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1611 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1612 hci_dev_test_flag(hdev, HCI_MGMT) &&
1613 hdev->dev_type == HCI_PRIMARY) {
1614 ret = __hci_req_hci_power_on(hdev);
1615 mgmt_power_on(hdev, ret);
1618 /* Init failed, cleanup */
1619 flush_work(&hdev->tx_work);
1621 /* Since hci_rx_work() is possible to awake new cmd_work
1622 * it should be flushed first to avoid unexpected call of
1625 flush_work(&hdev->rx_work);
1626 flush_work(&hdev->cmd_work);
1628 skb_queue_purge(&hdev->cmd_q);
1629 skb_queue_purge(&hdev->rx_q);
/* Drop the in-flight command and its timeout before closing. */
1634 if (hdev->sent_cmd) {
1635 cancel_delayed_work_sync(&hdev->cmd_timer);
1636 kfree_skb(hdev->sent_cmd);
1637 hdev->sent_cmd = NULL;
1640 clear_bit(HCI_RUNNING, &hdev->flags);
1641 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
/* Keep only HCI_RAW across the failed open. */
1644 hdev->flags &= BIT(HCI_RAW);
1648 hci_req_sync_unlock(hdev);
1652 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl entry point: validate the device state, settle any
 * pending power work, then delegate to hci_dev_do_open().
 */
1654 int hci_dev_open(__u16 dev)
1656 struct hci_dev *hdev;
1659 hdev = hci_dev_get(dev);
1663 /* Devices that are marked as unconfigured can only be powered
1664 * up as user channel. Trying to bring them up as normal devices
1665 * will result into a failure. Only user channel operation is
1668 * When this function is called for a user channel, the flag
1669 * HCI_USER_CHANNEL will be set first before attempting to
1672 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1673 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1678 /* We need to ensure that no other power on/off work is pending
1679 * before proceeding to call hci_dev_do_open. This is
1680 * particularly important if the setup procedure has not yet
1683 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1684 cancel_delayed_work(&hdev->power_off);
1686 /* After this call it is guaranteed that the setup procedure
1687 * has finished. This means that error conditions like RFKILL
1688 * or no valid public or static random address apply.
1690 flush_workqueue(hdev->req_workqueue);
1692 /* For controllers not using the management interface and that
1693 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1694 * so that pairing works for them. Once the management interface
1695 * is in use this bit will be cleared again and userspace has
1696 * to explicitly enable it.
1698 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1699 !hci_dev_test_flag(hdev, HCI_MGMT))
1700 hci_dev_set_flag(hdev, HCI_BONDABLE);
1702 err = hci_dev_do_open(hdev);
1709 /* This function requires the caller holds hdev->lock */
/* Drop every pending LE connection action: release any connection
 * attached to a conn param entry and take it off the action list.
 */
1710 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1712 struct hci_conn_params *p;
1714 list_for_each_entry(p, &hdev->le_conn_params, list) {
/* Both drop and put: the params entry held its own reference. */
1716 hci_conn_drop(p->conn);
1717 hci_conn_put(p->conn);
1720 list_del_init(&p->action);
1723 BT_DBG("All LE pending actions cleared");
/* Power down a controller: cancel pending work, flush queues, notify
 * mgmt, flush caches/connections, optionally issue HCI Reset, and
 * close the driver transport. The teardown order here is deliberate.
 * NOTE(review): extract is missing lines (auto_off/err declarations,
 * shutdown guard, driver close call) — consult the full file.
 */
1726 int hci_dev_do_close(struct hci_dev *hdev)
1731 BT_DBG("%s %p", hdev->name, hdev);
1733 cancel_delayed_work(&hdev->power_off);
1734 cancel_delayed_work(&hdev->ncmd_timer);
1736 hci_request_cancel_all(hdev);
1737 hci_req_sync_lock(hdev);
1739 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1740 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1741 test_bit(HCI_UP, &hdev->flags)) {
1742 /* Execute vendor specific shutdown routine */
1744 err = hdev->shutdown(hdev);
/* Not up: just cancel the command timer and bail out. */
1747 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1748 cancel_delayed_work_sync(&hdev->cmd_timer);
1749 hci_req_sync_unlock(hdev);
1753 hci_leds_update_powered(hdev, false);
1755 /* Flush RX and TX works */
1756 flush_work(&hdev->tx_work);
1757 flush_work(&hdev->rx_work);
1759 if (hdev->discov_timeout > 0) {
1760 hdev->discov_timeout = 0;
1761 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1762 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1765 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1766 cancel_delayed_work(&hdev->service_cache);
1768 if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1769 struct adv_info *adv_instance;
1771 cancel_delayed_work_sync(&hdev->rpa_expired);
1773 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1774 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1777 /* Avoid potential lockdep warnings from the *_flush() calls by
1778 * ensuring the workqueue is empty up front.
1780 drain_workqueue(hdev->workqueue);
1784 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1786 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
/* Tell mgmt about the power-off unless it was an automatic one. */
1788 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1789 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1790 hci_dev_test_flag(hdev, HCI_MGMT))
1791 __mgmt_power_off(hdev);
1793 hci_inquiry_cache_flush(hdev);
1794 hci_pend_le_actions_clear(hdev);
1795 hci_conn_hash_flush(hdev);
1796 hci_dev_unlock(hdev);
1798 smp_unregister(hdev);
1800 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1802 aosp_do_close(hdev);
1803 msft_do_close(hdev);
1809 skb_queue_purge(&hdev->cmd_q);
1810 atomic_set(&hdev->cmd_cnt, 1);
/* Optionally reset the controller before the transport goes away. */
1811 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1812 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1813 set_bit(HCI_INIT, &hdev->flags);
1814 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1815 clear_bit(HCI_INIT, &hdev->flags);
1818 /* flush cmd work */
1819 flush_work(&hdev->cmd_work);
1822 skb_queue_purge(&hdev->rx_q);
1823 skb_queue_purge(&hdev->cmd_q);
1824 skb_queue_purge(&hdev->raw_q);
1826 /* Drop last sent command */
1827 if (hdev->sent_cmd) {
1828 cancel_delayed_work_sync(&hdev->cmd_timer);
1829 kfree_skb(hdev->sent_cmd);
1830 hdev->sent_cmd = NULL;
1833 clear_bit(HCI_RUNNING, &hdev->flags);
1834 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
/* Wake anyone waiting for the suspend power-down task to finish. */
1836 if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1837 wake_up(&hdev->suspend_wait_q);
1839 /* After this point our queues are empty
1840 * and no tasks are scheduled. */
/* Keep only HCI_RAW; everything else is volatile across power off. */
1844 hdev->flags &= BIT(HCI_RAW);
1845 hci_dev_clear_volatile_flags(hdev);
1847 /* Controller radio is available but is currently powered down */
1848 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1850 memset(hdev->eir, 0, sizeof(hdev->eir));
1851 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1852 bacpy(&hdev->random_addr, BDADDR_ANY);
1854 hci_req_sync_unlock(hdev);
/* HCIDEVDOWN ioctl entry point: refuse user-channel devices, cancel
 * pending auto power-off, then delegate to hci_dev_do_close().
 */
1860 int hci_dev_close(__u16 dev)
1862 struct hci_dev *hdev;
1865 hdev = hci_dev_get(dev);
1869 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1874 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1875 cancel_delayed_work(&hdev->power_off);
1877 err = hci_dev_do_close(hdev);
/* Reset a running controller: drop queued traffic, flush caches and
 * connections, reset flow-control counters, then issue HCI Reset
 * synchronously. Runs under hci_req_sync_lock().
 */
1884 static int hci_dev_do_reset(struct hci_dev *hdev)
1888 BT_DBG("%s %p", hdev->name, hdev);
1890 hci_req_sync_lock(hdev);
1893 skb_queue_purge(&hdev->rx_q);
1894 skb_queue_purge(&hdev->cmd_q);
1896 /* Avoid potential lockdep warnings from the *_flush() calls by
1897 * ensuring the workqueue is empty up front.
1899 drain_workqueue(hdev->workqueue);
1902 hci_inquiry_cache_flush(hdev);
1903 hci_conn_hash_flush(hdev);
1904 hci_dev_unlock(hdev);
/* Reset credit accounting to the post-reset controller state. */
1909 atomic_set(&hdev->cmd_cnt, 1);
1910 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1912 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1914 hci_req_sync_unlock(hdev);
/* HCIDEVRESET ioctl: only valid for devices that are up, not in
 * user-channel mode, and configured; then hci_dev_do_reset().
 */
1918 int hci_dev_reset(__u16 dev)
1920 struct hci_dev *hdev;
1923 hdev = hci_dev_get(dev);
1927 if (!test_bit(HCI_UP, &hdev->flags)) {
1932 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1937 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1942 err = hci_dev_do_reset(hdev);
/* HCIDEVRESTAT ioctl: zero the per-device byte/error statistics.
 * Rejected for user-channel and unconfigured devices.
 */
1949 int hci_dev_reset_stat(__u16 dev)
1951 struct hci_dev *hdev;
1954 hdev = hci_dev_get(dev);
1958 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1963 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1968 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Mirror a raw HCI Write Scan Enable change into the mgmt-visible
 * CONNECTABLE/DISCOVERABLE flags and, when under mgmt control, emit a
 * New Settings event (and refresh adv data for LE).
 */
1975 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1977 bool conn_changed, discov_changed;
1979 BT_DBG("%s scan 0x%02x", hdev->name, scan);
/* SCAN_PAGE <-> connectable. */
1981 if ((scan & SCAN_PAGE))
1982 conn_changed = !hci_dev_test_and_set_flag(hdev,
1985 conn_changed = hci_dev_test_and_clear_flag(hdev,
/* SCAN_INQUIRY <-> discoverable (limited-discoverable is cleared
 * when inquiry scan goes away).
 */
1988 if ((scan & SCAN_INQUIRY)) {
1989 discov_changed = !hci_dev_test_and_set_flag(hdev,
1992 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1993 discov_changed = hci_dev_test_and_clear_flag(hdev,
1997 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2000 if (conn_changed || discov_changed) {
2001 /* In case this was disabled through mgmt */
2002 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2004 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2005 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
2007 mgmt_new_settings(hdev);
/* Legacy HCISET* ioctl dispatcher: validates the device, then applies
 * the requested setting either via a synchronous HCI request or by
 * writing hdev fields directly.
 * NOTE(review): the switch statement and most case labels are missing
 * from this extract; arm boundaries below are inferred from context.
 */
2011 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2013 struct hci_dev *hdev;
2014 struct hci_dev_req dr;
2017 if (copy_from_user(&dr, arg, sizeof(dr)))
2020 hdev = hci_dev_get(dr.dev_id);
2024 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2029 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2034 if (hdev->dev_type != HCI_PRIMARY) {
2039 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
/* HCISETAUTH: toggle authentication requirement. */
2046 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2047 HCI_INIT_TIMEOUT, NULL);
/* HCISETENCRYPT: requires the encryption LMP feature. */
2051 if (!lmp_encrypt_capable(hdev)) {
2056 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2057 /* Auth must be enabled first */
2058 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2059 HCI_INIT_TIMEOUT, NULL);
2064 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2065 HCI_INIT_TIMEOUT, NULL);
/* HCISETSCAN: write scan enable, then sync mgmt flags. */
2069 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2070 HCI_INIT_TIMEOUT, NULL);
2072 /* Ensure that the connectable and discoverable states
2073 * get correctly modified as this was a non-mgmt change.
2076 hci_update_scan_state(hdev, dr.dev_opt);
/* HCISETLINKPOL: write default link policy. */
2080 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2081 HCI_INIT_TIMEOUT, NULL);
2084 case HCISETLINKMODE:
2085 hdev->link_mode = ((__u16) dr.dev_opt) &
2086 (HCI_LM_MASTER | HCI_LM_ACCEPT);
/* HCISETPTYPE: only notify mgmt when the value actually changes. */
2090 if (hdev->pkt_type == (__u16) dr.dev_opt)
2093 hdev->pkt_type = (__u16) dr.dev_opt;
2094 mgmt_phy_configuration_changed(hdev, NULL);
/* HCISETACLMTU / HCISETSCOMTU: dev_opt packs mtu in the high
 * 16 bits and packet count in the low 16 bits.
 */
2098 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2099 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2103 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2104 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: return (id, flags) pairs for all registered
 * controllers, capped at the caller-supplied dev_num.
 * NOTE(review): dev_num declaration and dr initialization are not
 * visible in this extract.
 */
2117 int hci_get_dev_list(void __user *arg)
2119 struct hci_dev *hdev;
2120 struct hci_dev_list_req *dl;
2121 struct hci_dev_req *dr;
2122 int n = 0, size, err;
2125 if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the request so the kzalloc below stays bounded. */
2128 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2131 size = sizeof(*dl) + dev_num * sizeof(*dr);
2133 dl = kzalloc(size, GFP_KERNEL);
2139 read_lock(&hci_dev_list_lock);
2140 list_for_each_entry(hdev, &hci_dev_list, list) {
2141 unsigned long flags = hdev->flags;
2143 /* When the auto-off is configured it means the transport
2144 * is running, but in that case still indicate that the
2145 * device is actually down.
2147 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2148 flags &= ~BIT(HCI_UP);
2150 (dr + n)->dev_id = hdev->id;
2151 (dr + n)->dev_opt = flags;
2156 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found. */
2159 size = sizeof(*dl) + n * sizeof(*dr);
2161 err = copy_to_user(arg, dl, size);
2164 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot (address,
 * type, flags, MTUs, stats, features) for one controller.
 */
2167 int hci_get_dev_info(void __user *arg)
2169 struct hci_dev *hdev;
2170 struct hci_dev_info di;
2171 unsigned long flags;
2174 if (copy_from_user(&di, arg, sizeof(di)))
2177 hdev = hci_dev_get(di.dev_id);
2181 /* When the auto-off is configured it means the transport
2182 * is running, but in that case still indicate that the
2183 * device is actually down.
2185 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2186 flags = hdev->flags & ~BIT(HCI_UP);
2188 flags = hdev->flags;
2190 strcpy(di.name, hdev->name);
2191 di.bdaddr = hdev->bdaddr;
2192 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2194 di.pkt_type = hdev->pkt_type;
/* LE-only controllers report their LE MTU in the ACL fields. */
2195 if (lmp_bredr_capable(hdev)) {
2196 di.acl_mtu = hdev->acl_mtu;
2197 di.acl_pkts = hdev->acl_pkts;
2198 di.sco_mtu = hdev->sco_mtu;
2199 di.sco_pkts = hdev->sco_pkts;
2201 di.acl_mtu = hdev->le_mtu;
2202 di.acl_pkts = hdev->le_pkts;
2206 di.link_policy = hdev->link_policy;
2207 di.link_mode = hdev->link_mode;
2209 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2210 memcpy(&di.features, &hdev->features, sizeof(di.features));
2212 if (copy_to_user(arg, &di, sizeof(di)))
2220 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: mark the controller RFKILLED and power it down when
 * blocked (unless still in setup/config), clear the flag when
 * unblocked. User-channel devices are left alone.
 */
2222 static int hci_rfkill_set_block(void *data, bool blocked)
2224 struct hci_dev *hdev = data;
2226 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2228 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2232 hci_dev_set_flag(hdev, HCI_RFKILLED);
/* Setup/config may proceed while blocked; it causes no RF activity. */
2233 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2234 !hci_dev_test_flag(hdev, HCI_CONFIG))
2235 hci_dev_do_close(hdev);
2237 hci_dev_clear_flag(hdev, HCI_RFKILLED);
/* rfkill operations for HCI controllers; only blocking is handled. */
2243 static const struct rfkill_ops hci_rfkill_ops = {
2244 .set_block = hci_rfkill_set_block,
/* power_on work handler: bring the controller up, revalidate error
 * conditions ignored during setup, schedule auto power-off, and emit
 * the appropriate mgmt Index Added event after SETUP/CONFIG completes.
 */
2247 static void hci_power_on(struct work_struct *work)
2249 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2252 BT_DBG("%s", hdev->name);
/* Already up under mgmt with auto-off pending: just re-run the
 * power-on request instead of reopening the transport.
 */
2254 if (test_bit(HCI_UP, &hdev->flags) &&
2255 hci_dev_test_flag(hdev, HCI_MGMT) &&
2256 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2257 cancel_delayed_work(&hdev->power_off);
2258 hci_req_sync_lock(hdev);
2259 err = __hci_req_hci_power_on(hdev);
2260 hci_req_sync_unlock(hdev);
2261 mgmt_power_on(hdev, err);
2265 err = hci_dev_do_open(hdev);
2268 mgmt_set_powered_failed(hdev, err);
2269 hci_dev_unlock(hdev);
2273 /* During the HCI setup phase, a few error conditions are
2274 * ignored and they need to be checked now. If they are still
2275 * valid, it is important to turn the device back off.
2277 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2278 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2279 (hdev->dev_type == HCI_PRIMARY &&
2280 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2281 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2282 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2283 hci_dev_do_close(hdev);
2284 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2285 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2286 HCI_AUTO_OFF_TIMEOUT);
2289 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2290 /* For unconfigured devices, set the HCI_RAW flag
2291 * so that userspace can easily identify them.
2293 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2294 set_bit(HCI_RAW, &hdev->flags);
2296 /* For fully configured devices, this will send
2297 * the Index Added event. For unconfigured devices,
2298 * it will send Unconfigured Index Added event.
2300 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2301 * and no event will be sent.
2303 mgmt_index_added(hdev);
2304 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2305 /* When the controller is now configured, then it
2306 * is important to clear the HCI_RAW flag.
2308 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2309 clear_bit(HCI_RAW, &hdev->flags);
2311 /* Powering on the controller with HCI_CONFIG set only
2312 * happens with the transition from unconfigured to
2313 * configured. This will send the Index Added event.
2315 mgmt_index_added(hdev);
/* power_off work handler: simply close the controller. */
2319 static void hci_power_off(struct work_struct *work)
2321 struct hci_dev *hdev = container_of(work, struct hci_dev,
2324 BT_DBG("%s", hdev->name);
2326 hci_dev_do_close(hdev);
/* error_reset work handler: report the hardware error (via the
 * driver's hw_error hook when present) and bounce the controller by
 * closing and reopening it.
 */
2329 static void hci_error_reset(struct work_struct *work)
2331 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2333 BT_DBG("%s", hdev->name);
2336 hdev->hw_error(hdev, hdev->hw_error_code);
2338 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
/* Only reopen if the close succeeded. */
2340 if (hci_dev_do_close(hdev))
2343 hci_dev_do_open(hdev);
/* Free every registered UUID entry on hdev->uuids. */
2346 void hci_uuids_clear(struct hci_dev *hdev)
2348 struct bt_uuid *uuid, *tmp;
2350 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2351 list_del(&uuid->list);
/* Free all stored BR/EDR link keys; RCU-deferred free because readers
 * traverse the list under rcu_read_lock().
 */
2356 void hci_link_keys_clear(struct hci_dev *hdev)
2358 struct link_key *key, *tmp;
2360 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
2361 list_del_rcu(&key->list);
2362 kfree_rcu(key, rcu);
/* Free all stored SMP long-term keys (RCU-safe list removal). */
2366 void hci_smp_ltks_clear(struct hci_dev *hdev)
2368 struct smp_ltk *k, *tmp;
2370 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2371 list_del_rcu(&k->list);
/* Free all stored identity resolving keys (RCU-safe list removal). */
2376 void hci_smp_irks_clear(struct hci_dev *hdev)
2378 struct smp_irk *k, *tmp;
2380 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2381 list_del_rcu(&k->list);
/* Free the blocked-key deny list (RCU-safe list removal). */
2386 void hci_blocked_keys_clear(struct hci_dev *hdev)
2388 struct blocked_key *b, *tmp;
2390 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
2391 list_del_rcu(&b->list);
/* Return true if the 16-byte key @val of @type is on the deny list.
 * NOTE(review): the rcu_read_lock()/unlock() pair around the RCU
 * traversal is not visible in this extract.
 */
2396 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2398 bool blocked = false;
2399 struct blocked_key *b;
2402 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2403 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
/* Look up the stored BR/EDR link key for @bdaddr; keys on the blocked
 * deny list are reported and treated as not found.
 */
2413 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2418 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2419 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2422 if (hci_is_blocked_key(hdev,
2423 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2425 bt_dev_warn_ratelimited(hdev,
2426 "Link key blocked for %pMR",
/* Decide whether a BR/EDR link key should be stored persistently,
 * based on its type and the local/remote authentication requirements
 * of the connection it was created on.
 */
2439 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2440 u8 key_type, u8 old_key_type)
/* Legacy key types (combination/local/remote unit) are kept. */
2443 if (key_type < 0x03)
2446 /* Debug keys are insecure so don't store them persistently */
2447 if (key_type == HCI_LK_DEBUG_COMBINATION)
2450 /* Changed combination key and there's no previous one */
2451 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2454 /* Security mode 3 case */
2458 /* BR/EDR key derived using SC from an LE link */
2459 if (conn->type == LE_LINK)
2462 /* Neither local nor remote side had no-bonding as requirement */
2463 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2466 /* Local side had dedicated bonding as requirement */
2467 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2470 /* Remote side had dedicated bonding as requirement */
2471 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2474 /* If none of the above criteria match, then don't store the key
2479 static u8 ltk_role(u8 type)
2481 if (type == SMP_LTK)
2482 return HCI_ROLE_MASTER;
2484 return HCI_ROLE_SLAVE;
/* Look up an LTK matching address, address type and role; SC-derived
 * keys match either role. Keys on the deny list are rejected.
 */
2487 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2488 u8 addr_type, u8 role)
2493 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2494 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2497 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2500 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2502 bt_dev_warn_ratelimited(hdev,
2503 "LTK blocked for %pMR",
/* Resolve a resolvable private address to its IRK: first try the
 * cached RPA on each IRK, then fall back to cryptographic matching
 * (caching the RPA on a hit). Blocked IRKs are reported and dropped.
 */
2516 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2518 struct smp_irk *irk_to_return = NULL;
2519 struct smp_irk *irk;
2522 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2523 if (!bacmp(&irk->rpa, rpa)) {
2524 irk_to_return = irk;
/* Slow path: run the IRK resolution function over each key. */
2529 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2530 if (smp_irk_matches(hdev, irk->val, rpa)) {
2531 bacpy(&irk->rpa, rpa);
2532 irk_to_return = irk;
2538 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2539 irk_to_return->val)) {
2540 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2541 &irk_to_return->bdaddr);
2542 irk_to_return = NULL;
2547 return irk_to_return;
/* Look up the IRK stored for an identity address. Blocked IRKs are
 * reported and treated as not found.
 */
2550 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2553 struct smp_irk *irk_to_return = NULL;
2554 struct smp_irk *irk;
2556 /* Identity Address must be public or static random */
2557 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2561 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2562 if (addr_type == irk->addr_type &&
2563 bacmp(bdaddr, &irk->bdaddr) == 0) {
2564 irk_to_return = irk;
2571 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2572 irk_to_return->val)) {
2573 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2574 &irk_to_return->bdaddr);
2575 irk_to_return = NULL;
2580 return irk_to_return;
/* Store (or update) a BR/EDR link key for @bdaddr. When @persistent
 * is non-NULL it is set to whether the key should be kept across
 * reboots (see hci_persistent_key()). Returns the stored entry.
 * NOTE(review): allocation-failure return and some braces are missing
 * from this extract.
 */
2583 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2584 bdaddr_t *bdaddr, u8 *val, u8 type,
2585 u8 pin_len, bool *persistent)
2587 struct link_key *key, *old_key;
2590 old_key = hci_find_link_key(hdev, bdaddr);
2592 old_key_type = old_key->type;
/* No existing key: remember prior type (0xff = none) and allocate. */
2595 old_key_type = conn ? conn->key_type : 0xff;
2596 key = kzalloc(sizeof(*key), GFP_KERNEL);
2599 list_add_rcu(&key->list, &hdev->link_keys);
2602 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2604 /* Some buggy controller combinations generate a changed
2605 * combination key for legacy pairing even when there's no
2607 if (type == HCI_LK_CHANGED_COMBINATION &&
2608 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2609 type = HCI_LK_COMBINATION;
2611 conn->key_type = type;
2614 bacpy(&key->bdaddr, bdaddr);
2615 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2616 key->pin_len = pin_len;
/* A changed-combination key keeps the original key's type. */
2618 if (type == HCI_LK_CHANGED_COMBINATION)
2619 key->type = old_key_type;
2624 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP long-term key for the given identity
 * address/role. Returns the stored entry.
 * NOTE(review): old-key reuse branch, allocation-failure return and
 * the ediv/rand assignments are missing from this extract.
 */
2630 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2631 u8 addr_type, u8 type, u8 authenticated,
2632 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2634 struct smp_ltk *key, *old_key;
2635 u8 role = ltk_role(type);
2637 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2641 key = kzalloc(sizeof(*key), GFP_KERNEL);
2644 list_add_rcu(&key->list, &hdev->long_term_keys);
2647 bacpy(&key->bdaddr, bdaddr);
2648 key->bdaddr_type = addr_type;
2649 memcpy(key->val, tk, sizeof(key->val));
2650 key->authenticated = authenticated;
2653 key->enc_size = enc_size;
/* Store (or update) an identity resolving key, keyed by identity
 * address; the last-seen RPA is cached alongside the key value.
 */
2659 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2660 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2662 struct smp_irk *irk;
2664 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
/* Not found: allocate a fresh entry and link it in. */
2666 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2670 bacpy(&irk->bdaddr, bdaddr);
2671 irk->addr_type = addr_type;
2673 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2676 memcpy(irk->val, val, 16);
2677 bacpy(&irk->rpa, rpa);
/* Remove the stored BR/EDR link key for @bdaddr (RCU-deferred free).
 * The not-found return value is outside this extract.
 */
2682 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2684 struct link_key *key;
2686 key = hci_find_link_key(hdev, bdaddr);
2690 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2692 list_del_rcu(&key->list);
2693 kfree_rcu(key, rcu);
/* Remove all LTKs for the given address/address-type pair; returns 0
 * if anything was removed, -ENOENT otherwise.
 */
2698 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2700 struct smp_ltk *k, *tmp;
2703 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2704 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2707 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2709 list_del_rcu(&k->list);
2714 return removed ? 0 : -ENOENT;
/* Remove all IRKs matching the given identity address/type. */
2717 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2719 struct smp_irk *k, *tmp;
2721 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2722 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2725 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2727 list_del_rcu(&k->list);
/* Return whether a device is paired: a BR/EDR address is paired if a
 * link key exists; an LE address is paired if, after resolving through
 * any IRK to its identity address, an LTK exists for it.
 */
2732 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2735 struct smp_irk *irk;
2738 if (type == BDADDR_BREDR) {
2739 if (hci_find_link_key(hdev, bdaddr))
2744 /* Convert to HCI addr type which struct smp_ltk uses */
2745 if (type == BDADDR_LE_PUBLIC)
2746 addr_type = ADDR_LE_DEV_PUBLIC;
2748 addr_type = ADDR_LE_DEV_RANDOM;
/* Resolve a private address to its identity before the LTK lookup. */
2750 irk = hci_get_irk(hdev, bdaddr, addr_type);
2752 bdaddr = &irk->bdaddr;
2753 addr_type = irk->addr_type;
2757 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2758 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2768 /* HCI command timer function */
/* Fires when a sent HCI command got no completion in time: log the
 * stuck opcode, give the driver a chance to recover (cmd_timeout
 * hook), then force the command queue to make progress again.
 */
2769 static void hci_cmd_timeout(struct work_struct *work)
2771 struct hci_dev *hdev = container_of(work, struct hci_dev,
2774 if (hdev->sent_cmd) {
2775 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2776 u16 opcode = __le16_to_cpu(sent->opcode);
2778 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2780 bt_dev_err(hdev, "command tx timeout");
2783 if (hdev->cmd_timeout)
2784 hdev->cmd_timeout(hdev);
/* Restore one command credit and kick the queue. */
2786 atomic_set(&hdev->cmd_cnt, 1);
2787 queue_work(hdev->workqueue, &hdev->cmd_work);
2790 /* HCI ncmd timer function */
/* Fires when the controller keeps reporting ncmd = 0 (it stopped
 * accepting commands): outside of init, inject a hardware error to
 * force a full reset of the device.
 */
2791 static void hci_ncmd_timeout(struct work_struct *work)
2793 struct hci_dev *hdev = container_of(work, struct hci_dev,
2796 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
2798 /* During HCI_INIT phase no events can be injected if the ncmd timer
2799 * triggers since the procedure has its own timeout handling.
2801 if (test_bit(HCI_INIT, &hdev->flags))
2804 /* This is an irrecoverable state, inject hardware error event */
2805 hci_reset_dev(hdev);
/* Look up stored remote OOB pairing data by address and address type. */
2808 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2809 bdaddr_t *bdaddr, u8 bdaddr_type)
2811 struct oob_data *data;
2813 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2814 if (bacmp(bdaddr, &data->bdaddr) != 0)
2816 if (data->bdaddr_type != bdaddr_type)
/* Remove the stored remote OOB data entry matching bdaddr/bdaddr_type,
 * if one exists.
 */
2824 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2827 struct oob_data *data;
2829 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2833 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2835 list_del(&data->list);
/* Drop every stored remote OOB data entry for this controller. */
2841 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2843 struct oob_data *data, *n;
2845 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2846 list_del(&data->list);
/* Store (or update) remote OOB pairing data for bdaddr/bdaddr_type.
 * hash192/rand192 are the P-192 values, hash256/rand256 the P-256
 * values; either pair may be NULL. data->present encodes which pairs
 * are valid: 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both.
 */
2851 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2852 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2853 u8 *hash256, u8 *rand256)
2855 struct oob_data *data;
/* Reuse an existing entry for this address, else allocate one. */
2857 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2859 data = kmalloc(sizeof(*data), GFP_KERNEL);
2863 bacpy(&data->bdaddr, bdaddr);
2864 data->bdaddr_type = bdaddr_type;
2865 list_add(&data->list, &hdev->remote_oob_data);
2868 if (hash192 && rand192) {
2869 memcpy(data->hash192, hash192, sizeof(data->hash192));
2870 memcpy(data->rand192, rand192, sizeof(data->rand192));
2871 if (hash256 && rand256)
2872 data->present = 0x03;
/* No valid P-192 values: clear them and record what remains. */
2874 memset(data->hash192, 0, sizeof(data->hash192));
2875 memset(data->rand192, 0, sizeof(data->rand192));
2876 if (hash256 && rand256)
2877 data->present = 0x02;
2879 data->present = 0x00;
2882 if (hash256 && rand256) {
2883 memcpy(data->hash256, hash256, sizeof(data->hash256));
2884 memcpy(data->rand256, rand256, sizeof(data->rand256));
/* No valid P-256 values: clear them; P-192 alone is 0x01. */
2886 memset(data->hash256, 0, sizeof(data->hash256));
2887 memset(data->rand256, 0, sizeof(data->rand256));
2888 if (hash192 && rand192)
2889 data->present = 0x01;
2892 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2897 /* This function requires the caller holds hdev->lock */
/* Return the advertising instance with the given instance number,
 * or NULL-equivalent fall-through when none matches.
 */
2898 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2900 struct adv_info *adv_instance;
2902 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2903 if (adv_instance->instance == instance)
2904 return adv_instance;
2910 /* This function requires the caller holds hdev->lock */
/* Return the advertising instance that follows @instance in the
 * rotation, wrapping from the last list entry back to the first.
 */
2911 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2913 struct adv_info *cur_instance;
2915 cur_instance = hci_find_adv_instance(hdev, instance);
2919 if (cur_instance == list_last_entry(&hdev->adv_instances,
2920 struct adv_info, list))
2921 return list_first_entry(&hdev->adv_instances,
2922 struct adv_info, list);
2924 return list_next_entry(cur_instance, list);
2927 /* This function requires the caller holds hdev->lock */
/* Remove advertising instance @instance: cancel its expiry/RPA work,
 * unlink and free it, and reset cur_adv_instance if it was active.
 */
2928 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2930 struct adv_info *adv_instance;
2932 adv_instance = hci_find_adv_instance(hdev, instance);
/* "instance" is a plain integer; the old "%dMR" text was a stray
 * remnant of the %pMR bdaddr format specifier.
 */
2936 BT_DBG("%s removing instance %d", hdev->name, instance);
2938 if (hdev->cur_adv_instance == instance) {
2939 if (hdev->adv_instance_timeout) {
2940 cancel_delayed_work(&hdev->adv_instance_expire);
2941 hdev->adv_instance_timeout = 0;
2943 hdev->cur_adv_instance = 0x00;
2946 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2948 list_del(&adv_instance->list);
2949 kfree(adv_instance);
2951 hdev->adv_instance_cnt--;
/* Mark every advertising instance's RPA as expired (or not),
 * so the next advertising update regenerates/keeps the address.
 */
2956 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2958 struct adv_info *adv_instance, *n;
2960 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2961 adv_instance->rpa_expired = rpa_expired;
2964 /* This function requires the caller holds hdev->lock */
/* Tear down all advertising instances: cancel pending expiry work,
 * free every instance, and reset the instance bookkeeping counters.
 */
2965 void hci_adv_instances_clear(struct hci_dev *hdev)
2967 struct adv_info *adv_instance, *n;
2969 if (hdev->adv_instance_timeout) {
2970 cancel_delayed_work(&hdev->adv_instance_expire);
2971 hdev->adv_instance_timeout = 0;
2974 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2975 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2976 list_del(&adv_instance->list);
2977 kfree(adv_instance);
2980 hdev->adv_instance_cnt = 0;
2981 hdev->cur_adv_instance = 0x00;
/* Delayed-work callback: flag this instance's resolvable private
 * address as expired so it gets rotated.
 */
2984 static void adv_instance_rpa_expired(struct work_struct *work)
2986 struct adv_info *adv_instance = container_of(work, struct adv_info,
2987 rpa_expired_cb.work);
2991 adv_instance->rpa_expired = true;
2994 /* This function requires the caller holds hdev->lock */
/* Add a new advertising instance, or update the existing one with the
 * same instance number. Instance numbers must be in
 * [1, le_num_of_adv_sets] and the total count may not exceed the
 * controller's supported advertising sets. A duration of 0 selects the
 * default multi-advertising rotation duration.
 */
2995 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2996 u16 adv_data_len, u8 *adv_data,
2997 u16 scan_rsp_len, u8 *scan_rsp_data,
2998 u16 timeout, u16 duration, s8 tx_power,
2999 u32 min_interval, u32 max_interval)
3001 struct adv_info *adv_instance;
/* Updating an existing instance: wipe its old data buffers first. */
3003 adv_instance = hci_find_adv_instance(hdev, instance);
3005 memset(adv_instance->adv_data, 0,
3006 sizeof(adv_instance->adv_data));
3007 memset(adv_instance->scan_rsp_data, 0,
3008 sizeof(adv_instance->scan_rsp_data));
3010 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
3011 instance < 1 || instance > hdev->le_num_of_adv_sets)
3014 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
3018 adv_instance->pending = true;
3019 adv_instance->instance = instance;
3020 list_add(&adv_instance->list, &hdev->adv_instances);
3021 hdev->adv_instance_cnt++;
3024 adv_instance->flags = flags;
3025 adv_instance->adv_data_len = adv_data_len;
3026 adv_instance->scan_rsp_len = scan_rsp_len;
3027 adv_instance->min_interval = min_interval;
3028 adv_instance->max_interval = max_interval;
3029 adv_instance->tx_power = tx_power;
3032 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3035 memcpy(adv_instance->scan_rsp_data,
3036 scan_rsp_data, scan_rsp_len);
3038 adv_instance->timeout = timeout;
3039 adv_instance->remaining_time = timeout;
3042 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3044 adv_instance->duration = duration;
3046 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3047 adv_instance_rpa_expired);
/* "instance" is a plain integer; the old "%dMR" text was a stray
 * remnant of the %pMR bdaddr format specifier.
 */
3049 BT_DBG("%s for instance %d", hdev->name, instance);
3054 /* This function requires the caller holds hdev->lock */
/* Replace the advertising data and/or scan response data of an
 * already-registered advertising instance.
 */
3055 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3056 u16 adv_data_len, u8 *adv_data,
3057 u16 scan_rsp_len, u8 *scan_rsp_data)
3059 struct adv_info *adv_instance;
3061 adv_instance = hci_find_adv_instance(hdev, instance);
3063 /* If advertisement doesn't exist, we can't modify its data */
3068 memset(adv_instance->adv_data, 0,
3069 sizeof(adv_instance->adv_data));
3070 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3071 adv_instance->adv_data_len = adv_data_len;
3075 memset(adv_instance->scan_rsp_data, 0,
3076 sizeof(adv_instance->scan_rsp_data));
3077 memcpy(adv_instance->scan_rsp_data,
3078 scan_rsp_data, scan_rsp_len);
3079 adv_instance->scan_rsp_len = scan_rsp_len;
3085 /* This function requires the caller holds hdev->lock */
/* Free every registered advertisement monitor and destroy the IDR
 * that maps monitor handles to monitors.
 */
3086 void hci_adv_monitors_clear(struct hci_dev *hdev)
3088 struct adv_monitor *monitor;
3091 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3092 hci_free_adv_monitor(hdev, monitor);
3094 idr_destroy(&hdev->adv_monitors_idr);
3097 /* Frees the monitor structure and do some bookkeepings.
3098 * This function requires the caller holds hdev->lock.
3100 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3102 struct adv_pattern *pattern;
3103 struct adv_pattern *tmp;
/* Release each pattern owned by the monitor. */
3108 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3109 list_del(&pattern->list);
/* A zero handle means the monitor was never inserted in the IDR. */
3113 if (monitor->handle)
3114 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
/* Only registered monitors count towards adv_monitors_cnt and need
 * a removal notification to userspace.
 */
3116 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3117 hdev->adv_monitors_cnt--;
3118 mgmt_adv_monitor_removed(hdev, monitor->handle);
/* Forward add-monitor completion status to the mgmt layer. */
3124 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3126 return mgmt_add_adv_patterns_monitor_complete(hdev, status);
/* Forward remove-monitor completion status to the mgmt layer. */
3129 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3131 return mgmt_remove_adv_monitor_complete(hdev, status);
3134 /* Assigns handle to a monitor, and if offloading is supported and power is on,
3135 * also attempts to forward the request to the controller.
3136 * Returns true if request is forwarded (result is pending), false otherwise.
3137 * This function requires the caller holds hdev->lock.
3139 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3142 int min, max, handle;
/* Allocate a handle from the reserved monitor handle range. */
3151 min = HCI_MIN_ADV_MONITOR_HANDLE;
3152 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3153 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3160 monitor->handle = handle;
/* Defer controller interaction until the device is powered. */
3162 if (!hdev_is_powered(hdev))
3165 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3166 case HCI_ADV_MONITOR_EXT_NONE:
/* No offload support: fall back to host-side background scan. */
3167 hci_update_background_scan(hdev);
3168 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
3169 /* Message was not forwarded to controller - not an error */
3171 case HCI_ADV_MONITOR_EXT_MSFT:
/* Microsoft vendor extension: offload the pattern to the controller. */
3172 *err = msft_add_monitor_pattern(hdev, monitor);
3173 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
3181 /* Attempts to tell the controller and free the monitor. If somehow the
3182 * controller doesn't have a corresponding handle, remove anyway.
3183 * Returns true if request is forwarded (result is pending), false otherwise.
3184 * This function requires the caller holds hdev->lock.
3186 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3187 struct adv_monitor *monitor,
3188 u16 handle, int *err)
3192 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3193 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3195 case HCI_ADV_MONITOR_EXT_MSFT:
3196 *err = msft_remove_monitor(hdev, monitor, handle);
3200 /* In case no matching handle registered, just free the monitor */
3201 if (*err == -ENOENT)
3207 if (*err == -ENOENT)
3208 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3210 hci_free_adv_monitor(hdev, monitor);
3216 /* Returns true if request is forwarded (result is pending), false otherwise.
3217 * This function requires the caller holds hdev->lock.
/* Remove one monitor identified by @handle; updates the background
 * scan when removal finished synchronously without error.
 */
3219 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3221 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3229 pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3230 if (!*err && !pending)
3231 hci_update_background_scan(hdev);
3233 bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
3234 hdev->name, handle, *err, pending ? "" : "not ");
3239 /* Returns true if request is forwarded (result is pending), false otherwise.
3240 * This function requires the caller holds hdev->lock.
/* Iterate the monitor IDR and remove every monitor, stopping early if
 * a removal fails or becomes pending on the controller.
 */
3242 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3244 struct adv_monitor *monitor;
3245 int idr_next_id = 0;
3246 bool pending = false;
3247 bool update = false;
3251 while (!*err && !pending) {
3252 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3256 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3258 if (!*err && !pending)
3263 hci_update_background_scan(hdev);
3265 bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
3266 hdev->name, *err, pending ? "" : "not ");
3271 /* This function requires the caller holds hdev->lock */
/* True when at least one advertisement monitor is registered. */
3272 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3274 return !idr_is_empty(&hdev->adv_monitors_idr);
/* Report which controller offload mechanism is available for
 * advertisement monitoring (currently only the MSFT extension).
 */
3277 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3279 if (msft_monitor_supported(hdev))
3280 return HCI_ADV_MONITOR_EXT_MSFT;
3282 return HCI_ADV_MONITOR_EXT_NONE;
/* Find the entry matching both address and address type in a plain
 * bdaddr list.
 */
3285 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3286 bdaddr_t *bdaddr, u8 type)
3288 struct bdaddr_list *b;
3290 list_for_each_entry(b, bdaddr_list, list) {
3291 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Same as hci_bdaddr_list_lookup() but for lists whose entries also
 * carry IRKs.
 */
3298 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3299 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3302 struct bdaddr_list_with_irk *b;
3304 list_for_each_entry(b, bdaddr_list, list) {
3305 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Same as hci_bdaddr_list_lookup() but for lists whose entries also
 * carry per-device flags.
 */
3312 struct bdaddr_list_with_flags *
3313 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3314 bdaddr_t *bdaddr, u8 type)
3316 struct bdaddr_list_with_flags *b;
3318 list_for_each_entry(b, bdaddr_list, list) {
3319 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Remove and free every entry of a bdaddr list. */
3326 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3328 struct bdaddr_list *b, *n;
3330 list_for_each_entry_safe(b, n, bdaddr_list, list) {
/* Add bdaddr/type to @list. BDADDR_ANY is rejected and duplicates
 * are not inserted twice.
 */
3336 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3338 struct bdaddr_list *entry;
3340 if (!bacmp(bdaddr, BDADDR_ANY))
3343 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3346 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3350 bacpy(&entry->bdaddr, bdaddr);
3351 entry->bdaddr_type = type;
3353 list_add(&entry->list, list);
/* Add bdaddr/type plus optional peer and local IRKs (16 bytes each)
 * to @list. BDADDR_ANY and duplicates are rejected.
 */
3358 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3359 u8 type, u8 *peer_irk, u8 *local_irk)
3361 struct bdaddr_list_with_irk *entry;
3363 if (!bacmp(bdaddr, BDADDR_ANY))
3366 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3369 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3373 bacpy(&entry->bdaddr, bdaddr);
3374 entry->bdaddr_type = type;
3377 memcpy(entry->peer_irk, peer_irk, 16);
3380 memcpy(entry->local_irk, local_irk, 16);
3382 list_add(&entry->list, list);
/* Add bdaddr/type with an initial flags value to @list. BDADDR_ANY
 * and duplicates are rejected.
 */
3387 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3390 struct bdaddr_list_with_flags *entry;
3392 if (!bacmp(bdaddr, BDADDR_ANY))
3395 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3398 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3402 bacpy(&entry->bdaddr, bdaddr);
3403 entry->bdaddr_type = type;
3404 entry->current_flags = flags;
3406 list_add(&entry->list, list);
/* Delete bdaddr/type from @list; BDADDR_ANY empties the whole list. */
3411 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3413 struct bdaddr_list *entry;
3415 if (!bacmp(bdaddr, BDADDR_ANY)) {
3416 hci_bdaddr_list_clear(list);
3420 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3424 list_del(&entry->list);
/* IRK-list variant of hci_bdaddr_list_del(); BDADDR_ANY clears the
 * whole list.
 */
3430 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3433 struct bdaddr_list_with_irk *entry;
3435 if (!bacmp(bdaddr, BDADDR_ANY)) {
3436 hci_bdaddr_list_clear(list);
3440 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3444 list_del(&entry->list);
/* Flags-list variant of hci_bdaddr_list_del(); BDADDR_ANY clears the
 * whole list.
 */
3450 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3453 struct bdaddr_list_with_flags *entry;
3455 if (!bacmp(bdaddr, BDADDR_ANY)) {
3456 hci_bdaddr_list_clear(list);
3460 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3464 list_del(&entry->list);
3470 /* This function requires the caller holds hdev->lock */
/* Find the LE connection parameters stored for addr/addr_type. */
3471 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3472 bdaddr_t *addr, u8 addr_type)
3474 struct hci_conn_params *params;
3476 list_for_each_entry(params, &hdev->le_conn_params, list) {
/* "&params" was mojibake-corrupted to the pilcrow character. */
3477 if (bacmp(&params->addr, addr) == 0 &&
3478 params->addr_type == addr_type) {
3486 /* This function requires the caller holds hdev->lock */
/* Find a pending LE action (connect/report) entry on @list for the
 * given address. Resolved address types are first folded back to
 * their unresolved counterparts so they match stored entries.
 */
3487 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3488 bdaddr_t *addr, u8 addr_type)
3490 struct hci_conn_params *param;
3492 switch (addr_type) {
3493 case ADDR_LE_DEV_PUBLIC_RESOLVED:
3494 addr_type = ADDR_LE_DEV_PUBLIC;
3496 case ADDR_LE_DEV_RANDOM_RESOLVED:
3497 addr_type = ADDR_LE_DEV_RANDOM;
3501 list_for_each_entry(param, list, action) {
/* "&param" was mojibake-corrupted to the pilcrow character. */
3502 if (bacmp(&param->addr, addr) == 0 &&
3503 param->addr_type == addr_type)
3510 /* This function requires the caller holds hdev->lock */
/* Return the connection parameters for addr/addr_type, creating a new
 * entry initialized from the controller defaults when none exists.
 */
3511 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3512 bdaddr_t *addr, u8 addr_type)
3514 struct hci_conn_params *params;
3516 params = hci_conn_params_lookup(hdev, addr, addr_type);
3520 params = kzalloc(sizeof(*params), GFP_KERNEL);
3522 bt_dev_err(hdev, "out of memory");
/* "&params" occurrences below were mojibake-corrupted to pilcrows. */
3526 bacpy(&params->addr, addr);
3527 params->addr_type = addr_type;
3529 list_add(&params->list, &hdev->le_conn_params);
3530 INIT_LIST_HEAD(&params->action);
3532 params->conn_min_interval = hdev->le_conn_min_interval;
3533 params->conn_max_interval = hdev->le_conn_max_interval;
3534 params->conn_latency = hdev->le_conn_latency;
3535 params->supervision_timeout = hdev->le_supv_timeout;
3536 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3538 BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* Release a connection-parameters entry: drop/put any attached
 * connection reference and unlink the entry from both lists.
 */
3543 static void hci_conn_params_free(struct hci_conn_params *params)
3546 hci_conn_drop(params->conn);
3547 hci_conn_put(params->conn);
/* "&params" was mojibake-corrupted to the pilcrow character. */
3550 list_del(&params->action);
3551 list_del(&params->list);
3555 /* This function requires the caller holds hdev->lock */
/* Delete the stored connection parameters for addr/addr_type and
 * refresh the background scan accordingly.
 */
3556 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3558 struct hci_conn_params *params;
3560 params = hci_conn_params_lookup(hdev, addr, addr_type);
3564 hci_conn_params_free(params);
3566 hci_update_background_scan(hdev);
3568 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3571 /* This function requires the caller holds hdev->lock */
/* Drop all connection-parameter entries whose auto_connect is
 * disabled, except entries with a pending explicit connect, which are
 * kept but downgraded to one-shot (HCI_AUTO_CONN_EXPLICIT).
 */
3572 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3574 struct hci_conn_params *params, *tmp;
3576 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3577 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3580 /* If trying to establish one time connection to disabled
3581 * device, leave the params, but mark them as just once.
3583 if (params->explicit_connect) {
3584 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
/* "&params" was mojibake-corrupted to the pilcrow character. */
3588 list_del(&params->list);
3592 BT_DBG("All LE disabled connection parameters were removed");
3595 /* This function requires the caller holds hdev->lock */
/* Free every stored LE connection-parameters entry. */
3596 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3598 struct hci_conn_params *params, *tmp;
3600 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3601 hci_conn_params_free(params);
3603 BT_DBG("All LE connection parameters were removed");
3606 /* Copy the Identity Address of the controller.
3608 * If the controller has a public BD_ADDR, then by default use that one.
3609 * If this is a LE only controller without a public address, default to
3610 * the static random address.
3612 * For debugging purposes it is possible to force controllers with a
3613 * public address to use the static random address instead.
3615 * In case BR/EDR has been disabled on a dual-mode controller and
3616 * userspace has configured a static address, then that address
3617 * becomes the identity address instead of the public BR/EDR address.
3619 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
/* Static random address is used when: forced via debugfs, no public
 * address exists, or BR/EDR is disabled and a static address is set.
 */
3622 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3623 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3624 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3625 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3626 bacpy(bdaddr, &hdev->static_addr);
3627 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3629 bacpy(bdaddr, &hdev->bdaddr);
3630 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
/* Clear every pending suspend task bit and wake anyone blocked in
 * hci_suspend_wait_event().
 */
3634 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3638 for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3639 clear_bit(i, hdev->suspend_tasks);
3641 wake_up(&hdev->suspend_wait_q);
/* Block until all suspend task bits are cleared or the notifier
 * timeout expires; on timeout, log each stuck bit and clear it so a
 * later suspend attempt starts clean.
 */
3644 static int hci_suspend_wait_event(struct hci_dev *hdev)
3647 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3648 __SUSPEND_NUM_TASKS)
3651 int ret = wait_event_timeout(hdev->suspend_wait_q,
3652 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3655 bt_dev_err(hdev, "Timed out waiting for suspend events");
3656 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3657 if (test_bit(i, hdev->suspend_tasks))
3658 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3659 clear_bit(i, hdev->suspend_tasks);
/* Workqueue entry point that runs the suspend preparation request for
 * the state recorded in hdev->suspend_state_next, under hdev->lock.
 */
3670 static void hci_prepare_suspend(struct work_struct *work)
3672 struct hci_dev *hdev =
3673 container_of(work, struct hci_dev, suspend_prepare);
3676 hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3677 hci_dev_unlock(hdev);
/* Queue suspend preparation towards @next state and wait for the
 * corresponding task bits to clear (or time out).
 */
3680 static int hci_change_suspend_state(struct hci_dev *hdev,
3681 enum suspended_state next)
3683 hdev->suspend_state_next = next;
3684 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3685 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3686 return hci_suspend_wait_event(hdev);
/* Reset the recorded wake reason and wake address before suspending,
 * so the values reported on resume only reflect this suspend cycle.
 */
3689 static void hci_clear_wake_reason(struct hci_dev *hdev)
3693 hdev->wake_reason = 0;
3694 bacpy(&hdev->wake_addr, BDADDR_ANY);
3695 hdev->wake_addr_type = 0;
3697 hci_dev_unlock(hdev);
/* PM notifier callback driving the controller through the suspend and
 * resume state machine. Suspend failures are logged but never veto
 * system suspend; recovery is attempted on resume instead.
 */
3700 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3703 struct hci_dev *hdev =
3704 container_of(nb, struct hci_dev, suspend_notifier);
3706 u8 state = BT_RUNNING;
3708 /* If powering down, wait for completion. */
3709 if (mgmt_powering_down(hdev)) {
3710 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3711 ret = hci_suspend_wait_event(hdev);
3716 /* Suspend notifier should only act on events when powered. */
3717 if (!hdev_is_powered(hdev) ||
3718 hci_dev_test_flag(hdev, HCI_UNREGISTER))
3721 if (action == PM_SUSPEND_PREPARE) {
3722 /* Suspend consists of two actions:
3723 * - First, disconnect everything and make the controller not
3724 * connectable (disabling scanning)
3725 * - Second, program event filter/accept list and enable scan
3727 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3729 state = BT_SUSPEND_DISCONNECT;
3731 /* Only configure accept list if disconnect succeeded and wake
3732 * isn't being prevented.
3734 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3735 ret = hci_change_suspend_state(hdev,
3736 BT_SUSPEND_CONFIGURE_WAKE);
3738 state = BT_SUSPEND_CONFIGURE_WAKE;
3741 hci_clear_wake_reason(hdev);
3742 mgmt_suspending(hdev, state);
3744 } else if (action == PM_POST_SUSPEND) {
3745 ret = hci_change_suspend_state(hdev, BT_RUNNING);
/* Report the wake reason recorded while suspended to userspace. */
3747 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3748 hdev->wake_addr_type);
3752 /* We always allow suspend even if suspend preparation failed and
3753 * attempt to recover in resume.
3756 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3762 /* Alloc HCI device */
/* Allocate and initialize a struct hci_dev with @sizeof_priv extra
 * bytes of driver-private data appended. Fills in spec/stack default
 * parameter values, initializes all lists, locks, work items and
 * queues, and sets up sysfs/debug infrastructure. Returns the new
 * device (caller registers it with hci_register_dev()).
 */
3763 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
3765 struct hci_dev *hdev;
3766 unsigned int alloc_size;
3768 alloc_size = sizeof(*hdev);
3770 /* Fixme: May need ALIGN-ment? */
3771 alloc_size += sizeof_priv;
3774 hdev = kzalloc(alloc_size, GFP_KERNEL);
/* Default BR/EDR packet types, IO capability and power levels. */
3778 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3779 hdev->esco_type = (ESCO_HV1);
3780 hdev->link_mode = (HCI_LM_ACCEPT);
3781 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3782 hdev->io_capability = 0x03; /* No Input No Output */
3783 hdev->manufacturer = 0xffff; /* Default to internal use */
3784 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3785 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3786 hdev->adv_instance_cnt = 0;
3787 hdev->cur_adv_instance = 0x00;
3788 hdev->adv_instance_timeout = 0;
/* Advertisement monitor interleave-scan durations (ms). */
3790 hdev->advmon_allowlist_duration = 300;
3791 hdev->advmon_no_filter_duration = 500;
3792 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
3794 hdev->sniff_max_interval = 800;
3795 hdev->sniff_min_interval = 80;
/* LE defaults: advertising, scanning and connection parameters in
 * controller units (0.625 ms slots / 1.25 ms intervals).
 */
3797 hdev->le_adv_channel_map = 0x07;
3798 hdev->le_adv_min_interval = 0x0800;
3799 hdev->le_adv_max_interval = 0x0800;
3800 hdev->le_scan_interval = 0x0060;
3801 hdev->le_scan_window = 0x0030;
3802 hdev->le_scan_int_suspend = 0x0400;
3803 hdev->le_scan_window_suspend = 0x0012;
3804 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3805 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3806 hdev->le_scan_int_adv_monitor = 0x0060;
3807 hdev->le_scan_window_adv_monitor = 0x0030;
3808 hdev->le_scan_int_connect = 0x0060;
3809 hdev->le_scan_window_connect = 0x0060;
3810 hdev->le_conn_min_interval = 0x0018;
3811 hdev->le_conn_max_interval = 0x0028;
3812 hdev->le_conn_latency = 0x0000;
3813 hdev->le_supv_timeout = 0x002a;
3814 hdev->le_def_tx_len = 0x001b;
3815 hdev->le_def_tx_time = 0x0148;
3816 hdev->le_max_tx_len = 0x001b;
3817 hdev->le_max_tx_time = 0x0148;
3818 hdev->le_max_rx_len = 0x001b;
3819 hdev->le_max_rx_time = 0x0148;
3820 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3821 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3822 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3823 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3824 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3825 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3826 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3827 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3828 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3830 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3831 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3832 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3833 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3834 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3835 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3837 /* default 1.28 sec page scan */
3838 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3839 hdev->def_page_scan_int = 0x0800;
3840 hdev->def_page_scan_window = 0x0012;
3842 mutex_init(&hdev->lock);
3843 mutex_init(&hdev->req_lock);
/* Key stores, accept/reject lists and LE bookkeeping lists. */
3845 INIT_LIST_HEAD(&hdev->mgmt_pending);
3846 INIT_LIST_HEAD(&hdev->reject_list);
3847 INIT_LIST_HEAD(&hdev->accept_list);
3848 INIT_LIST_HEAD(&hdev->uuids);
3849 INIT_LIST_HEAD(&hdev->link_keys);
3850 INIT_LIST_HEAD(&hdev->long_term_keys);
3851 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3852 INIT_LIST_HEAD(&hdev->remote_oob_data);
3853 INIT_LIST_HEAD(&hdev->le_accept_list);
3854 INIT_LIST_HEAD(&hdev->le_resolv_list);
3855 INIT_LIST_HEAD(&hdev->le_conn_params);
3856 INIT_LIST_HEAD(&hdev->pend_le_conns);
3857 INIT_LIST_HEAD(&hdev->pend_le_reports);
3858 INIT_LIST_HEAD(&hdev->conn_hash.list);
3859 INIT_LIST_HEAD(&hdev->adv_instances);
3860 INIT_LIST_HEAD(&hdev->blocked_keys);
/* Work items servicing the RX/CMD/TX paths and power management. */
3862 INIT_WORK(&hdev->rx_work, hci_rx_work);
3863 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3864 INIT_WORK(&hdev->tx_work, hci_tx_work);
3865 INIT_WORK(&hdev->power_on, hci_power_on);
3866 INIT_WORK(&hdev->error_reset, hci_error_reset);
3867 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3869 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3871 skb_queue_head_init(&hdev->rx_q);
3872 skb_queue_head_init(&hdev->cmd_q);
3873 skb_queue_head_init(&hdev->raw_q);
3875 init_waitqueue_head(&hdev->req_wait_q);
3876 init_waitqueue_head(&hdev->suspend_wait_q);
3878 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3879 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
3881 hci_request_setup(hdev);
3883 hci_init_sysfs(hdev);
3884 discovery_init(hdev);
3890 /* Free HCI device */
3891 void hci_free_dev(struct hci_dev *hdev)
3893 /* will free via device release */
3894 put_device(&hdev->dev);
3896 EXPORT_SYMBOL(hci_free_dev);
3898 /* Register HCI device */
3899 int hci_register_dev(struct hci_dev *hdev)
3903 if (!hdev->open || !hdev->close || !hdev->send)
3906 /* Do not allow HCI_AMP devices to register at index 0,
3907 * so the index can be used as the AMP controller ID.
3909 switch (hdev->dev_type) {
3911 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3914 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3923 error = dev_set_name(&hdev->dev, "hci%u", id);
3927 hdev->name = dev_name(&hdev->dev);
3930 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3932 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3933 if (!hdev->workqueue) {
3938 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3940 if (!hdev->req_workqueue) {
3941 destroy_workqueue(hdev->workqueue);
3946 if (!IS_ERR_OR_NULL(bt_debugfs))
3947 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3949 error = device_add(&hdev->dev);
3953 hci_leds_init(hdev);
3955 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3956 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3959 if (rfkill_register(hdev->rfkill) < 0) {
3960 rfkill_destroy(hdev->rfkill);
3961 hdev->rfkill = NULL;
3965 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3966 hci_dev_set_flag(hdev, HCI_RFKILLED);
3968 hci_dev_set_flag(hdev, HCI_SETUP);
3969 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3971 if (hdev->dev_type == HCI_PRIMARY) {
3972 /* Assume BR/EDR support until proven otherwise (such as
3973 * through reading supported features during init.
3975 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3978 write_lock(&hci_dev_list_lock);
3979 list_add(&hdev->list, &hci_dev_list);
3980 write_unlock(&hci_dev_list_lock);
3982 /* Devices that are marked for raw-only usage are unconfigured
3983 * and should not be included in normal operation.
3985 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3986 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3988 hci_sock_dev_event(hdev, HCI_DEV_REG);
3991 if (!hdev->suspend_notifier.notifier_call &&
3992 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
3993 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
3994 error = register_pm_notifier(&hdev->suspend_notifier);
3999 queue_work(hdev->req_workqueue, &hdev->power_on);
4001 idr_init(&hdev->adv_monitors_idr);
4006 debugfs_remove_recursive(hdev->debugfs);
4007 destroy_workqueue(hdev->workqueue);
4008 destroy_workqueue(hdev->req_workqueue);
4010 ida_simple_remove(&hci_index_ida, hdev->id);
4014 EXPORT_SYMBOL(hci_register_dev);
4016 /* Unregister HCI device */
4017 void hci_unregister_dev(struct hci_dev *hdev)
4019 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4021 hci_dev_set_flag(hdev, HCI_UNREGISTER);
4023 write_lock(&hci_dev_list_lock);
4024 list_del(&hdev->list);
4025 write_unlock(&hci_dev_list_lock);
4027 cancel_work_sync(&hdev->power_on);
4029 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
4030 hci_suspend_clear_tasks(hdev);
4031 unregister_pm_notifier(&hdev->suspend_notifier);
4032 cancel_work_sync(&hdev->suspend_prepare);
4035 hci_dev_do_close(hdev);
4037 if (!test_bit(HCI_INIT, &hdev->flags) &&
4038 !hci_dev_test_flag(hdev, HCI_SETUP) &&
4039 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4041 mgmt_index_removed(hdev);
4042 hci_dev_unlock(hdev);
4045 /* mgmt_index_removed should take care of emptying the
4047 BUG_ON(!list_empty(&hdev->mgmt_pending));
4049 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4052 rfkill_unregister(hdev->rfkill);
4053 rfkill_destroy(hdev->rfkill);
4056 device_del(&hdev->dev);
4057 /* Actual cleanup is deferred until hci_release_dev(). */
4060 EXPORT_SYMBOL(hci_unregister_dev);
4062 /* Release HCI device */
4063 void hci_release_dev(struct hci_dev *hdev)
4065 debugfs_remove_recursive(hdev->debugfs);
4066 kfree_const(hdev->hw_info);
4067 kfree_const(hdev->fw_info);
4069 destroy_workqueue(hdev->workqueue);
4070 destroy_workqueue(hdev->req_workqueue);
4073 hci_bdaddr_list_clear(&hdev->reject_list);
4074 hci_bdaddr_list_clear(&hdev->accept_list);
4075 hci_uuids_clear(hdev);
4076 hci_link_keys_clear(hdev);
4077 hci_smp_ltks_clear(hdev);
4078 hci_smp_irks_clear(hdev);
4079 hci_remote_oob_data_clear(hdev);
4080 hci_adv_instances_clear(hdev);
4081 hci_adv_monitors_clear(hdev);
4082 hci_bdaddr_list_clear(&hdev->le_accept_list);
4083 hci_bdaddr_list_clear(&hdev->le_resolv_list);
4084 hci_conn_params_clear_all(hdev);
4085 hci_discovery_filter_clear(hdev);
4086 hci_blocked_keys_clear(hdev);
4087 hci_dev_unlock(hdev);
4089 ida_simple_remove(&hci_index_ida, hdev->id);
4090 kfree_skb(hdev->sent_cmd);
4093 EXPORT_SYMBOL(hci_release_dev);
4095 /* Suspend HCI device */
4096 int hci_suspend_dev(struct hci_dev *hdev)
4098 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4101 EXPORT_SYMBOL(hci_suspend_dev);
4103 /* Resume HCI device */
4104 int hci_resume_dev(struct hci_dev *hdev)
4106 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4109 EXPORT_SYMBOL(hci_resume_dev);
4111 /* Reset HCI device */
4112 int hci_reset_dev(struct hci_dev *hdev)
4114 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4115 struct sk_buff *skb;
4117 skb = bt_skb_alloc(3, GFP_ATOMIC);
4121 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4122 skb_put_data(skb, hw_err, 3);
4124 bt_dev_err(hdev, "Injecting HCI hardware error event");
4126 /* Send Hardware Error to upper stack */
4127 return hci_recv_frame(hdev, skb);
4129 EXPORT_SYMBOL(hci_reset_dev);
4131 /* Receive frame from HCI drivers */
4132 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4134 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4135 && !test_bit(HCI_INIT, &hdev->flags))) {
4140 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4141 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4142 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4143 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
4149 bt_cb(skb)->incoming = 1;
4152 __net_timestamp(skb);
4154 skb_queue_tail(&hdev->rx_q, skb);
4155 queue_work(hdev->workqueue, &hdev->rx_work);
4159 EXPORT_SYMBOL(hci_recv_frame);
4161 /* Receive diagnostic message from HCI drivers */
4162 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4164 /* Mark as diagnostic packet */
4165 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4168 __net_timestamp(skb);
4170 skb_queue_tail(&hdev->rx_q, skb);
4171 queue_work(hdev->workqueue, &hdev->rx_work);
4175 EXPORT_SYMBOL(hci_recv_diag);
4177 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4181 va_start(vargs, fmt);
4182 kfree_const(hdev->hw_info);
4183 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4186 EXPORT_SYMBOL(hci_set_hw_info);
4188 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4192 va_start(vargs, fmt);
4193 kfree_const(hdev->fw_info);
4194 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4197 EXPORT_SYMBOL(hci_set_fw_info);
4199 /* ---- Interface to upper protocols ---- */
4201 int hci_register_cb(struct hci_cb *cb)
4203 BT_DBG("%p name %s", cb, cb->name);
4205 mutex_lock(&hci_cb_list_lock);
4206 list_add_tail(&cb->list, &hci_cb_list);
4207 mutex_unlock(&hci_cb_list_lock);
4211 EXPORT_SYMBOL(hci_register_cb);
4213 int hci_unregister_cb(struct hci_cb *cb)
4215 BT_DBG("%p name %s", cb, cb->name);
4217 mutex_lock(&hci_cb_list_lock);
4218 list_del(&cb->list);
4219 mutex_unlock(&hci_cb_list_lock);
4223 EXPORT_SYMBOL(hci_unregister_cb);
4225 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4229 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4233 __net_timestamp(skb);
4235 /* Send copy to monitor */
4236 hci_send_to_monitor(hdev, skb);
4238 if (atomic_read(&hdev->promisc)) {
4239 /* Send copy to the sockets */
4240 hci_send_to_sock(hdev, skb);
4243 /* Get rid of skb owner, prior to sending to the driver. */
4246 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4251 err = hdev->send(hdev, skb);
4253 bt_dev_err(hdev, "sending frame failed (%d)", err);
4258 /* Send HCI command */
4259 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4262 struct sk_buff *skb;
4264 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4266 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4268 bt_dev_err(hdev, "no memory for command");
4272 /* Stand-alone HCI commands must be flagged as
4273 * single-command requests.
4275 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4277 skb_queue_tail(&hdev->cmd_q, skb);
4278 queue_work(hdev->workqueue, &hdev->cmd_work);
4283 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4286 struct sk_buff *skb;
4288 if (hci_opcode_ogf(opcode) != 0x3f) {
4289 /* A controller receiving a command shall respond with either
4290 * a Command Status Event or a Command Complete Event.
4291 * Therefore, all standard HCI commands must be sent via the
4292 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4293 * Some vendors do not comply with this rule for vendor-specific
4294 * commands and do not return any event. We want to support
4295 * unresponded commands for such cases only.
4297 bt_dev_err(hdev, "unresponded command not supported");
4301 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4303 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4308 hci_send_frame(hdev, skb);
4312 EXPORT_SYMBOL(__hci_cmd_send);
4314 /* Get data from the previously sent command */
4315 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4317 struct hci_command_hdr *hdr;
4319 if (!hdev->sent_cmd)
4322 hdr = (void *) hdev->sent_cmd->data;
4324 if (hdr->opcode != cpu_to_le16(opcode))
4327 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4329 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4332 /* Send HCI command and wait for command complete event */
4333 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4334 const void *param, u32 timeout)
4336 struct sk_buff *skb;
4338 if (!test_bit(HCI_UP, &hdev->flags))
4339 return ERR_PTR(-ENETDOWN);
4341 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4343 hci_req_sync_lock(hdev);
4344 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4345 hci_req_sync_unlock(hdev);
4349 EXPORT_SYMBOL(hci_cmd_sync);
4352 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4354 struct hci_acl_hdr *hdr;
4357 skb_push(skb, HCI_ACL_HDR_SIZE);
4358 skb_reset_transport_header(skb);
4359 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4360 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4361 hdr->dlen = cpu_to_le16(len);
4364 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4365 struct sk_buff *skb, __u16 flags)
4367 struct hci_conn *conn = chan->conn;
4368 struct hci_dev *hdev = conn->hdev;
4369 struct sk_buff *list;
4371 skb->len = skb_headlen(skb);
4374 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4376 switch (hdev->dev_type) {
4378 hci_add_acl_hdr(skb, conn->handle, flags);
4381 hci_add_acl_hdr(skb, chan->handle, flags);
4384 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4388 list = skb_shinfo(skb)->frag_list;
4390 /* Non fragmented */
4391 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4393 skb_queue_tail(queue, skb);
4396 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4398 skb_shinfo(skb)->frag_list = NULL;
4400 /* Queue all fragments atomically. We need to use spin_lock_bh
4401 * here because of 6LoWPAN links, as there this function is
4402 * called from softirq and using normal spin lock could cause
4405 spin_lock_bh(&queue->lock);
4407 __skb_queue_tail(queue, skb);
4409 flags &= ~ACL_START;
4412 skb = list; list = list->next;
4414 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4415 hci_add_acl_hdr(skb, conn->handle, flags);
4417 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4419 __skb_queue_tail(queue, skb);
4422 spin_unlock_bh(&queue->lock);
4426 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4428 struct hci_dev *hdev = chan->conn->hdev;
4430 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4432 hci_queue_acl(chan, &chan->data_q, skb, flags);
4434 queue_work(hdev->workqueue, &hdev->tx_work);
4438 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4440 struct hci_dev *hdev = conn->hdev;
4441 struct hci_sco_hdr hdr;
4443 BT_DBG("%s len %d", hdev->name, skb->len);
4445 hdr.handle = cpu_to_le16(conn->handle);
4446 hdr.dlen = skb->len;
4448 skb_push(skb, HCI_SCO_HDR_SIZE);
4449 skb_reset_transport_header(skb);
4450 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4452 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4454 skb_queue_tail(&conn->data_q, skb);
4455 queue_work(hdev->workqueue, &hdev->tx_work);
4458 /* ---- HCI TX task (outgoing data) ---- */
4460 /* HCI Connection scheduler */
4461 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4464 struct hci_conn_hash *h = &hdev->conn_hash;
4465 struct hci_conn *conn = NULL, *c;
4466 unsigned int num = 0, min = ~0;
4468 /* We don't have to lock device here. Connections are always
4469 * added and removed with TX task disabled. */
4473 list_for_each_entry_rcu(c, &h->list, list) {
4474 if (c->type != type || skb_queue_empty(&c->data_q))
4477 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4482 if (c->sent < min) {
4487 if (hci_conn_num(hdev, type) == num)
4496 switch (conn->type) {
4498 cnt = hdev->acl_cnt;
4502 cnt = hdev->sco_cnt;
4505 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4509 bt_dev_err(hdev, "unknown link type %d", conn->type);
4517 BT_DBG("conn %p quote %d", conn, *quote);
4521 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4523 struct hci_conn_hash *h = &hdev->conn_hash;
4526 bt_dev_err(hdev, "link tx timeout");
4530 /* Kill stalled connections */
4531 list_for_each_entry_rcu(c, &h->list, list) {
4532 if (c->type == type && c->sent) {
4533 bt_dev_err(hdev, "killing stalled connection %pMR",
4535 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4542 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4545 struct hci_conn_hash *h = &hdev->conn_hash;
4546 struct hci_chan *chan = NULL;
4547 unsigned int num = 0, min = ~0, cur_prio = 0;
4548 struct hci_conn *conn;
4549 int cnt, q, conn_num = 0;
4551 BT_DBG("%s", hdev->name);
4555 list_for_each_entry_rcu(conn, &h->list, list) {
4556 struct hci_chan *tmp;
4558 if (conn->type != type)
4561 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4566 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4567 struct sk_buff *skb;
4569 if (skb_queue_empty(&tmp->data_q))
4572 skb = skb_peek(&tmp->data_q);
4573 if (skb->priority < cur_prio)
4576 if (skb->priority > cur_prio) {
4579 cur_prio = skb->priority;
4584 if (conn->sent < min) {
4590 if (hci_conn_num(hdev, type) == conn_num)
4599 switch (chan->conn->type) {
4601 cnt = hdev->acl_cnt;
4604 cnt = hdev->block_cnt;
4608 cnt = hdev->sco_cnt;
4611 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4615 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4620 BT_DBG("chan %p quote %d", chan, *quote);
4624 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4626 struct hci_conn_hash *h = &hdev->conn_hash;
4627 struct hci_conn *conn;
4630 BT_DBG("%s", hdev->name);
4634 list_for_each_entry_rcu(conn, &h->list, list) {
4635 struct hci_chan *chan;
4637 if (conn->type != type)
4640 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4645 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4646 struct sk_buff *skb;
4653 if (skb_queue_empty(&chan->data_q))
4656 skb = skb_peek(&chan->data_q);
4657 if (skb->priority >= HCI_PRIO_MAX - 1)
4660 skb->priority = HCI_PRIO_MAX - 1;
4662 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4666 if (hci_conn_num(hdev, type) == num)
4674 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4676 /* Calculate count of blocks used by this packet */
4677 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4680 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
4682 unsigned long last_tx;
4684 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4689 last_tx = hdev->le_last_tx;
4692 last_tx = hdev->acl_last_tx;
4696 /* tx timeout must be longer than maximum link supervision timeout
4699 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
4700 hci_link_tx_to(hdev, type);
4704 static void hci_sched_sco(struct hci_dev *hdev)
4706 struct hci_conn *conn;
4707 struct sk_buff *skb;
4710 BT_DBG("%s", hdev->name);
4712 if (!hci_conn_num(hdev, SCO_LINK))
4715 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4716 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4717 BT_DBG("skb %p len %d", skb, skb->len);
4718 hci_send_frame(hdev, skb);
4721 if (conn->sent == ~0)
4727 static void hci_sched_esco(struct hci_dev *hdev)
4729 struct hci_conn *conn;
4730 struct sk_buff *skb;
4733 BT_DBG("%s", hdev->name);
4735 if (!hci_conn_num(hdev, ESCO_LINK))
4738 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4740 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4741 BT_DBG("skb %p len %d", skb, skb->len);
4742 hci_send_frame(hdev, skb);
4745 if (conn->sent == ~0)
4751 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4753 unsigned int cnt = hdev->acl_cnt;
4754 struct hci_chan *chan;
4755 struct sk_buff *skb;
4758 __check_timeout(hdev, cnt, ACL_LINK);
4760 while (hdev->acl_cnt &&
4761 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4762 u32 priority = (skb_peek(&chan->data_q))->priority;
4763 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4764 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4765 skb->len, skb->priority);
4767 /* Stop if priority has changed */
4768 if (skb->priority < priority)
4771 skb = skb_dequeue(&chan->data_q);
4773 hci_conn_enter_active_mode(chan->conn,
4774 bt_cb(skb)->force_active);
4776 hci_send_frame(hdev, skb);
4777 hdev->acl_last_tx = jiffies;
4783 /* Send pending SCO packets right away */
4784 hci_sched_sco(hdev);
4785 hci_sched_esco(hdev);
4789 if (cnt != hdev->acl_cnt)
4790 hci_prio_recalculate(hdev, ACL_LINK);
4793 static void hci_sched_acl_blk(struct hci_dev *hdev)
4795 unsigned int cnt = hdev->block_cnt;
4796 struct hci_chan *chan;
4797 struct sk_buff *skb;
4801 BT_DBG("%s", hdev->name);
4803 if (hdev->dev_type == HCI_AMP)
4808 __check_timeout(hdev, cnt, type);
4810 while (hdev->block_cnt > 0 &&
4811 (chan = hci_chan_sent(hdev, type, "e))) {
4812 u32 priority = (skb_peek(&chan->data_q))->priority;
4813 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4816 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4817 skb->len, skb->priority);
4819 /* Stop if priority has changed */
4820 if (skb->priority < priority)
4823 skb = skb_dequeue(&chan->data_q);
4825 blocks = __get_blocks(hdev, skb);
4826 if (blocks > hdev->block_cnt)
4829 hci_conn_enter_active_mode(chan->conn,
4830 bt_cb(skb)->force_active);
4832 hci_send_frame(hdev, skb);
4833 hdev->acl_last_tx = jiffies;
4835 hdev->block_cnt -= blocks;
4838 chan->sent += blocks;
4839 chan->conn->sent += blocks;
4843 if (cnt != hdev->block_cnt)
4844 hci_prio_recalculate(hdev, type);
4847 static void hci_sched_acl(struct hci_dev *hdev)
4849 BT_DBG("%s", hdev->name);
4851 /* No ACL link over BR/EDR controller */
4852 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4855 /* No AMP link over AMP controller */
4856 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4859 switch (hdev->flow_ctl_mode) {
4860 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4861 hci_sched_acl_pkt(hdev);
4864 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4865 hci_sched_acl_blk(hdev);
4870 static void hci_sched_le(struct hci_dev *hdev)
4872 struct hci_chan *chan;
4873 struct sk_buff *skb;
4874 int quote, cnt, tmp;
4876 BT_DBG("%s", hdev->name);
4878 if (!hci_conn_num(hdev, LE_LINK))
4881 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4883 __check_timeout(hdev, cnt, LE_LINK);
4886 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
4887 u32 priority = (skb_peek(&chan->data_q))->priority;
4888 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4889 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4890 skb->len, skb->priority);
4892 /* Stop if priority has changed */
4893 if (skb->priority < priority)
4896 skb = skb_dequeue(&chan->data_q);
4898 hci_send_frame(hdev, skb);
4899 hdev->le_last_tx = jiffies;
4905 /* Send pending SCO packets right away */
4906 hci_sched_sco(hdev);
4907 hci_sched_esco(hdev);
4914 hdev->acl_cnt = cnt;
4917 hci_prio_recalculate(hdev, LE_LINK);
4920 static void hci_tx_work(struct work_struct *work)
4922 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4923 struct sk_buff *skb;
4925 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4926 hdev->sco_cnt, hdev->le_cnt);
4928 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4929 /* Schedule queues and send stuff to HCI driver */
4930 hci_sched_sco(hdev);
4931 hci_sched_esco(hdev);
4932 hci_sched_acl(hdev);
4936 /* Send next queued raw (unknown type) packet */
4937 while ((skb = skb_dequeue(&hdev->raw_q)))
4938 hci_send_frame(hdev, skb);
4941 /* ----- HCI RX task (incoming data processing) ----- */
4943 /* ACL data packet */
4944 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4946 struct hci_acl_hdr *hdr = (void *) skb->data;
4947 struct hci_conn *conn;
4948 __u16 handle, flags;
4950 skb_pull(skb, HCI_ACL_HDR_SIZE);
4952 handle = __le16_to_cpu(hdr->handle);
4953 flags = hci_flags(handle);
4954 handle = hci_handle(handle);
4956 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4959 hdev->stat.acl_rx++;
4962 conn = hci_conn_hash_lookup_handle(hdev, handle);
4963 hci_dev_unlock(hdev);
4966 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4968 /* Send to upper protocol */
4969 l2cap_recv_acldata(conn, skb, flags);
4972 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4979 /* SCO data packet */
4980 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4982 struct hci_sco_hdr *hdr = (void *) skb->data;
4983 struct hci_conn *conn;
4984 __u16 handle, flags;
4986 skb_pull(skb, HCI_SCO_HDR_SIZE);
4988 handle = __le16_to_cpu(hdr->handle);
4989 flags = hci_flags(handle);
4990 handle = hci_handle(handle);
4992 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4995 hdev->stat.sco_rx++;
4998 conn = hci_conn_hash_lookup_handle(hdev, handle);
4999 hci_dev_unlock(hdev);
5002 /* Send to upper protocol */
5003 bt_cb(skb)->sco.pkt_status = flags & 0x03;
5004 sco_recv_scodata(conn, skb);
5007 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
5014 static bool hci_req_is_complete(struct hci_dev *hdev)
5016 struct sk_buff *skb;
5018 skb = skb_peek(&hdev->cmd_q);
5022 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
5025 static void hci_resend_last(struct hci_dev *hdev)
5027 struct hci_command_hdr *sent;
5028 struct sk_buff *skb;
5031 if (!hdev->sent_cmd)
5034 sent = (void *) hdev->sent_cmd->data;
5035 opcode = __le16_to_cpu(sent->opcode);
5036 if (opcode == HCI_OP_RESET)
5039 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5043 skb_queue_head(&hdev->cmd_q, skb);
5044 queue_work(hdev->workqueue, &hdev->cmd_work);
5047 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
5048 hci_req_complete_t *req_complete,
5049 hci_req_complete_skb_t *req_complete_skb)
5051 struct sk_buff *skb;
5052 unsigned long flags;
5054 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5056 /* If the completed command doesn't match the last one that was
5057 * sent we need to do special handling of it.
5059 if (!hci_sent_cmd_data(hdev, opcode)) {
5060 /* Some CSR based controllers generate a spontaneous
5061 * reset complete event during init and any pending
5062 * command will never be completed. In such a case we
5063 * need to resend whatever was the last sent
5066 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5067 hci_resend_last(hdev);
5072 /* If we reach this point this event matches the last command sent */
5073 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5075 /* If the command succeeded and there's still more commands in
5076 * this request the request is not yet complete.
5078 if (!status && !hci_req_is_complete(hdev))
5081 /* If this was the last command in a request the complete
5082 * callback would be found in hdev->sent_cmd instead of the
5083 * command queue (hdev->cmd_q).
5085 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5086 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5090 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5091 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5095 /* Remove all pending commands belonging to this request */
5096 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5097 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5098 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
5099 __skb_queue_head(&hdev->cmd_q, skb);
5103 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5104 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5106 *req_complete = bt_cb(skb)->hci.req_complete;
5107 dev_kfree_skb_irq(skb);
5109 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5112 static void hci_rx_work(struct work_struct *work)
5114 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5115 struct sk_buff *skb;
5117 BT_DBG("%s", hdev->name);
5119 while ((skb = skb_dequeue(&hdev->rx_q))) {
5120 /* Send copy to monitor */
5121 hci_send_to_monitor(hdev, skb);
5123 if (atomic_read(&hdev->promisc)) {
5124 /* Send copy to the sockets */
5125 hci_send_to_sock(hdev, skb);
5128 /* If the device has been opened in HCI_USER_CHANNEL,
5129 * the userspace has exclusive access to device.
5130 * When device is HCI_INIT, we still need to process
5131 * the data packets to the driver in order
5132 * to complete its setup().
5134 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5135 !test_bit(HCI_INIT, &hdev->flags)) {
5140 if (test_bit(HCI_INIT, &hdev->flags)) {
5141 /* Don't process data packets in this states. */
5142 switch (hci_skb_pkt_type(skb)) {
5143 case HCI_ACLDATA_PKT:
5144 case HCI_SCODATA_PKT:
5145 case HCI_ISODATA_PKT:
5152 switch (hci_skb_pkt_type(skb)) {
5154 BT_DBG("%s Event packet", hdev->name);
5155 hci_event_packet(hdev, skb);
5158 case HCI_ACLDATA_PKT:
5159 BT_DBG("%s ACL data packet", hdev->name);
5160 hci_acldata_packet(hdev, skb);
5163 case HCI_SCODATA_PKT:
5164 BT_DBG("%s SCO data packet", hdev->name);
5165 hci_scodata_packet(hdev, skb);
5175 static void hci_cmd_work(struct work_struct *work)
5177 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5178 struct sk_buff *skb;
5180 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5181 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5183 /* Send queued commands */
5184 if (atomic_read(&hdev->cmd_cnt)) {
5185 skb = skb_dequeue(&hdev->cmd_q);
5189 kfree_skb(hdev->sent_cmd);
5191 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5192 if (hdev->sent_cmd) {
5193 if (hci_req_status_pend(hdev))
5194 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5195 atomic_dec(&hdev->cmd_cnt);
5196 hci_send_frame(hdev, skb);
5197 if (test_bit(HCI_RESET, &hdev->flags))
5198 cancel_delayed_work(&hdev->cmd_timer);
5200 schedule_delayed_work(&hdev->cmd_timer,
5203 skb_queue_head(&hdev->cmd_q, skb);
5204 queue_work(hdev->workqueue, &hdev->cmd_work);