2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
28 #include <linux/crypto.h>
29 #include <crypto/algapi.h>
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "hci_request.h"
36 #include "hci_debugfs.h"
/* 16-byte all-zero link key constant, used to recognize blank/invalid keys. */
41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
42 "\x00\x00\x00\x00\x00\x00\x00\x00"
44 /* Handle HCI Event packets */
/* Command Complete handler for HCI Inquiry Cancel.
 * NOTE(review): this extract is missing interior lines (braces and early
 * returns were stripped); the code below is kept verbatim.
 */
46 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
49 __u8 status = *((__u8 *) skb->data);
51 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 /* It is possible that we receive Inquiry Complete event right
54 * before we receive Inquiry Cancel Command Complete event, in
55 * which case the latter event should have status of Command
56 * Disallowed (0x0c). This should not be treated as error, since
57 * we actually achieve what Inquiry Cancel wants to achieve,
58 * which is to end the last Inquiry session.
60 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
61 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
/* Clear the in-progress inquiry flag and wake anyone waiting on it. */
70 clear_bit(HCI_INQUIRY, &hdev->flags);
71 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
72 wake_up_bit(&hdev->flags, HCI_INQUIRY);
75 /* Set discovery state to stopped if we're not doing LE active
78 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
79 hdev->le_scan_type != LE_SCAN_ACTIVE)
80 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
83 hci_conn_check_pending(hdev);

/* Command Complete handler for HCI Periodic Inquiry Mode: sets the
 * HCI_PERIODIC_INQ device flag.
 */
86 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
88 __u8 status = *((__u8 *) skb->data);
90 BT_DBG("%s status 0x%2.2x", hdev->name, status);
95 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);

/* Command Complete handler for HCI Exit Periodic Inquiry Mode: clears the
 * HCI_PERIODIC_INQ flag and kicks any pending connection attempts.
 */
98 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
100 __u8 status = *((__u8 *) skb->data);
102 BT_DBG("%s status 0x%2.2x", hdev->name, status);
107 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
109 hci_conn_check_pending(hdev);
/* Command Complete handler for HCI Remote Name Request Cancel.
 * Only logs; no state is updated here.
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
112 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
115 BT_DBG("%s", hdev->name);

/* Command Complete handler for HCI Role Discovery: records the reported
 * role on the matching connection (looked up by handle).
 */
118 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
120 struct hci_rp_role_discovery *rp = (void *) skb->data;
121 struct hci_conn *conn;
123 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
130 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
132 conn->role = rp->role;
134 hci_dev_unlock(hdev);

/* Command Complete handler for HCI Read Link Policy Settings: stores the
 * policy on the matching connection.
 */
137 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
139 struct hci_rp_read_link_policy *rp = (void *) skb->data;
140 struct hci_conn *conn;
142 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
149 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
151 conn->link_policy = __le16_to_cpu(rp->policy);
153 hci_dev_unlock(hdev);

/* Command Complete handler for HCI Write Link Policy Settings: copies the
 * policy value from the originally-sent command onto the connection.
 */
156 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
158 struct hci_rp_write_link_policy *rp = (void *) skb->data;
159 struct hci_conn *conn;
162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
167 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
173 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
/* Policy word sits 2 bytes into the sent command (after the handle). */
175 conn->link_policy = get_unaligned_le16(sent + 2);
177 hci_dev_unlock(hdev);

/* Command Complete handler for HCI Read Default Link Policy Settings. */
180 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
183 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
185 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
190 hdev->link_policy = __le16_to_cpu(rp->policy);

/* Command Complete handler for HCI Write Default Link Policy Settings:
 * mirrors the sent value into hdev->link_policy.
 */
193 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
196 __u8 status = *((__u8 *) skb->data);
199 BT_DBG("%s status 0x%2.2x", hdev->name, status);
204 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
208 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete handler for HCI Reset: clears transient device state
 * (flags, discovery, adv/scan-response data, tx power, white list).
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
211 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
213 __u8 status = *((__u8 *) skb->data);
215 BT_DBG("%s status 0x%2.2x", hdev->name, status);
217 clear_bit(HCI_RESET, &hdev->flags);
222 /* Reset all non-persistent flags */
223 hci_dev_clear_volatile_flags(hdev);
225 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
227 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
228 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
230 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
231 hdev->adv_data_len = 0;
233 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
234 hdev->scan_rsp_data_len = 0;
236 hdev->le_scan_type = LE_SCAN_PASSIVE;
238 hdev->ssp_debug_mode = 0;
240 hci_bdaddr_list_clear(&hdev->le_white_list);

/* Command Complete handler for HCI Read Stored Link Key: records max/num
 * stored keys, but only for a successful "read all" request.
 */
243 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
246 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
247 struct hci_cp_read_stored_link_key *sent;
249 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
251 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
255 if (!rp->status && sent->read_all == 0x01) {
256 hdev->stored_max_keys = rp->max_keys;
257 hdev->stored_num_keys = rp->num_keys;

/* Command Complete handler for HCI Delete Stored Link Key: decrements the
 * stored-key count, clamping at zero to avoid underflow.
 */
261 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
264 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
266 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
271 if (rp->num_keys <= hdev->stored_num_keys)
272 hdev->stored_num_keys -= rp->num_keys;
274 hdev->stored_num_keys = 0;
/* Command Complete handler for HCI Write Local Name: notifies mgmt (if
 * enabled) and caches the name that was sent.
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
277 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
279 __u8 status = *((__u8 *) skb->data);
282 BT_DBG("%s status 0x%2.2x", hdev->name, status);
284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
290 if (hci_dev_test_flag(hdev, HCI_MGMT))
291 mgmt_set_local_name_complete(hdev, sent, status);
293 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
295 hci_dev_unlock(hdev);

/* Command Complete handler for HCI Read Local Name: caches the name only
 * during initial setup/config of the controller.
 */
298 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
300 struct hci_rp_read_local_name *rp = (void *) skb->data;
302 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
307 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
308 hci_dev_test_flag(hdev, HCI_CONFIG))
309 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);

/* Command Complete handler for HCI Write Authentication Enable: mirrors
 * the sent parameter into the HCI_AUTH flag and notifies mgmt.
 */
312 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
314 __u8 status = *((__u8 *) skb->data);
317 BT_DBG("%s status 0x%2.2x", hdev->name, status);
319 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
326 __u8 param = *((__u8 *) sent);
328 if (param == AUTH_ENABLED)
329 set_bit(HCI_AUTH, &hdev->flags);
331 clear_bit(HCI_AUTH, &hdev->flags);
334 if (hci_dev_test_flag(hdev, HCI_MGMT))
335 mgmt_auth_enable_complete(hdev, status);
337 hci_dev_unlock(hdev);

/* Command Complete handler for HCI Write Encryption Mode: mirrors the sent
 * parameter into the HCI_ENCRYPT flag.
 */
340 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
342 __u8 status = *((__u8 *) skb->data);
346 BT_DBG("%s status 0x%2.2x", hdev->name, status);
351 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
355 param = *((__u8 *) sent);
358 set_bit(HCI_ENCRYPT, &hdev->flags);
360 clear_bit(HCI_ENCRYPT, &hdev->flags);

/* Command Complete handler for HCI Write Scan Enable: updates ISCAN/PSCAN
 * flags from the sent parameter bits.
 */
363 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
365 __u8 status = *((__u8 *) skb->data);
369 BT_DBG("%s status 0x%2.2x", hdev->name, status);
371 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
375 param = *((__u8 *) sent);
380 hdev->discov_timeout = 0;
384 if (param & SCAN_INQUIRY)
385 set_bit(HCI_ISCAN, &hdev->flags);
387 clear_bit(HCI_ISCAN, &hdev->flags);
389 if (param & SCAN_PAGE)
390 set_bit(HCI_PSCAN, &hdev->flags);
392 clear_bit(HCI_PSCAN, &hdev->flags);
395 hci_dev_unlock(hdev);
/* Command Complete handler for HCI Read Class of Device: caches the
 * 3-byte device class (stored little-endian, logged MSB first).
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
398 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
400 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
402 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
407 memcpy(hdev->dev_class, rp->dev_class, 3);
409 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
410 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);

/* Command Complete handler for HCI Write Class of Device: caches the sent
 * class and notifies mgmt.
 */
413 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
415 __u8 status = *((__u8 *) skb->data);
418 BT_DBG("%s status 0x%2.2x", hdev->name, status);
420 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
427 memcpy(hdev->dev_class, sent, 3);
429 if (hci_dev_test_flag(hdev, HCI_MGMT))
430 mgmt_set_class_of_dev_complete(hdev, sent, status);
432 hci_dev_unlock(hdev);

/* Command Complete handler for HCI Read Voice Setting: caches the value
 * and notifies the driver only when it actually changed.
 */
435 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
437 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
440 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
445 setting = __le16_to_cpu(rp->voice_setting);
447 if (hdev->voice_setting == setting)
450 hdev->voice_setting = setting;
452 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
455 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

/* Command Complete handler for HCI Write Voice Setting: same caching and
 * change-notification logic, using the value from the sent command.
 */
458 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
461 __u8 status = *((__u8 *) skb->data);
465 BT_DBG("%s status 0x%2.2x", hdev->name, status);
470 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
474 setting = get_unaligned_le16(sent);
476 if (hdev->voice_setting == setting)
479 hdev->voice_setting = setting;
481 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
484 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

/* Command Complete handler for HCI Read Number of Supported IAC. */
487 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
490 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
492 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
497 hdev->num_iac = rp->num_iac;
499 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Command Complete handler for HCI Write Simple Pairing Mode: updates the
 * host-feature bit, mgmt, and the HCI_SSP_ENABLED flag.
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
502 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
504 __u8 status = *((__u8 *) skb->data);
505 struct hci_cp_write_ssp_mode *sent;
507 BT_DBG("%s status 0x%2.2x", hdev->name, status);
509 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
517 hdev->features[1][0] |= LMP_HOST_SSP;
519 hdev->features[1][0] &= ~LMP_HOST_SSP;
522 if (hci_dev_test_flag(hdev, HCI_MGMT))
523 mgmt_ssp_enable_complete(hdev, sent->mode, status);
526 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
528 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
531 hci_dev_unlock(hdev);

/* Command Complete handler for HCI Write Secure Connections Host Support:
 * updates the host-feature bit and, outside mgmt control, the
 * HCI_SC_ENABLED flag.
 */
534 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
536 u8 status = *((u8 *) skb->data);
537 struct hci_cp_write_sc_support *sent;
539 BT_DBG("%s status 0x%2.2x", hdev->name, status);
541 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
549 hdev->features[1][0] |= LMP_HOST_SC;
551 hdev->features[1][0] &= ~LMP_HOST_SC;
554 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
556 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
558 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
561 hci_dev_unlock(hdev);

/* Command Complete handler for HCI Read Local Version Information: caches
 * version/manufacturer data, only during setup/config.
 */
564 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
566 struct hci_rp_read_local_version *rp = (void *) skb->data;
568 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
573 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
574 hci_dev_test_flag(hdev, HCI_CONFIG)) {
575 hdev->hci_ver = rp->hci_ver;
576 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
577 hdev->lmp_ver = rp->lmp_ver;
578 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
579 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

/* Command Complete handler for HCI Read Local Supported Commands: caches
 * the supported-commands bitmap, only during setup/config.
 */
583 static void hci_cc_read_local_commands(struct hci_dev *hdev,
586 struct hci_rp_read_local_commands *rp = (void *) skb->data;
588 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
593 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
594 hci_dev_test_flag(hdev, HCI_CONFIG))
595 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete handler for HCI Read Local Supported Features: caches
 * the LMP feature page 0 and derives supported ACL/SCO/eSCO packet types.
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
598 static void hci_cc_read_local_features(struct hci_dev *hdev,
601 struct hci_rp_read_local_features *rp = (void *) skb->data;
603 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
608 memcpy(hdev->features, rp->features, 8);
610 /* Adjust default settings according to features
611 * supported by device. */
613 if (hdev->features[0][0] & LMP_3SLOT)
614 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
616 if (hdev->features[0][0] & LMP_5SLOT)
617 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
619 if (hdev->features[0][1] & LMP_HV2) {
620 hdev->pkt_type |= (HCI_HV2);
621 hdev->esco_type |= (ESCO_HV2);
624 if (hdev->features[0][1] & LMP_HV3) {
625 hdev->pkt_type |= (HCI_HV3);
626 hdev->esco_type |= (ESCO_HV3);
629 if (lmp_esco_capable(hdev))
630 hdev->esco_type |= (ESCO_EV3);
632 if (hdev->features[0][4] & LMP_EV4)
633 hdev->esco_type |= (ESCO_EV4);
635 if (hdev->features[0][4] & LMP_EV5)
636 hdev->esco_type |= (ESCO_EV5);
638 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
639 hdev->esco_type |= (ESCO_2EV3);
641 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
642 hdev->esco_type |= (ESCO_3EV3);
644 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
645 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

/* Command Complete handler for HCI Read Local Extended Features: tracks
 * the highest feature page and caches the returned page.
 */
648 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
651 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
653 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
658 if (hdev->max_page < rp->max_page)
659 hdev->max_page = rp->max_page;
661 if (rp->page < HCI_MAX_PAGES)
662 memcpy(hdev->features[rp->page], rp->features, 8);

/* Command Complete handler for HCI Read Flow Control Mode. */
665 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
668 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
670 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
675 hdev->flow_ctl_mode = rp->mode;

/* Command Complete handler for HCI Read Buffer Size: caches ACL/SCO MTUs
 * and packet counts, and initializes the free-packet counters.
 */
678 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
680 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
682 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
687 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
688 hdev->sco_mtu = rp->sco_mtu;
689 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
690 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
692 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
697 hdev->acl_cnt = hdev->acl_pkts;
698 hdev->sco_cnt = hdev->sco_pkts;
700 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
701 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

/* Command Complete handler for HCI Read BD_ADDR: records the controller
 * address during init, and the setup-time address during setup.
 */
704 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
706 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
708 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
713 if (test_bit(HCI_INIT, &hdev->flags))
714 bacpy(&hdev->bdaddr, &rp->bdaddr);
716 if (hci_dev_test_flag(hdev, HCI_SETUP))
717 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete handler for HCI Read Page Scan Activity: caches
 * interval/window during controller init only.
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
720 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
723 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
725 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
730 if (test_bit(HCI_INIT, &hdev->flags)) {
731 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
732 hdev->page_scan_window = __le16_to_cpu(rp->window);

/* Command Complete handler for HCI Write Page Scan Activity: mirrors the
 * sent interval/window into the device state.
 */
736 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
739 u8 status = *((u8 *) skb->data);
740 struct hci_cp_write_page_scan_activity *sent;
742 BT_DBG("%s status 0x%2.2x", hdev->name, status);
747 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
751 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
752 hdev->page_scan_window = __le16_to_cpu(sent->window);

/* Command Complete handler for HCI Read Page Scan Type (init-time only). */
755 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
758 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
760 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
765 if (test_bit(HCI_INIT, &hdev->flags))
766 hdev->page_scan_type = rp->type;

/* Command Complete handler for HCI Write Page Scan Type: mirrors the sent
 * type into the device state.
 */
769 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
772 u8 status = *((u8 *) skb->data);
775 BT_DBG("%s status 0x%2.2x", hdev->name, status);
780 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
782 hdev->page_scan_type = *type;

/* Command Complete handler for HCI Read Data Block Size (block-based flow
 * control): caches block MTU/length/count and seeds the free-block count.
 */
785 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
788 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
790 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
795 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
796 hdev->block_len = __le16_to_cpu(rp->block_len);
797 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
799 hdev->block_cnt = hdev->num_blocks;
801 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
802 hdev->block_cnt, hdev->block_len);
/* Command Complete handler for HCI Read Clock: stores the local clock on
 * the device (which == 0x00) or the piconet clock/accuracy on the
 * connection matching the returned handle.
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
805 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
807 struct hci_rp_read_clock *rp = (void *) skb->data;
808 struct hci_cp_read_clock *cp;
809 struct hci_conn *conn;
811 BT_DBG("%s", hdev->name);
/* Guard against short responses before touching rp fields. */
813 if (skb->len < sizeof(*rp))
821 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
825 if (cp->which == 0x00) {
826 hdev->clock = le32_to_cpu(rp->clock);
830 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
832 conn->clock = le32_to_cpu(rp->clock);
833 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
837 hci_dev_unlock(hdev);

/* Command Complete handler for HCI Read Local AMP Info: caches all AMP
 * controller capabilities on hdev.
 */
840 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
843 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
850 hdev->amp_status = rp->amp_status;
851 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
852 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
853 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
854 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
855 hdev->amp_type = rp->amp_type;
856 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
857 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
858 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
859 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

/* Command Complete handler for HCI Read Inquiry Response TX Power. */
862 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
865 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
867 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
872 hdev->inq_tx_power = rp->tx_power;

/* Command Complete handler for HCI PIN Code Request Reply: notifies mgmt
 * and records the PIN length on the matching ACL connection.
 */
875 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
877 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
878 struct hci_cp_pin_code_reply *cp;
879 struct hci_conn *conn;
881 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
885 if (hci_dev_test_flag(hdev, HCI_MGMT))
886 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
891 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
895 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
897 conn->pin_length = cp->pin_len;
900 hci_dev_unlock(hdev);

/* Command Complete handler for HCI PIN Code Request Negative Reply:
 * notifies mgmt only.
 */
903 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
905 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
907 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
911 if (hci_dev_test_flag(hdev, HCI_MGMT))
912 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
915 hci_dev_unlock(hdev);
/* Command Complete handler for HCI LE Read Buffer Size: caches LE MTU and
 * packet count and seeds the free-packet counter.
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
918 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
921 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
923 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
928 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
929 hdev->le_pkts = rp->le_max_pkt;
931 hdev->le_cnt = hdev->le_pkts;
933 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

/* Command Complete handler for HCI LE Read Local Supported Features. */
936 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
939 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
941 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
946 memcpy(hdev->le_features, rp->features, 8);

/* Command Complete handler for HCI LE Read Advertising TX Power. */
949 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
952 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
954 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
959 hdev->adv_tx_power = rp->tx_power;

/* Command Complete handler for HCI User Confirmation Reply: mgmt
 * notification only.
 */
962 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
964 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
966 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
970 if (hci_dev_test_flag(hdev, HCI_MGMT))
971 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
974 hci_dev_unlock(hdev);

/* Command Complete handler for HCI User Confirmation Negative Reply. */
977 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
980 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
982 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
986 if (hci_dev_test_flag(hdev, HCI_MGMT))
987 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
988 ACL_LINK, 0, rp->status);
990 hci_dev_unlock(hdev);

/* Command Complete handler for HCI User Passkey Reply. */
993 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
995 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
997 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1001 if (hci_dev_test_flag(hdev, HCI_MGMT))
1002 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1005 hci_dev_unlock(hdev);

/* Command Complete handler for HCI User Passkey Negative Reply. */
1008 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1009 struct sk_buff *skb)
1011 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1013 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1017 if (hci_dev_test_flag(hdev, HCI_MGMT))
1018 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1019 ACL_LINK, 0, rp->status);
1021 hci_dev_unlock(hdev);

/* Command Complete handler for HCI Read Local OOB Data (debug log only
 * in the visible lines).
 */
1024 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1025 struct sk_buff *skb)
1027 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1029 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

/* Command Complete handler for HCI Read Local OOB Extended Data (debug
 * log only in the visible lines).
 */
1032 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1033 struct sk_buff *skb)
1035 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1037 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete handler for HCI LE Set Random Address: caches the
 * address that was sent.
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
1040 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1042 __u8 status = *((__u8 *) skb->data);
1045 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1050 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1056 bacpy(&hdev->random_addr, sent);
1058 hci_dev_unlock(hdev);

/* Command Complete handler for HCI LE Set Advertise Enable: tracks the
 * HCI_LE_ADV flag and, when advertising for a pending LE connection,
 * arms the connection-timeout work item.
 */
1061 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1063 __u8 *sent, status = *((__u8 *) skb->data);
1065 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1070 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1076 /* If we're doing connection initiation as peripheral. Set a
1077 * timeout in case something goes wrong.
1080 struct hci_conn *conn;
1082 hci_dev_set_flag(hdev, HCI_LE_ADV);
1084 conn = hci_lookup_le_connect(hdev);
1086 queue_delayed_work(hdev->workqueue,
1087 &conn->le_conn_timeout,
1088 conn->conn_timeout);
1090 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1093 hci_dev_unlock(hdev);

/* Command Complete handler for HCI LE Set Scan Parameters: caches the
 * scan type (active/passive) that was sent.
 */
1096 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1098 struct hci_cp_le_set_scan_param *cp;
1099 __u8 status = *((__u8 *) skb->data);
1101 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1106 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1112 hdev->le_scan_type = cp->type;
1114 hci_dev_unlock(hdev);

/* Returns true when a deferred advertising report is buffered in the
 * discovery state (last_adv_addr differs from BDADDR_ANY).
 */
1117 static bool has_pending_adv_report(struct hci_dev *hdev)
1119 struct discovery_state *d = &hdev->discovery;
1121 return bacmp(&d->last_adv_addr, BDADDR_ANY);

/* Resets the buffered advertising report in the discovery state. */
1124 static void clear_pending_adv_report(struct hci_dev *hdev)
1126 struct discovery_state *d = &hdev->discovery;
1128 bacpy(&d->last_adv_addr, BDADDR_ANY);
1129 d->last_adv_data_len = 0;

/* Buffers one advertising report (address, rssi, flags, data) in the
 * discovery state; oversized data is rejected by the length check.
 */
1132 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1133 u8 bdaddr_type, s8 rssi, u32 flags,
1136 struct discovery_state *d = &hdev->discovery;
1138 if (len > HCI_MAX_AD_LENGTH)
1141 bacpy(&d->last_adv_addr, bdaddr);
1142 d->last_adv_addr_type = bdaddr_type;
1143 d->last_adv_rssi = rssi;
1144 d->last_adv_flags = flags;
1145 memcpy(d->last_adv_data, data, len);
1146 d->last_adv_data_len = len;
/* Command Complete handler for HCI LE Set Scan Enable: on enable, sets
 * HCI_LE_SCAN and drops any stale buffered report for active scans; on
 * disable, flushes a pending report to mgmt, cancels the scan-disable
 * timer, and either stops discovery or re-enables advertising.
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
1149 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1150 struct sk_buff *skb)
1152 struct hci_cp_le_set_scan_enable *cp;
1153 __u8 status = *((__u8 *) skb->data);
1155 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1160 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1166 switch (cp->enable) {
1167 case LE_SCAN_ENABLE:
1168 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1169 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1170 clear_pending_adv_report(hdev);
1173 case LE_SCAN_DISABLE:
1174 /* We do this here instead of when setting DISCOVERY_STOPPED
1175 * since the latter would potentially require waiting for
1176 * inquiry to stop too.
1178 if (has_pending_adv_report(hdev)) {
1179 struct discovery_state *d = &hdev->discovery;
1181 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1182 d->last_adv_addr_type, NULL,
1183 d->last_adv_rssi, d->last_adv_flags,
1185 d->last_adv_data_len, NULL, 0);
1188 /* Cancel this timer so that we don't try to disable scanning
1189 * when it's already disabled.
1191 cancel_delayed_work(&hdev->le_scan_disable);
1193 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1195 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1196 * interrupted scanning due to a connect request. Mark
1197 * therefore discovery as stopped. If this was not
1198 * because of a connect request advertising might have
1199 * been disabled because of active scanning, so
1200 * re-enable it again if necessary.
1202 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1203 hci_discovery_set_state(hdev, DISCOVERY_STOPPED)
1204 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1205 hdev->discovery.state == DISCOVERY_FINDING)
1206 hci_req_reenable_advertising(hdev);
1211 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1215 hci_dev_unlock(hdev);

/* Command Complete handler for HCI LE Read White List Size. */
1218 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1219 struct sk_buff *skb)
1221 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1223 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1228 hdev->le_white_list_size = rp->size;

/* Command Complete handler for HCI LE Clear White List: empties the
 * kernel's mirror of the controller white list.
 */
1231 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1232 struct sk_buff *skb)
1234 __u8 status = *((__u8 *) skb->data);
1236 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1241 hci_bdaddr_list_clear(&hdev->le_white_list);

/* Command Complete handler for HCI LE Add Device To White List: mirrors
 * the sent address into the kernel's white-list copy.
 */
1244 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1245 struct sk_buff *skb)
1247 struct hci_cp_le_add_to_white_list *sent;
1248 __u8 status = *((__u8 *) skb->data);
1250 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1255 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1259 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,

/* Command Complete handler for HCI LE Remove Device From White List:
 * mirrors the removal into the kernel's white-list copy.
 */
1263 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1264 struct sk_buff *skb)
1266 struct hci_cp_le_del_from_white_list *sent;
1267 __u8 status = *((__u8 *) skb->data);
1269 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1274 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1278 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
/* Command Complete handler for HCI LE Read Supported States.
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
1282 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1283 struct sk_buff *skb)
1285 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1287 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1292 memcpy(hdev->le_states, rp->le_states, 8);

/* Command Complete handler for HCI LE Read Suggested Default Data Length:
 * caches default TX octets/time.
 */
1295 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1296 struct sk_buff *skb)
1298 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1300 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1305 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1306 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

/* Command Complete handler for HCI LE Write Suggested Default Data
 * Length: mirrors the sent TX octets/time into device state.
 */
1309 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1310 struct sk_buff *skb)
1312 struct hci_cp_le_write_def_data_len *sent;
1313 __u8 status = *((__u8 *) skb->data);
1315 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1320 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1324 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1325 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

/* Command Complete handler for HCI LE Read Maximum Data Length: caches
 * max TX/RX octets and times.
 */
1328 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1329 struct sk_buff *skb)
1331 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1333 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1338 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1339 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1340 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1341 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

/* Command Complete handler for HCI Write LE Host Supported: updates the
 * LMP host-feature bits and the HCI_LE_ENABLED/HCI_ADVERTISING flags to
 * match what was sent.
 */
1344 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1345 struct sk_buff *skb)
1347 struct hci_cp_write_le_host_supported *sent;
1348 __u8 status = *((__u8 *) skb->data);
1350 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1362 hdev->features[1][0] |= LMP_HOST_LE;
1363 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1365 hdev->features[1][0] &= ~LMP_HOST_LE;
1366 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1367 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1371 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1373 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1375 hci_dev_unlock(hdev);
/* Command Complete handler for HCI LE Set Advertising Parameters: caches
 * the own-address type that was sent.
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
1378 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1380 struct hci_cp_le_set_adv_param *cp;
1381 u8 status = *((u8 *) skb->data);
1383 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1388 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1393 hdev->adv_addr_type = cp->own_address_type;
1394 hci_dev_unlock(hdev);

/* Command Complete handler for HCI Read RSSI: records the RSSI on the
 * connection matching the returned handle.
 */
1397 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1399 struct hci_rp_read_rssi *rp = (void *) skb->data;
1400 struct hci_conn *conn;
1402 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1409 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1411 conn->rssi = rp->rssi;
1413 hci_dev_unlock(hdev);

/* Command Complete handler for HCI Read Transmit Power Level: stores the
 * current or maximum TX power on the connection depending on the type
 * byte of the originally-sent command.
 */
1416 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1418 struct hci_cp_read_tx_power *sent;
1419 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1420 struct hci_conn *conn;
1422 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1427 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1433 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1437 switch (sent->type) {
1439 conn->tx_power = rp->tx_power;
1442 conn->max_tx_power = rp->tx_power;
1447 hci_dev_unlock(hdev);

/* Command Complete handler for HCI Write SSP Debug Mode: caches the sent
 * mode byte.
 */
1450 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1452 u8 status = *((u8 *) skb->data);
1455 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1460 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1462 hdev->ssp_debug_mode = *mode;
/* Command Status handler for HCI Inquiry: on failure checks pending
 * connections, otherwise marks an inquiry as in progress.
 * NOTE(review): extract is missing interior lines; code kept verbatim.
 */
1465 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1467 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1470 hci_conn_check_pending(hdev);
1474 set_bit(HCI_INQUIRY, &hdev->flags);

/* Command Status handler for HCI Create Connection: on error closes the
 * outgoing connection (retrying up to 2 attempts for status 0x0c), on
 * success transitions it to BT_CONNECT2 or creates the hci_conn if the
 * command originated outside the connection hash.
 */
1477 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1479 struct hci_cp_create_conn *cp;
1480 struct hci_conn *conn;
1482 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1484 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1490 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1492 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1495 if (conn && conn->state == BT_CONNECT) {
1496 if (status != 0x0c || conn->attempt > 2) {
1497 conn->state = BT_CLOSED;
1498 hci_connect_cfm(conn, status);
1501 conn->state = BT_CONNECT2;
1505 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1508 BT_ERR("No memory for new connection");
1512 hci_dev_unlock(hdev);

/* Command Status handler for HCI Add SCO Connection: on error closes the
 * SCO link hanging off the ACL connection identified by the sent handle.
 */
1515 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1517 struct hci_cp_add_sco *cp;
1518 struct hci_conn *acl, *sco;
1521 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1526 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1530 handle = __le16_to_cpu(cp->handle);
1532 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1536 acl = hci_conn_hash_lookup_handle(hdev, handle);
1540 sco->state = BT_CLOSED;
1542 hci_connect_cfm(sco, status);
1547 hci_dev_unlock(hdev);

/* Command Status handler for HCI Authentication Requested: on error,
 * fails the connect-in-config and drops the reference.
 */
1550 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1552 struct hci_cp_auth_requested *cp;
1553 struct hci_conn *conn;
1555 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1560 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1566 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1568 if (conn->state == BT_CONFIG) {
1569 hci_connect_cfm(conn, status);
1570 hci_conn_drop(conn);
1574 hci_dev_unlock(hdev);

/* Command Status handler for HCI Set Connection Encryption: on error,
 * fails the connect-in-config and drops the reference (mirrors
 * hci_cs_auth_requested).
 */
1577 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1579 struct hci_cp_set_conn_encrypt *cp;
1580 struct hci_conn *conn;
1582 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1587 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1593 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1595 if (conn->state == BT_CONFIG) {
1596 hci_connect_cfm(conn, status);
1597 hci_conn_drop(conn);
1601 hci_dev_unlock(hdev);
1604 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1605 struct hci_conn *conn)
1607 if (conn->state != BT_CONFIG || !conn->out)
1610 if (conn->pending_sec_level == BT_SECURITY_SDP)
1613 /* Only request authentication for SSP connections or non-SSP
1614 * devices with sec_level MEDIUM or HIGH or if MITM protection
1617 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1618 conn->pending_sec_level != BT_SECURITY_FIPS &&
1619 conn->pending_sec_level != BT_SECURITY_HIGH &&
1620 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1626 static int hci_resolve_name(struct hci_dev *hdev,
1627 struct inquiry_entry *e)
1629 struct hci_cp_remote_name_req cp;
1631 memset(&cp, 0, sizeof(cp));
1633 bacpy(&cp.bdaddr, &e->data.bdaddr);
1634 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1635 cp.pscan_mode = e->data.pscan_mode;
1636 cp.clock_offset = e->data.clock_offset;
1638 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1641 static bool hci_resolve_next_name(struct hci_dev *hdev)
1643 struct discovery_state *discov = &hdev->discovery;
1644 struct inquiry_entry *e;
1646 if (list_empty(&discov->resolve))
1649 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1653 if (hci_resolve_name(hdev, e) == 0) {
1654 e->name_state = NAME_PENDING;
1661 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1662 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1664 struct discovery_state *discov = &hdev->discovery;
1665 struct inquiry_entry *e;
1667 /* Update the mgmt connected state if necessary. Be careful with
1668 * conn objects that exist but are not (yet) connected however.
1669 * Only those in BT_CONFIG or BT_CONNECTED states can be
1670 * considered connected.
1673 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1674 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1675 mgmt_device_connected(hdev, conn, 0, name, name_len);
1677 if (discov->state == DISCOVERY_STOPPED)
1680 if (discov->state == DISCOVERY_STOPPING)
1681 goto discov_complete;
1683 if (discov->state != DISCOVERY_RESOLVING)
1686 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1687 /* If the device was not found in a list of found devices names of which
1688 * are pending. there is no need to continue resolving a next name as it
1689 * will be done upon receiving another Remote Name Request Complete
1696 e->name_state = NAME_KNOWN;
1697 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1698 e->data.rssi, name, name_len);
1700 e->name_state = NAME_NOT_KNOWN;
1703 if (hci_resolve_next_name(hdev))
1707 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1710 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1712 struct hci_cp_remote_name_req *cp;
1713 struct hci_conn *conn;
1715 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1717 /* If successful wait for the name req complete event before
1718 * checking for the need to do authentication */
1722 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1728 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1730 if (hci_dev_test_flag(hdev, HCI_MGMT))
1731 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1736 if (!hci_outgoing_auth_needed(hdev, conn))
1739 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1740 struct hci_cp_auth_requested auth_cp;
1742 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1744 auth_cp.handle = __cpu_to_le16(conn->handle);
1745 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1746 sizeof(auth_cp), &auth_cp);
1750 hci_dev_unlock(hdev);
1753 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1755 struct hci_cp_read_remote_features *cp;
1756 struct hci_conn *conn;
1758 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1763 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1769 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1771 if (conn->state == BT_CONFIG) {
1772 hci_connect_cfm(conn, status);
1773 hci_conn_drop(conn);
1777 hci_dev_unlock(hdev);
1780 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1782 struct hci_cp_read_remote_ext_features *cp;
1783 struct hci_conn *conn;
1785 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1790 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1796 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1798 if (conn->state == BT_CONFIG) {
1799 hci_connect_cfm(conn, status);
1800 hci_conn_drop(conn);
1804 hci_dev_unlock(hdev);
1807 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1809 struct hci_cp_setup_sync_conn *cp;
1810 struct hci_conn *acl, *sco;
1813 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1818 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1822 handle = __le16_to_cpu(cp->handle);
1824 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1828 acl = hci_conn_hash_lookup_handle(hdev, handle);
1832 sco->state = BT_CLOSED;
1834 hci_connect_cfm(sco, status);
1839 hci_dev_unlock(hdev);
1842 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1844 struct hci_cp_sniff_mode *cp;
1845 struct hci_conn *conn;
1847 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1852 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1858 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1860 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1862 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1863 hci_sco_setup(conn, status);
1866 hci_dev_unlock(hdev);
1869 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1871 struct hci_cp_exit_sniff_mode *cp;
1872 struct hci_conn *conn;
1874 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1879 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1885 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1887 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1889 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1890 hci_sco_setup(conn, status);
1893 hci_dev_unlock(hdev);
1896 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1898 struct hci_cp_disconnect *cp;
1899 struct hci_conn *conn;
1904 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1910 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1912 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1913 conn->dst_type, status);
1915 hci_dev_unlock(hdev);
1918 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1920 struct hci_cp_le_create_conn *cp;
1921 struct hci_conn *conn;
1923 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1925 /* All connection failure handling is taken care of by the
1926 * hci_le_conn_failed function which is triggered by the HCI
1927 * request completion callbacks used for connecting.
1932 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1938 conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
1939 cp->peer_addr_type);
1943 /* Store the initiator and responder address information which
1944 * is needed for SMP. These values will not change during the
1945 * lifetime of the connection.
1947 conn->init_addr_type = cp->own_address_type;
1948 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1949 bacpy(&conn->init_addr, &hdev->random_addr);
1951 bacpy(&conn->init_addr, &hdev->bdaddr);
1953 conn->resp_addr_type = cp->peer_addr_type;
1954 bacpy(&conn->resp_addr, &cp->peer_addr);
1956 /* We don't want the connection attempt to stick around
1957 * indefinitely since LE doesn't have a page timeout concept
1958 * like BR/EDR. Set a timer for any connection that doesn't use
1959 * the white list for connecting.
1961 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1962 queue_delayed_work(conn->hdev->workqueue,
1963 &conn->le_conn_timeout,
1964 conn->conn_timeout);
1967 hci_dev_unlock(hdev);
1970 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
1972 struct hci_cp_le_read_remote_features *cp;
1973 struct hci_conn *conn;
1975 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1980 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
1986 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1988 if (conn->state == BT_CONFIG) {
1989 hci_connect_cfm(conn, status);
1990 hci_conn_drop(conn);
1994 hci_dev_unlock(hdev);
1997 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1999 struct hci_cp_le_start_enc *cp;
2000 struct hci_conn *conn;
2002 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2009 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2013 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2017 if (conn->state != BT_CONNECTED)
2020 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2021 hci_conn_drop(conn);
2024 hci_dev_unlock(hdev);
2027 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2029 struct hci_cp_switch_role *cp;
2030 struct hci_conn *conn;
2032 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2037 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2043 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2045 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2047 hci_dev_unlock(hdev);
2050 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2052 __u8 status = *((__u8 *) skb->data);
2053 struct discovery_state *discov = &hdev->discovery;
2054 struct inquiry_entry *e;
2056 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2058 hci_conn_check_pending(hdev);
2060 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2063 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2064 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2066 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2071 if (discov->state != DISCOVERY_FINDING)
2074 if (list_empty(&discov->resolve)) {
2075 /* When BR/EDR inquiry is active and no LE scanning is in
2076 * progress, then change discovery state to indicate completion.
2078 * When running LE scanning and BR/EDR inquiry simultaneously
2079 * and the LE scan already finished, then change the discovery
2080 * state to indicate completion.
2082 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2083 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2084 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2088 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2089 if (e && hci_resolve_name(hdev, e) == 0) {
2090 e->name_state = NAME_PENDING;
2091 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2093 /* When BR/EDR inquiry is active and no LE scanning is in
2094 * progress, then change discovery state to indicate completion.
2096 * When running LE scanning and BR/EDR inquiry simultaneously
2097 * and the LE scan already finished, then change the discovery
2098 * state to indicate completion.
2100 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2101 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2102 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2106 hci_dev_unlock(hdev);
2109 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2111 struct inquiry_data data;
2112 struct inquiry_info *info = (void *) (skb->data + 1);
2113 int num_rsp = *((__u8 *) skb->data);
2115 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2117 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2120 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2125 for (; num_rsp; num_rsp--, info++) {
2128 bacpy(&data.bdaddr, &info->bdaddr);
2129 data.pscan_rep_mode = info->pscan_rep_mode;
2130 data.pscan_period_mode = info->pscan_period_mode;
2131 data.pscan_mode = info->pscan_mode;
2132 memcpy(data.dev_class, info->dev_class, 3);
2133 data.clock_offset = info->clock_offset;
2134 data.rssi = HCI_RSSI_INVALID;
2135 data.ssp_mode = 0x00;
2137 flags = hci_inquiry_cache_update(hdev, &data, false);
2139 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2140 info->dev_class, HCI_RSSI_INVALID,
2141 flags, NULL, 0, NULL, 0);
2144 hci_dev_unlock(hdev);
2147 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2149 struct hci_ev_conn_complete *ev = (void *) skb->data;
2150 struct hci_conn *conn;
2152 BT_DBG("%s", hdev->name);
2156 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2158 if (ev->link_type != SCO_LINK)
2161 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2165 conn->type = SCO_LINK;
2169 conn->handle = __le16_to_cpu(ev->handle);
2171 if (conn->type == ACL_LINK) {
2172 conn->state = BT_CONFIG;
2173 hci_conn_hold(conn);
2175 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2176 !hci_find_link_key(hdev, &ev->bdaddr))
2177 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2179 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2181 conn->state = BT_CONNECTED;
2183 hci_debugfs_create_conn(conn);
2184 hci_conn_add_sysfs(conn);
2186 if (test_bit(HCI_AUTH, &hdev->flags))
2187 set_bit(HCI_CONN_AUTH, &conn->flags);
2189 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2190 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2192 /* Get remote features */
2193 if (conn->type == ACL_LINK) {
2194 struct hci_cp_read_remote_features cp;
2195 cp.handle = ev->handle;
2196 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2199 hci_req_update_scan(hdev);
2202 /* Set packet type for incoming connection */
2203 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2204 struct hci_cp_change_conn_ptype cp;
2205 cp.handle = ev->handle;
2206 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2207 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2211 conn->state = BT_CLOSED;
2212 if (conn->type == ACL_LINK)
2213 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2214 conn->dst_type, ev->status);
2217 if (conn->type == ACL_LINK)
2218 hci_sco_setup(conn, ev->status);
2221 hci_connect_cfm(conn, ev->status);
2223 } else if (ev->link_type != ACL_LINK)
2224 hci_connect_cfm(conn, ev->status);
2227 hci_dev_unlock(hdev);
2229 hci_conn_check_pending(hdev);
2232 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2234 struct hci_cp_reject_conn_req cp;
2236 bacpy(&cp.bdaddr, bdaddr);
2237 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2238 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2241 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2243 struct hci_ev_conn_request *ev = (void *) skb->data;
2244 int mask = hdev->link_mode;
2245 struct inquiry_entry *ie;
2246 struct hci_conn *conn;
2249 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2252 /* Reject incoming connection from device with same BD ADDR against
2255 if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
2256 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
2258 hci_reject_conn(hdev, &ev->bdaddr);
2262 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2265 if (!(mask & HCI_LM_ACCEPT)) {
2266 hci_reject_conn(hdev, &ev->bdaddr);
2270 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2272 hci_reject_conn(hdev, &ev->bdaddr);
2276 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2277 * connection. These features are only touched through mgmt so
2278 * only do the checks if HCI_MGMT is set.
2280 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2281 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2282 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2284 hci_reject_conn(hdev, &ev->bdaddr);
2288 /* Connection accepted */
2292 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2294 memcpy(ie->data.dev_class, ev->dev_class, 3);
2296 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2299 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2302 BT_ERR("No memory for new connection");
2303 hci_dev_unlock(hdev);
2308 memcpy(conn->dev_class, ev->dev_class, 3);
2310 hci_dev_unlock(hdev);
2312 if (ev->link_type == ACL_LINK ||
2313 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2314 struct hci_cp_accept_conn_req cp;
2315 conn->state = BT_CONNECT;
2317 bacpy(&cp.bdaddr, &ev->bdaddr);
2319 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2320 cp.role = 0x00; /* Become master */
2322 cp.role = 0x01; /* Remain slave */
2324 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2325 } else if (!(flags & HCI_PROTO_DEFER)) {
2326 struct hci_cp_accept_sync_conn_req cp;
2327 conn->state = BT_CONNECT;
2329 bacpy(&cp.bdaddr, &ev->bdaddr);
2330 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2332 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2333 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2334 cp.max_latency = cpu_to_le16(0xffff);
2335 cp.content_format = cpu_to_le16(hdev->voice_setting);
2336 cp.retrans_effort = 0xff;
2338 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2341 conn->state = BT_CONNECT2;
2342 hci_connect_cfm(conn, 0);
2346 static u8 hci_to_mgmt_reason(u8 err)
2349 case HCI_ERROR_CONNECTION_TIMEOUT:
2350 return MGMT_DEV_DISCONN_TIMEOUT;
2351 case HCI_ERROR_REMOTE_USER_TERM:
2352 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2353 case HCI_ERROR_REMOTE_POWER_OFF:
2354 return MGMT_DEV_DISCONN_REMOTE;
2355 case HCI_ERROR_LOCAL_HOST_TERM:
2356 return MGMT_DEV_DISCONN_LOCAL_HOST;
2358 return MGMT_DEV_DISCONN_UNKNOWN;
2362 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2364 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2366 struct hci_conn_params *params;
2367 struct hci_conn *conn;
2368 bool mgmt_connected;
2371 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2375 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2380 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2381 conn->dst_type, ev->status);
2385 conn->state = BT_CLOSED;
2387 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2389 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2390 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2392 reason = hci_to_mgmt_reason(ev->reason);
2394 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2395 reason, mgmt_connected);
2397 if (conn->type == ACL_LINK) {
2398 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2399 hci_remove_link_key(hdev, &conn->dst);
2401 hci_req_update_scan(hdev);
2404 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2406 switch (params->auto_connect) {
2407 case HCI_AUTO_CONN_LINK_LOSS:
2408 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2412 case HCI_AUTO_CONN_DIRECT:
2413 case HCI_AUTO_CONN_ALWAYS:
2414 list_del_init(¶ms->action);
2415 list_add(¶ms->action, &hdev->pend_le_conns);
2416 hci_update_background_scan(hdev);
2426 hci_disconn_cfm(conn, ev->reason);
2429 /* Re-enable advertising if necessary, since it might
2430 * have been disabled by the connection. From the
2431 * HCI_LE_Set_Advertise_Enable command description in
2432 * the core specification (v4.0):
2433 * "The Controller shall continue advertising until the Host
2434 * issues an LE_Set_Advertise_Enable command with
2435 * Advertising_Enable set to 0x00 (Advertising is disabled)
2436 * or until a connection is created or until the Advertising
2437 * is timed out due to Directed Advertising."
2439 if (type == LE_LINK)
2440 hci_req_reenable_advertising(hdev);
2443 hci_dev_unlock(hdev);
2446 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2448 struct hci_ev_auth_complete *ev = (void *) skb->data;
2449 struct hci_conn *conn;
2451 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2455 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2460 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2462 if (!hci_conn_ssp_enabled(conn) &&
2463 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2464 BT_INFO("re-auth of legacy device is not possible.");
2466 set_bit(HCI_CONN_AUTH, &conn->flags);
2467 conn->sec_level = conn->pending_sec_level;
2470 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2471 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2473 mgmt_auth_failed(conn, ev->status);
2476 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2477 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2479 if (conn->state == BT_CONFIG) {
2480 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2481 struct hci_cp_set_conn_encrypt cp;
2482 cp.handle = ev->handle;
2484 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2487 conn->state = BT_CONNECTED;
2488 hci_connect_cfm(conn, ev->status);
2489 hci_conn_drop(conn);
2492 hci_auth_cfm(conn, ev->status);
2494 hci_conn_hold(conn);
2495 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2496 hci_conn_drop(conn);
2499 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2501 struct hci_cp_set_conn_encrypt cp;
2502 cp.handle = ev->handle;
2504 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2507 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2508 hci_encrypt_cfm(conn, ev->status);
2513 hci_dev_unlock(hdev);
2516 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2518 struct hci_ev_remote_name *ev = (void *) skb->data;
2519 struct hci_conn *conn;
2521 BT_DBG("%s", hdev->name);
2523 hci_conn_check_pending(hdev);
2527 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2529 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2532 if (ev->status == 0)
2533 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2534 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2536 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2542 if (!hci_outgoing_auth_needed(hdev, conn))
2545 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2546 struct hci_cp_auth_requested cp;
2548 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2550 cp.handle = __cpu_to_le16(conn->handle);
2551 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2555 hci_dev_unlock(hdev);
2558 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2559 u16 opcode, struct sk_buff *skb)
2561 const struct hci_rp_read_enc_key_size *rp;
2562 struct hci_conn *conn;
2565 BT_DBG("%s status 0x%02x", hdev->name, status);
2567 if (!skb || skb->len < sizeof(*rp)) {
2568 BT_ERR("%s invalid HCI Read Encryption Key Size response",
2573 rp = (void *)skb->data;
2574 handle = le16_to_cpu(rp->handle);
2578 conn = hci_conn_hash_lookup_handle(hdev, handle);
2582 /* If we fail to read the encryption key size, assume maximum
2583 * (which is the same we do also when this HCI command isn't
2587 BT_ERR("%s failed to read key size for handle %u", hdev->name,
2589 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2591 conn->enc_key_size = rp->key_size;
2594 hci_encrypt_cfm(conn, 0);
2597 hci_dev_unlock(hdev);
2600 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2602 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2603 struct hci_conn *conn;
2605 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2609 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2615 /* Encryption implies authentication */
2616 set_bit(HCI_CONN_AUTH, &conn->flags);
2617 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2618 conn->sec_level = conn->pending_sec_level;
2620 /* P-256 authentication key implies FIPS */
2621 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2622 set_bit(HCI_CONN_FIPS, &conn->flags);
2624 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2625 conn->type == LE_LINK)
2626 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2628 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2629 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2633 /* We should disregard the current RPA and generate a new one
2634 * whenever the encryption procedure fails.
2636 if (ev->status && conn->type == LE_LINK)
2637 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2639 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2641 /* Check link security requirements are met */
2642 if (!hci_conn_check_link_mode(conn))
2643 ev->status = HCI_ERROR_AUTH_FAILURE;
2645 if (ev->status && conn->state == BT_CONNECTED) {
2646 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2647 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2649 /* Notify upper layers so they can cleanup before
2652 hci_encrypt_cfm(conn, ev->status);
2653 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2654 hci_conn_drop(conn);
2658 /* Try reading the encryption key size for encrypted ACL links */
2659 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
2660 struct hci_cp_read_enc_key_size cp;
2661 struct hci_request req;
2663 /* Only send HCI_Read_Encryption_Key_Size if the
2664 * controller really supports it. If it doesn't, assume
2665 * the default size (16).
2667 if (!(hdev->commands[20] & 0x10)) {
2668 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2672 hci_req_init(&req, hdev);
2674 cp.handle = cpu_to_le16(conn->handle);
2675 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
2677 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
2678 BT_ERR("Sending HCI Read Encryption Key Size failed");
2679 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2687 hci_encrypt_cfm(conn, ev->status);
2690 hci_dev_unlock(hdev);
2693 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2694 struct sk_buff *skb)
2696 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2697 struct hci_conn *conn;
2699 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2703 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2706 set_bit(HCI_CONN_SECURE, &conn->flags);
2708 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2710 hci_key_change_cfm(conn, ev->status);
2713 hci_dev_unlock(hdev);
2716 static void hci_remote_features_evt(struct hci_dev *hdev,
2717 struct sk_buff *skb)
2719 struct hci_ev_remote_features *ev = (void *) skb->data;
2720 struct hci_conn *conn;
2722 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2726 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2731 memcpy(conn->features[0], ev->features, 8);
2733 if (conn->state != BT_CONFIG)
2736 if (!ev->status && lmp_ext_feat_capable(hdev) &&
2737 lmp_ext_feat_capable(conn)) {
2738 struct hci_cp_read_remote_ext_features cp;
2739 cp.handle = ev->handle;
2741 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2746 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2747 struct hci_cp_remote_name_req cp;
2748 memset(&cp, 0, sizeof(cp));
2749 bacpy(&cp.bdaddr, &conn->dst);
2750 cp.pscan_rep_mode = 0x02;
2751 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2752 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2753 mgmt_device_connected(hdev, conn, 0, NULL, 0);
2755 if (!hci_outgoing_auth_needed(hdev, conn)) {
2756 conn->state = BT_CONNECTED;
2757 hci_connect_cfm(conn, ev->status);
2758 hci_conn_drop(conn);
2762 hci_dev_unlock(hdev);
2765 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
2766 u16 *opcode, u8 *status,
2767 hci_req_complete_t *req_complete,
2768 hci_req_complete_skb_t *req_complete_skb)
2770 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2772 *opcode = __le16_to_cpu(ev->opcode);
2773 *status = skb->data[sizeof(*ev)];
2775 skb_pull(skb, sizeof(*ev));
2778 case HCI_OP_INQUIRY_CANCEL:
2779 hci_cc_inquiry_cancel(hdev, skb, status);
2782 case HCI_OP_PERIODIC_INQ:
2783 hci_cc_periodic_inq(hdev, skb);
2786 case HCI_OP_EXIT_PERIODIC_INQ:
2787 hci_cc_exit_periodic_inq(hdev, skb);
2790 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2791 hci_cc_remote_name_req_cancel(hdev, skb);
2794 case HCI_OP_ROLE_DISCOVERY:
2795 hci_cc_role_discovery(hdev, skb);
2798 case HCI_OP_READ_LINK_POLICY:
2799 hci_cc_read_link_policy(hdev, skb);
2802 case HCI_OP_WRITE_LINK_POLICY:
2803 hci_cc_write_link_policy(hdev, skb);
2806 case HCI_OP_READ_DEF_LINK_POLICY:
2807 hci_cc_read_def_link_policy(hdev, skb);
2810 case HCI_OP_WRITE_DEF_LINK_POLICY:
2811 hci_cc_write_def_link_policy(hdev, skb);
2815 hci_cc_reset(hdev, skb);
2818 case HCI_OP_READ_STORED_LINK_KEY:
2819 hci_cc_read_stored_link_key(hdev, skb);
2822 case HCI_OP_DELETE_STORED_LINK_KEY:
2823 hci_cc_delete_stored_link_key(hdev, skb);
2826 case HCI_OP_WRITE_LOCAL_NAME:
2827 hci_cc_write_local_name(hdev, skb);
2830 case HCI_OP_READ_LOCAL_NAME:
2831 hci_cc_read_local_name(hdev, skb);
2834 case HCI_OP_WRITE_AUTH_ENABLE:
2835 hci_cc_write_auth_enable(hdev, skb);
2838 case HCI_OP_WRITE_ENCRYPT_MODE:
2839 hci_cc_write_encrypt_mode(hdev, skb);
2842 case HCI_OP_WRITE_SCAN_ENABLE:
2843 hci_cc_write_scan_enable(hdev, skb);
2846 case HCI_OP_READ_CLASS_OF_DEV:
2847 hci_cc_read_class_of_dev(hdev, skb);
2850 case HCI_OP_WRITE_CLASS_OF_DEV:
2851 hci_cc_write_class_of_dev(hdev, skb);
2854 case HCI_OP_READ_VOICE_SETTING:
2855 hci_cc_read_voice_setting(hdev, skb);
2858 case HCI_OP_WRITE_VOICE_SETTING:
2859 hci_cc_write_voice_setting(hdev, skb);
2862 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2863 hci_cc_read_num_supported_iac(hdev, skb);
2866 case HCI_OP_WRITE_SSP_MODE:
2867 hci_cc_write_ssp_mode(hdev, skb);
2870 case HCI_OP_WRITE_SC_SUPPORT:
2871 hci_cc_write_sc_support(hdev, skb);
2874 case HCI_OP_READ_LOCAL_VERSION:
2875 hci_cc_read_local_version(hdev, skb);
2878 case HCI_OP_READ_LOCAL_COMMANDS:
2879 hci_cc_read_local_commands(hdev, skb);
2882 case HCI_OP_READ_LOCAL_FEATURES:
2883 hci_cc_read_local_features(hdev, skb);
2886 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2887 hci_cc_read_local_ext_features(hdev, skb);
2890 case HCI_OP_READ_BUFFER_SIZE:
2891 hci_cc_read_buffer_size(hdev, skb);
2894 case HCI_OP_READ_BD_ADDR:
2895 hci_cc_read_bd_addr(hdev, skb);
2898 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2899 hci_cc_read_page_scan_activity(hdev, skb);
2902 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2903 hci_cc_write_page_scan_activity(hdev, skb);
2906 case HCI_OP_READ_PAGE_SCAN_TYPE:
2907 hci_cc_read_page_scan_type(hdev, skb);
2910 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2911 hci_cc_write_page_scan_type(hdev, skb);
2914 case HCI_OP_READ_DATA_BLOCK_SIZE:
2915 hci_cc_read_data_block_size(hdev, skb);
2918 case HCI_OP_READ_FLOW_CONTROL_MODE:
2919 hci_cc_read_flow_control_mode(hdev, skb);
2922 case HCI_OP_READ_LOCAL_AMP_INFO:
2923 hci_cc_read_local_amp_info(hdev, skb);
2926 case HCI_OP_READ_CLOCK:
2927 hci_cc_read_clock(hdev, skb);
2930 case HCI_OP_READ_INQ_RSP_TX_POWER:
2931 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2934 case HCI_OP_PIN_CODE_REPLY:
2935 hci_cc_pin_code_reply(hdev, skb);
2938 case HCI_OP_PIN_CODE_NEG_REPLY:
2939 hci_cc_pin_code_neg_reply(hdev, skb);
2942 case HCI_OP_READ_LOCAL_OOB_DATA:
2943 hci_cc_read_local_oob_data(hdev, skb);
2946 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2947 hci_cc_read_local_oob_ext_data(hdev, skb);
2950 case HCI_OP_LE_READ_BUFFER_SIZE:
2951 hci_cc_le_read_buffer_size(hdev, skb);
2954 case HCI_OP_LE_READ_LOCAL_FEATURES:
2955 hci_cc_le_read_local_features(hdev, skb);
2958 case HCI_OP_LE_READ_ADV_TX_POWER:
2959 hci_cc_le_read_adv_tx_power(hdev, skb);
2962 case HCI_OP_USER_CONFIRM_REPLY:
2963 hci_cc_user_confirm_reply(hdev, skb);
2966 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2967 hci_cc_user_confirm_neg_reply(hdev, skb);
2970 case HCI_OP_USER_PASSKEY_REPLY:
2971 hci_cc_user_passkey_reply(hdev, skb);
2974 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2975 hci_cc_user_passkey_neg_reply(hdev, skb);
2978 case HCI_OP_LE_SET_RANDOM_ADDR:
2979 hci_cc_le_set_random_addr(hdev, skb);
2982 case HCI_OP_LE_SET_ADV_ENABLE:
2983 hci_cc_le_set_adv_enable(hdev, skb);
2986 case HCI_OP_LE_SET_SCAN_PARAM:
2987 hci_cc_le_set_scan_param(hdev, skb);
2990 case HCI_OP_LE_SET_SCAN_ENABLE:
2991 hci_cc_le_set_scan_enable(hdev, skb);
2994 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2995 hci_cc_le_read_white_list_size(hdev, skb);
2998 case HCI_OP_LE_CLEAR_WHITE_LIST:
2999 hci_cc_le_clear_white_list(hdev, skb);
3002 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3003 hci_cc_le_add_to_white_list(hdev, skb);
3006 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3007 hci_cc_le_del_from_white_list(hdev, skb);
3010 case HCI_OP_LE_READ_SUPPORTED_STATES:
3011 hci_cc_le_read_supported_states(hdev, skb);
3014 case HCI_OP_LE_READ_DEF_DATA_LEN:
3015 hci_cc_le_read_def_data_len(hdev, skb);
3018 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3019 hci_cc_le_write_def_data_len(hdev, skb);
3022 case HCI_OP_LE_READ_MAX_DATA_LEN:
3023 hci_cc_le_read_max_data_len(hdev, skb);
3026 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3027 hci_cc_write_le_host_supported(hdev, skb);
3030 case HCI_OP_LE_SET_ADV_PARAM:
3031 hci_cc_set_adv_param(hdev, skb);
3034 case HCI_OP_READ_RSSI:
3035 hci_cc_read_rssi(hdev, skb);
3038 case HCI_OP_READ_TX_POWER:
3039 hci_cc_read_tx_power(hdev, skb);
3042 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3043 hci_cc_write_ssp_debug_mode(hdev, skb);
3047 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3051 if (*opcode != HCI_OP_NOP)
3052 cancel_delayed_work(&hdev->cmd_timer);
3054 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3055 atomic_set(&hdev->cmd_cnt, 1);
3057 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3060 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3061 queue_work(hdev->workqueue, &hdev->cmd_work);
3064 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3065 u16 *opcode, u8 *status,
3066 hci_req_complete_t *req_complete,
3067 hci_req_complete_skb_t *req_complete_skb)
3069 struct hci_ev_cmd_status *ev = (void *) skb->data;
3071 skb_pull(skb, sizeof(*ev));
3073 *opcode = __le16_to_cpu(ev->opcode);
3074 *status = ev->status;
3077 case HCI_OP_INQUIRY:
3078 hci_cs_inquiry(hdev, ev->status);
3081 case HCI_OP_CREATE_CONN:
3082 hci_cs_create_conn(hdev, ev->status);
3085 case HCI_OP_DISCONNECT:
3086 hci_cs_disconnect(hdev, ev->status);
3089 case HCI_OP_ADD_SCO:
3090 hci_cs_add_sco(hdev, ev->status);
3093 case HCI_OP_AUTH_REQUESTED:
3094 hci_cs_auth_requested(hdev, ev->status);
3097 case HCI_OP_SET_CONN_ENCRYPT:
3098 hci_cs_set_conn_encrypt(hdev, ev->status);
3101 case HCI_OP_REMOTE_NAME_REQ:
3102 hci_cs_remote_name_req(hdev, ev->status);
3105 case HCI_OP_READ_REMOTE_FEATURES:
3106 hci_cs_read_remote_features(hdev, ev->status);
3109 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3110 hci_cs_read_remote_ext_features(hdev, ev->status);
3113 case HCI_OP_SETUP_SYNC_CONN:
3114 hci_cs_setup_sync_conn(hdev, ev->status);
3117 case HCI_OP_SNIFF_MODE:
3118 hci_cs_sniff_mode(hdev, ev->status);
3121 case HCI_OP_EXIT_SNIFF_MODE:
3122 hci_cs_exit_sniff_mode(hdev, ev->status);
3125 case HCI_OP_SWITCH_ROLE:
3126 hci_cs_switch_role(hdev, ev->status);
3129 case HCI_OP_LE_CREATE_CONN:
3130 hci_cs_le_create_conn(hdev, ev->status);
3133 case HCI_OP_LE_READ_REMOTE_FEATURES:
3134 hci_cs_le_read_remote_features(hdev, ev->status);
3137 case HCI_OP_LE_START_ENC:
3138 hci_cs_le_start_enc(hdev, ev->status);
3142 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3146 if (*opcode != HCI_OP_NOP)
3147 cancel_delayed_work(&hdev->cmd_timer);
3149 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3150 atomic_set(&hdev->cmd_cnt, 1);
3152 /* Indicate request completion if the command failed. Also, if
3153 * we're not waiting for a special event and we get a success
3154 * command status we should try to flag the request as completed
3155 * (since for this kind of commands there will not be a command
3159 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3160 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3163 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3164 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Hardware Error event: record the controller's error
 * code and schedule the error-reset work to recover the device. */
3167 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3169 	struct hci_ev_hardware_error *ev = (void *) skb->data;
3171 	hdev->hw_error_code = ev->code;
3173 	queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle the HCI Role Change event: update the connection's role
 * (central/peripheral), clear the pending role-switch flag and confirm
 * the switch to upper layers via hci_role_switch_cfm().
 *
 * NOTE(review): lines missing from this extract (hci_dev_lock(), the
 * "if (conn)" guards, success-status check); code left untouched. */
3176 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3178 	struct hci_ev_role_change *ev = (void *) skb->data;
3179 	struct hci_conn *conn;
3181 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3185 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3188 			conn->role = ev->role;
3190 		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3192 		hci_role_switch_cfm(conn, ev->status, ev->role);
3195 	hci_dev_unlock(hdev);
/* Handle Number of Completed Packets (packet-based flow control): for
 * each handle, credit back the completed packets to the per-type
 * transmit quota (ACL / SCO / LE), capped at the controller's maximum,
 * then kick the TX work to send more queued data. */
3198 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3200 	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only valid in packet-based flow control mode. */
3203 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3204 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate that the skb actually carries num_hndl entries. */
3208 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3209 	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3210 		BT_DBG("%s bad parameters", hdev->name);
3214 	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3216 	for (i = 0; i < ev->num_hndl; i++) {
3217 		struct hci_comp_pkts_info *info = &ev->handles[i];
3218 		struct hci_conn *conn;
3219 		__u16 handle, count;
3221 		handle = __le16_to_cpu(info->handle);
3222 		count = __le16_to_cpu(info->count);
3224 		conn = hci_conn_hash_lookup_handle(hdev, handle);
3228 		conn->sent -= count;
3230 		switch (conn->type) {
3232 			hdev->acl_cnt += count;
3233 			if (hdev->acl_cnt > hdev->acl_pkts)
3234 				hdev->acl_cnt = hdev->acl_pkts;
/* LE links use the dedicated LE buffer pool if the controller has one,
 * otherwise they share the ACL pool. */
3238 			if (hdev->le_pkts) {
3239 				hdev->le_cnt += count;
3240 				if (hdev->le_cnt > hdev->le_pkts)
3241 					hdev->le_cnt = hdev->le_pkts;
3243 				hdev->acl_cnt += count;
3244 				if (hdev->acl_cnt > hdev->acl_pkts)
3245 					hdev->acl_cnt = hdev->acl_pkts;
3250 			hdev->sco_cnt += count;
3251 			if (hdev->sco_cnt > hdev->sco_pkts)
3252 				hdev->sco_cnt = hdev->sco_pkts;
3256 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
3261 	queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a connection handle to a struct hci_conn, honouring the
 * device type: BR/EDR devices look up the connection hash directly,
 * AMP devices go through the channel table (returning chan->conn).
 *
 * NOTE(review): case labels and the chan->conn return are missing from
 * this extract; code left untouched. */
3264 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3267 	struct hci_chan *chan;
3269 	switch (hdev->dev_type) {
3271 		return hci_conn_hash_lookup_handle(hdev, handle);
3273 		chan = hci_chan_lookup_handle(hdev, handle);
3278 		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
/* Handle Number of Completed Data Blocks (block-based flow control,
 * used by AMP controllers): credit completed blocks back to the shared
 * block pool, capped at num_blocks, then kick the TX work. */
3285 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3287 	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* This event is only valid in block-based flow control mode. */
3290 	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3291 		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate that the skb actually carries num_hndl entries. */
3295 	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3296 	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3297 		BT_DBG("%s bad parameters", hdev->name);
3301 	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3304 	for (i = 0; i < ev->num_hndl; i++) {
3305 		struct hci_comp_blocks_info *info = &ev->handles[i];
3306 		struct hci_conn *conn = NULL;
3307 		__u16 handle, block_count;
3309 		handle = __le16_to_cpu(info->handle);
3310 		block_count = __le16_to_cpu(info->blocks);
3312 		conn = __hci_conn_lookup_handle(hdev, handle);
3316 		conn->sent -= block_count;
3318 		switch (conn->type) {
3321 			hdev->block_cnt += block_count;
3322 			if (hdev->block_cnt > hdev->num_blocks)
3323 				hdev->block_cnt = hdev->num_blocks;
3327 			BT_ERR("Unknown type %d conn %p", conn->type, conn);
3332 	queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle the HCI Mode Change event (active <-> sniff): record the new
 * mode, mirror it in the HCI_CONN_POWER_SAVE flag unless the change was
 * locally requested, and perform any deferred SCO setup. */
3335 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3337 	struct hci_ev_mode_change *ev = (void *) skb->data;
3338 	struct hci_conn *conn;
3340 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3344 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3346 		conn->mode = ev->mode;
/* If we did not initiate this mode change, track power-save state:
 * active mode sets POWER_SAVE, sniff clears it. */
3348 		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3350 			if (conn->mode == HCI_CM_ACTIVE)
3351 				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3353 				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
/* SCO setup that waited for the mode change can proceed now. */
3356 		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3357 			hci_sco_setup(conn, ev->status);
3360 	hci_dev_unlock(hdev);
/* Handle the HCI PIN Code Request event: refuse pairing when we are not
 * bondable and did not initiate authentication, otherwise forward the
 * request to user space through mgmt (asking for a 16-digit secure PIN
 * when high security is pending). */
3363 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3365 	struct hci_ev_pin_code_req *ev = (void *) skb->data;
3366 	struct hci_conn *conn;
3368 	BT_DBG("%s", hdev->name);
3372 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Re-pairing on an existing connection: keep the link alive for the
 * duration of the pairing timeout. */
3376 	if (conn->state == BT_CONNECTED) {
3377 		hci_conn_hold(conn);
3378 		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3379 		hci_conn_drop(conn);
3382 	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3383 	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3384 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3385 			     sizeof(ev->bdaddr), &ev->bdaddr);
3386 	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3389 		if (conn->pending_sec_level == BT_SECURITY_HIGH)
3394 		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3398 	hci_dev_unlock(hdev);
/* Record the link key type and PIN length on the connection and derive
 * the resulting pending security level from the key type: legacy
 * combination keys rate MEDIUM (HIGH only with a 16-char PIN —
 * presumably checked in the elided branch, TODO confirm), unauthenticated
 * SSP keys rate MEDIUM, authenticated P-192 HIGH and P-256 FIPS. */
3401 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
/* A "changed combination" key keeps the connection's previous type. */
3403 	if (key_type == HCI_LK_CHANGED_COMBINATION)
3406 	conn->pin_length = pin_len;
3407 	conn->key_type = key_type;
3410 	case HCI_LK_LOCAL_UNIT:
3411 	case HCI_LK_REMOTE_UNIT:
3412 	case HCI_LK_DEBUG_COMBINATION:
3414 	case HCI_LK_COMBINATION:
3416 			conn->pending_sec_level = BT_SECURITY_HIGH;
3418 			conn->pending_sec_level = BT_SECURITY_MEDIUM;
3420 	case HCI_LK_UNAUTH_COMBINATION_P192:
3421 	case HCI_LK_UNAUTH_COMBINATION_P256:
3422 		conn->pending_sec_level = BT_SECURITY_MEDIUM;
3424 	case HCI_LK_AUTH_COMBINATION_P192:
3425 		conn->pending_sec_level = BT_SECURITY_HIGH;
3427 	case HCI_LK_AUTH_COMBINATION_P256:
3428 		conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle the HCI Link Key Request event: look up the stored key for the
 * peer and reply with it, unless the key is too weak for the security
 * level being established — in that case (or when no key is stored)
 * send a negative reply so pairing is performed instead. */
3433 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3435 	struct hci_ev_link_key_req *ev = (void *) skb->data;
3436 	struct hci_cp_link_key_reply cp;
3437 	struct hci_conn *conn;
3438 	struct link_key *key;
3440 	BT_DBG("%s", hdev->name);
/* Without mgmt the kernel does not manage keys itself. */
3442 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3447 	key = hci_find_link_key(hdev, &ev->bdaddr);
3449 		BT_DBG("%s link key not found for %pMR", hdev->name,
3454 	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3457 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3459 		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* An unauthenticated key cannot satisfy a MITM-protected request. */
3461 		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3462 		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3463 		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3464 			BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* A legacy combination key from a short PIN is too weak for HIGH/FIPS. */
3468 		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3469 		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
3470 		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
3471 			BT_DBG("%s ignoring key unauthenticated for high security",
3476 		conn_set_key(conn, key->type, key->pin_len);
3479 	bacpy(&cp.bdaddr, &ev->bdaddr);
3480 	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3482 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3484 	hci_dev_unlock(hdev);
/* Negative-reply path (label elided in this extract). */
3489 	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3490 	hci_dev_unlock(hdev);
/* Handle the HCI Link Key Notification event: a new link key was
 * generated by pairing.  Rejects an all-zero key (CVE-2020-26555
 * mitigation), stores the key, notifies user space via mgmt, and drops
 * debug keys unless HCI_KEEP_DEBUG_KEYS is set. */
3493 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3495 	struct hci_ev_link_key_notify *ev = (void *) skb->data;
3496 	struct hci_conn *conn;
3497 	struct link_key *key;
3501 	BT_DBG("%s", hdev->name);
3505 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3509 	/* Ignore NULL link key against CVE-2020-26555 */
/* crypto_memneq is a constant-time compare; an all-zero key indicates
 * a BR/EDR key-injection attack, so tear the connection down. */
3510 	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
3511 		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
3513 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3514 		hci_conn_drop(conn);
/* Keep the connection alive while the key is being processed. */
3518 	hci_conn_hold(conn);
3519 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3520 	hci_conn_drop(conn);
3522 	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3523 	conn_set_key(conn, ev->key_type, conn->pin_length);
3525 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3528 	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3529 			       ev->key_type, pin_len, &persistent);
3533 	/* Update connection information since adding the key will have
3534 	 * fixed up the type in the case of changed combination keys.
3536 	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3537 		conn_set_key(conn, key->type, key->pin_len);
3539 	mgmt_new_link_key(hdev, key, persistent);
3541 	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3542 	 * is set. If it's not set simply remove the key from the kernel
3543 	 * list (we've still notified user space about it but with
3544 	 * store_hint being 0).
3546 	if (key->type == HCI_LK_DEBUG_COMBINATION &&
3547 	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3548 		list_del_rcu(&key->list);
3549 		kfree_rcu(key, rcu);
/* Mirror persistence into the FLUSH_KEY flag (branch header elided). */
3554 		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3556 		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3559 	hci_dev_unlock(hdev);
/* Handle the HCI Read Clock Offset Complete event: on success, cache
 * the peer's clock offset in the inquiry cache to speed up future
 * connections (paging). */
3562 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3564 	struct hci_ev_clock_offset *ev = (void *) skb->data;
3565 	struct hci_conn *conn;
3567 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3571 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3572 	if (conn && !ev->status) {
3573 		struct inquiry_entry *ie;
3575 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3577 			ie->data.clock_offset = ev->clock_offset;
3578 			ie->timestamp = jiffies;
3582 	hci_dev_unlock(hdev);
/* Handle the HCI Connection Packet Type Changed event: on success,
 * record the newly negotiated packet types on the connection. */
3585 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3587 	struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3588 	struct hci_conn *conn;
3590 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3594 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3595 	if (conn && !ev->status)
3596 		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3598 	hci_dev_unlock(hdev);
/* Handle the HCI Page Scan Repetition Mode Change event: refresh the
 * peer's page-scan repetition mode in the inquiry cache. */
3601 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3603 	struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3604 	struct inquiry_entry *ie;
3606 	BT_DBG("%s", hdev->name);
3610 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3612 		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3613 		ie->timestamp = jiffies;
3616 	hci_dev_unlock(hdev);
/* Handle the Inquiry Result with RSSI event.  Two wire formats exist:
 * one that additionally carries the page-scan mode and one that does
 * not; they are distinguished by dividing the payload size by the
 * response count.  Each response updates the inquiry cache and is
 * reported to mgmt as a found device. */
3619 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3620 					     struct sk_buff *skb)
3622 	struct inquiry_data data;
/* First payload byte is the number of responses that follow. */
3623 	int num_rsp = *((__u8 *) skb->data);
3625 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results from a periodic inquiry are not forwarded. */
3630 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Per-entry size identifies which of the two formats was sent. */
3635 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3636 		struct inquiry_info_with_rssi_and_pscan_mode *info;
3637 		info = (void *) (skb->data + 1);
3639 		if (skb->len < num_rsp * sizeof(*info) + 1)
3642 		for (; num_rsp; num_rsp--, info++) {
3645 			bacpy(&data.bdaddr, &info->bdaddr);
3646 			data.pscan_rep_mode	= info->pscan_rep_mode;
3647 			data.pscan_period_mode	= info->pscan_period_mode;
3648 			data.pscan_mode		= info->pscan_mode;
3649 			memcpy(data.dev_class, info->dev_class, 3);
3650 			data.clock_offset	= info->clock_offset;
3651 			data.rssi		= info->rssi;
3652 			data.ssp_mode		= 0x00;
3654 			flags = hci_inquiry_cache_update(hdev, &data, false);
3656 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3657 					  info->dev_class, info->rssi,
3658 					  flags, NULL, 0, NULL, 0);
3661 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3663 		if (skb->len < num_rsp * sizeof(*info) + 1)
3666 		for (; num_rsp; num_rsp--, info++) {
3669 			bacpy(&data.bdaddr, &info->bdaddr);
3670 			data.pscan_rep_mode	= info->pscan_rep_mode;
3671 			data.pscan_period_mode	= info->pscan_period_mode;
/* This format carries no pscan_mode; default it to zero. */
3672 			data.pscan_mode		= 0x00;
3673 			memcpy(data.dev_class, info->dev_class, 3);
3674 			data.clock_offset	= info->clock_offset;
3675 			data.rssi		= info->rssi;
3676 			data.ssp_mode		= 0x00;
3678 			flags = hci_inquiry_cache_update(hdev, &data, false);
3680 			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3681 					  info->dev_class, info->rssi,
3682 					  flags, NULL, 0, NULL, 0);
3687 	hci_dev_unlock(hdev);
/* Handle the Read Remote Extended Features Complete event: store the
 * remote feature page, derive SSP/SC enablement from host features
 * (page 1), and — while the connection is still in BT_CONFIG — continue
 * setup with a remote name request or connection confirmation. */
3690 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3691 					struct sk_buff *skb)
3693 	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3694 	struct hci_conn *conn;
3696 	BT_DBG("%s", hdev->name);
3700 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3704 	if (ev->page < HCI_MAX_PAGES)
3705 		memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries host features: SSP and Secure Connections support. */
3707 	if (!ev->status && ev->page == 0x01) {
3708 		struct inquiry_entry *ie;
3710 		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3712 			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3714 		if (ev->features[0] & LMP_HOST_SSP) {
3715 			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3717 			/* It is mandatory by the Bluetooth specification that
3718 			 * Extended Inquiry Results are only used when Secure
3719 			 * Simple Pairing is enabled, but some devices violate
3722 			 * To make these devices work, the internal SSP
3723 			 * enabled flag needs to be cleared if the remote host
3724 			 * features do not indicate SSP support */
3725 			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3728 		if (ev->features[0] & LMP_HOST_SC)
3729 			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3732 	if (conn->state != BT_CONFIG)
/* If mgmt has not been told about the connection yet, resolve the
 * remote name first; otherwise announce the connection. */
3735 	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3736 		struct hci_cp_remote_name_req cp;
3737 		memset(&cp, 0, sizeof(cp));
3738 		bacpy(&cp.bdaddr, &conn->dst);
3739 		cp.pscan_rep_mode = 0x02;
3740 		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3741 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3742 		mgmt_device_connected(hdev, conn, 0, NULL, 0);
/* No outgoing authentication required: connection setup is done. */
3744 	if (!hci_outgoing_auth_needed(hdev, conn)) {
3745 		conn->state = BT_CONNECTED;
3746 		hci_connect_cfm(conn, ev->status);
3747 		hci_conn_drop(conn);
3751 	hci_dev_unlock(hdev);
/* Handle the Synchronous Connection Complete event (SCO/eSCO): on
 * success register the connection (guarding against duplicate complete
 * events that could corrupt state); on specific negotiation failures
 * retry setup with a restricted packet-type mask; otherwise close the
 * connection and confirm the result to upper layers. */
3754 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3755 				       struct sk_buff *skb)
3757 	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3758 	struct hci_conn *conn;
3760 	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3764 	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3766 		if (ev->link_type == ESCO_LINK)
3769 		/* When the link type in the event indicates SCO connection
3770 		 * and lookup of the connection object fails, then check
3771 		 * if an eSCO connection object exists.
3773 		 * The core limits the synchronous connections to either
3774 		 * SCO or eSCO. The eSCO connection is preferred and tried
3775 		 * to be setup first and until successfully established,
3776 		 * the link type will be hinted as eSCO.
3778 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3783 	switch (ev->status) {
3785 		/* The synchronous connection complete event should only be
3786 		 * sent once per new connection. Receiving a successful
3787 		 * complete event when the connection status is already
3788 		 * BT_CONNECTED means that the device is misbehaving and sent
3789 		 * multiple complete event packets for the same new connection.
3791 		 * Registering the device more than once can corrupt kernel
3792 		 * memory, hence upon detecting this invalid event, we report
3793 		 * an error and ignore the packet.
3795 		if (conn->state == BT_CONNECTED) {
3796 			bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
3800 		conn->handle = __le16_to_cpu(ev->handle);
3801 		conn->state  = BT_CONNECTED;
3802 		conn->type   = ev->link_type;
3804 		hci_debugfs_create_conn(conn);
3805 		hci_conn_add_sysfs(conn);
/* These negotiation errors are worth one retry with a reduced
 * packet-type mask (eSCO/EDR bits from the controller's capabilities). */
3808 	case 0x10:	/* Connection Accept Timeout */
3809 	case 0x0d:	/* Connection Rejected due to Limited Resources */
3810 	case 0x11:	/* Unsupported Feature or Parameter Value */
3811 	case 0x1c:	/* SCO interval rejected */
3812 	case 0x1a:	/* Unsupported Remote Feature */
3813 	case 0x1e:	/* Invalid LMP Parameters */
3814 	case 0x1f:	/* Unspecified error */
3815 	case 0x20:	/* Unsupported LMP Parameter value */
3817 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3818 					 (hdev->esco_type & EDR_ESCO_MASK);
3819 			if (hci_setup_sync(conn, conn->link->handle))
3825 		conn->state = BT_CLOSED;
3829 	hci_connect_cfm(conn, ev->status);
3834 	hci_dev_unlock(hdev);
/* Return the significant length of an EIR (Extended Inquiry Response)
 * buffer by walking its length-prefixed fields; each field occupies
 * field_len + 1 bytes (one byte of length plus the data).
 *
 * NOTE(review): the zero-length-field termination check and return
 * statements are elided in this extract; code left untouched. */
3837 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3841 	while (parsed < eir_len) {
3842 		u8 field_len = eir[0];
3847 		parsed += field_len + 1;
3848 		eir += field_len + 1;
/* Handle the Extended Inquiry Result event: for each response, update
 * the inquiry cache (name-known state derived from the EIR data when
 * mgmt is active) and report the device to mgmt including the
 * significant portion of its EIR payload. */
3854 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3855 					    struct sk_buff *skb)
3857 	struct inquiry_data data;
3858 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
/* First payload byte is the number of responses that follow. */
3859 	int num_rsp = *((__u8 *) skb->data);
3862 	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3864 	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
/* Results from a periodic inquiry are not forwarded. */
3867 	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3872 	for (; num_rsp; num_rsp--, info++) {
3876 		bacpy(&data.bdaddr, &info->bdaddr);
3877 		data.pscan_rep_mode	= info->pscan_rep_mode;
3878 		data.pscan_period_mode	= info->pscan_period_mode;
3879 		data.pscan_mode		= 0x00;
3880 		memcpy(data.dev_class, info->dev_class, 3);
3881 		data.clock_offset	= info->clock_offset;
3882 		data.rssi		= info->rssi;
/* EIR implies Secure Simple Pairing support. */
3883 		data.ssp_mode		= 0x01;
3885 		if (hci_dev_test_flag(hdev, HCI_MGMT))
3886 			name_known = eir_get_data(info->data,
3888 						  EIR_NAME_COMPLETE, NULL);
3892 		flags = hci_inquiry_cache_update(hdev, &data, name_known);
3894 		eir_len = eir_get_length(info->data, sizeof(info->data));
3896 		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3897 				  info->dev_class, info->rssi,
3898 				  flags, info->data, eir_len, NULL, 0);
3901 	hci_dev_unlock(hdev);
/* Handle the Encryption Key Refresh Complete event.  Only LE links are
 * processed here (BR/EDR handles this via the auth_complete event): on
 * success, promote the pending security level; on failure during an
 * established connection, disconnect with an authentication failure. */
3904 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3905 					 struct sk_buff *skb)
3907 	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3908 	struct hci_conn *conn;
3910 	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3911 	       __le16_to_cpu(ev->handle));
3915 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3919 	/* For BR/EDR the necessary steps are taken through the
3920 	 * auth_complete event.
3922 	if (conn->type != LE_LINK)
3926 		conn->sec_level = conn->pending_sec_level;
3928 	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3930 	if (ev->status && conn->state == BT_CONNECTED) {
3931 		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3932 		hci_conn_drop(conn);
/* Still configuring: finish setup and confirm; otherwise report the
 * authentication result and age out the connection normally. */
3936 	if (conn->state == BT_CONFIG) {
3938 			conn->state = BT_CONNECTED;
3940 		hci_connect_cfm(conn, ev->status);
3941 		hci_conn_drop(conn);
3943 		hci_auth_cfm(conn, ev->status);
3945 		hci_conn_hold(conn);
3946 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3947 		hci_conn_drop(conn);
3951 	hci_dev_unlock(hdev);
/* Compute the authentication requirement to use in an IO Capability
 * reply, combining the remote requirement with our own.  Bit 0 of the
 * auth value is the MITM-protection flag. */
3954 static u8 hci_get_auth_req(struct hci_conn *conn)
3956 	/* If remote requests no-bonding follow that lead */
3957 	if (conn->remote_auth == HCI_AT_NO_BONDING ||
3958 	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3959 		return conn->remote_auth | (conn->auth_type & 0x01);
3961 	/* If both remote and local have enough IO capabilities, require
/* ...MITM protection (upstream comment continues; line elided). */
3964 	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3965 	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3966 		return conn->remote_auth | 0x01;
3968 	/* No MITM protection possible so ignore remote requirement */
3969 	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Determine the OOB-data-present value for an IO Capability reply:
 * whether (and which) out-of-band pairing data we hold for the peer.
 * With Secure Connections Only, the P-256 values must be non-zero;
 * without SC support, the P-192 values are what counts. */
3972 static u8 bredr_oob_data_present(struct hci_conn *conn)
3974 	struct hci_dev *hdev = conn->hdev;
3975 	struct oob_data *data;
3977 	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3981 	if (bredr_sc_enabled(hdev)) {
3982 		/* When Secure Connections is enabled, then just
3983 		 * return the present value stored with the OOB
3984 		 * data. The stored value contains the right present
3985 		 * information. However it can only be trusted when
3986 		 * not in Secure Connection Only mode.
3988 		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3989 			return data->present;
3991 		/* When Secure Connections Only mode is enabled, then
3992 		 * the P-256 values are required. If they are not
3993 		 * available, then do not declare that OOB data is
/* crypto_memneq() == 0 means the stored value is all zeros, i.e. absent. */
3996 		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
3997 		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
4003 	/* When Secure Connections is not enabled or actually
4004 	 * not supported by the hardware, then check that if
4005 	 * P-192 data values are present.
4007 	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
4008 	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
/* Handle the IO Capability Request event (SSP pairing): if pairing is
 * allowed (bondable, we initiated, or the remote wants no bonding),
 * answer with our IO capability, derived authentication requirements
 * and OOB-data presence; otherwise send a negative reply. */
4015 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4017 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
4018 	struct hci_conn *conn;
4020 	BT_DBG("%s", hdev->name);
4024 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4025 	if (!conn || !hci_conn_ssp_enabled(conn))
4028 	hci_conn_hold(conn);
4030 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4033 	/* Allow pairing if we're pairable, the initiators of the
4034 	 * pairing or if the remote is not requesting bonding.
4036 	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4037 	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4038 	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4039 		struct hci_cp_io_capability_reply cp;
4041 		bacpy(&cp.bdaddr, &ev->bdaddr);
4042 		/* Change the IO capability from KeyboardDisplay
4043 		 * to DisplayYesNo as it is not supported by BT spec. */
4044 		cp.capability = (conn->io_capability == 0x04) ?
4045 				HCI_IO_DISPLAY_YESNO : conn->io_capability;
4047 		/* If we are initiators, there is no remote information yet */
4048 		if (conn->remote_auth == 0xff) {
4049 			/* Request MITM protection if our IO caps allow it
4050 			 * except for the no-bonding case.
4052 			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4053 			    conn->auth_type != HCI_AT_NO_BONDING)
4054 				conn->auth_type |= 0x01;
4056 			conn->auth_type = hci_get_auth_req(conn);
4059 		/* If we're not bondable, force one of the non-bondable
4060 		 * authentication requirement values.
4062 		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4063 			conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4065 		cp.authentication = conn->auth_type;
4066 		cp.oob_data = bredr_oob_data_present(conn);
4068 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4071 		struct hci_cp_io_capability_neg_reply cp;
4073 		bacpy(&cp.bdaddr, &ev->bdaddr);
4074 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4076 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4081 	hci_dev_unlock(hdev);
/* Handle the IO Capability Response event: record the remote device's
 * IO capability and authentication requirements for the ongoing SSP
 * pairing. */
4083 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4085 	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4086 	struct hci_conn *conn;
4088 	BT_DBG("%s", hdev->name);
4092 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4096 	conn->remote_cap = ev->capability;
4097 	conn->remote_auth = ev->authentication;
4100 	hci_dev_unlock(hdev);
/* Handle the User Confirmation Request event (SSP numeric comparison):
 * reject when we demand MITM but the remote cannot provide it;
 * auto-accept (optionally after a configurable delay) when no side
 * requires MITM; otherwise hand the request to user space via mgmt,
 * with confirm_hint signalling whether only authorization is needed. */
4103 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4104 					 struct sk_buff *skb)
4106 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4107 	int loc_mitm, rem_mitm, confirm_hint = 0;
4108 	struct hci_conn *conn;
4110 	BT_DBG("%s", hdev->name);
4114 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4117 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM flag on each side. */
4121 	loc_mitm = (conn->auth_type & 0x01);
4122 	rem_mitm = (conn->remote_auth & 0x01);
4124 	/* If we require MITM but the remote device can't provide that
4125 	 * (it has NoInputNoOutput) then reject the confirmation
4126 	 * request. We check the security level here since it doesn't
4127 	 * necessarily match conn->auth_type.
4129 	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4130 	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4131 		BT_DBG("Rejecting request: remote device can't provide MITM");
4132 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4133 			     sizeof(ev->bdaddr), &ev->bdaddr);
4137 	/* If no side requires MITM protection; auto-accept */
4138 	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4139 	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4141 		/* If we're not the initiators request authorization to
4142 		 * proceed from user space (mgmt_user_confirm with
4143 		 * confirm_hint set to 1). The exception is if neither
4144 		 * side had MITM or if the local IO capability is
4145 		 * NoInputNoOutput, in which case we do auto-accept
4147 		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4148 		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4149 		    (loc_mitm || rem_mitm)) {
4150 			BT_DBG("Confirming auto-accept as acceptor");
4155 		BT_DBG("Auto-accept of user confirmation with %ums delay",
4156 		       hdev->auto_accept_delay);
4158 		if (hdev->auto_accept_delay > 0) {
4159 			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4160 			queue_delayed_work(conn->hdev->workqueue,
4161 					   &conn->auto_accept_work, delay);
4165 		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4166 			     sizeof(ev->bdaddr), &ev->bdaddr);
4171 	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4172 				  le32_to_cpu(ev->passkey), confirm_hint);
4175 	hci_dev_unlock(hdev);
/* Handle the User Passkey Request event: forward to user space via mgmt
 * (there is nothing the kernel can answer on its own). */
4178 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4179 					 struct sk_buff *skb)
4181 	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4183 	BT_DBG("%s", hdev->name);
4185 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4186 		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle the User Passkey Notification event: cache the passkey to be
 * displayed, reset the entered-digit counter and notify user space. */
4189 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4190 					struct sk_buff *skb)
4192 	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4193 	struct hci_conn *conn;
4195 	BT_DBG("%s", hdev->name);
4197 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4201 	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4202 	conn->passkey_entered = 0;
4204 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4205 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4206 					 conn->dst_type, conn->passkey_notify,
4207 					 conn->passkey_entered);
/* Handle the Keypress Notification event: track how many passkey digits
 * the remote has entered/erased and relay the progress to user space so
 * a UI can display it. */
4210 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4212 	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4213 	struct hci_conn *conn;
4215 	BT_DBG("%s", hdev->name);
4217 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4222 	case HCI_KEYPRESS_STARTED:
4223 		conn->passkey_entered = 0;
4226 	case HCI_KEYPRESS_ENTERED:
4227 		conn->passkey_entered++;
4230 	case HCI_KEYPRESS_ERASED:
4231 		conn->passkey_entered--;
4234 	case HCI_KEYPRESS_CLEARED:
4235 		conn->passkey_entered = 0;
4238 	case HCI_KEYPRESS_COMPLETED:
4242 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4243 		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4244 					 conn->dst_type, conn->passkey_notify,
4245 					 conn->passkey_entered);
/* Handle the Simple Pairing Complete event: reset the remembered remote
 * auth requirement and, when we were not the authentication initiator,
 * report a pairing failure to mgmt (initiator failures are reported via
 * the auth_complete path instead, avoiding duplicates). */
4248 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4249 					 struct sk_buff *skb)
4251 	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4252 	struct hci_conn *conn;
4254 	BT_DBG("%s", hdev->name);
4258 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4259 	if (!conn || !hci_conn_ssp_enabled(conn))
4262 	/* Reset the authentication requirement to unknown */
4263 	conn->remote_auth = 0xff;
4265 	/* To avoid duplicate auth_failed events to user space we check
4266 	 * the HCI_CONN_AUTH_PEND flag which will be set if we
4267 	 * initiated the authentication. A traditional auth_complete
4268 	 * event gets always produced as initiator and is also mapped to
4269 	 * the mgmt_auth_failed event */
4270 	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4271 		mgmt_auth_failed(conn, ev->status);
/* Balances the hci_conn_hold() taken at IO capability request time. */
4273 	hci_conn_drop(conn);
4276 	hci_dev_unlock(hdev);
/* Handle the Remote Host Supported Features Notification event: store
 * the host feature page (page 1) on any existing connection and refresh
 * the peer's SSP mode in the inquiry cache. */
4279 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4280 					 struct sk_buff *skb)
4282 	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4283 	struct inquiry_entry *ie;
4284 	struct hci_conn *conn;
4286 	BT_DBG("%s", hdev->name);
4290 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4292 		memcpy(conn->features[1], ev->features, 8);
4294 	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4296 		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4298 	hci_dev_unlock(hdev);
/* Handle the Remote OOB Data Request event: reply with the stored
 * out-of-band hash/randomizer for the peer — the extended (P-192 +
 * P-256) form when Secure Connections is enabled (zeroing the P-192
 * values in SC-Only mode), the legacy P-192 form otherwise — or send a
 * negative reply when no OOB data is stored. */
4301 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4302 					    struct sk_buff *skb)
4304 	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4305 	struct oob_data *data;
4307 	BT_DBG("%s", hdev->name);
4311 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4314 	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4316 		struct hci_cp_remote_oob_data_neg_reply cp;
4318 		bacpy(&cp.bdaddr, &ev->bdaddr);
4319 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4324 	if (bredr_sc_enabled(hdev)) {
4325 		struct hci_cp_remote_oob_ext_data_reply cp;
4327 		bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-Only mode the legacy P-192 values must not be used. */
4328 		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4329 			memset(cp.hash192, 0, sizeof(cp.hash192));
4330 			memset(cp.rand192, 0, sizeof(cp.rand192));
4332 			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4333 			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4335 		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4336 		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4338 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4341 		struct hci_cp_remote_oob_data_reply cp;
4343 		bacpy(&cp.bdaddr, &ev->bdaddr);
4344 		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4345 		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4347 		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4352 	hci_dev_unlock(hdev);
4355 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event handler (High Speed support only):
 * looks up the physical-link connection and kicks off reading the
 * local AMP association data for it.
 * NOTE(review): the '!hcon' guard visible upstream is not in this
 * elided chunk -- verify against the full file.
 */
4356 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4358 struct hci_ev_channel_selected *ev = (void *)skb->data;
4359 struct hci_conn *hcon;
4361 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4363 skb_pull(skb, sizeof(*ev));
4365 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4369 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event handler: marks the physical link
 * connected, binds it to the BR/EDR connection owned by the AMP
 * manager, and confirms the physical link to the AMP layer.
 * NOTE(review): the status-check and failure paths appear elided
 * here (multiple unlocks with no visible branches) -- verify against
 * the full file.
 */
4372 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4373 struct sk_buff *skb)
4375 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4376 struct hci_conn *hcon, *bredr_hcon;
4378 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4383 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4385 hci_dev_unlock(hdev);
4389 if (!hcon->amp_mgr) {
4390 hci_dev_unlock(hdev);
4396 hci_dev_unlock(hdev);
/* The companion BR/EDR link is reached through the AMP manager's
 * L2CAP connection.
 */
4400 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4402 hcon->state = BT_CONNECTED;
4403 bacpy(&hcon->dst, &bredr_hcon->dst);
4405 hci_conn_hold(hcon);
4406 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4407 hci_conn_drop(hcon);
4409 hci_debugfs_create_conn(hcon);
4410 hci_conn_add_sysfs(hcon);
4412 amp_physical_cfm(bredr_hcon, hcon);
4414 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event handler: creates an hci_chan for
 * the new logical link and, when a BR/EDR L2CAP channel is waiting
 * on the AMP manager, confirms the logical link to L2CAP.
 * NOTE(review): the status check and '!hcon'/'!hchan' error paths
 * appear elided in this chunk -- verify against the full file.
 */
4417 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4419 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4420 struct hci_conn *hcon;
4421 struct hci_chan *hchan;
4422 struct amp_mgr *mgr;
4424 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4425 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4428 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4432 /* Create AMP hchan */
4433 hchan = hci_chan_create(hcon);
4437 hchan->handle = le16_to_cpu(ev->handle);
4440 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4442 mgr = hcon->amp_mgr;
4443 if (mgr && mgr->bredr_chan) {
4444 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4446 l2cap_chan_lock(bredr_chan);
/* AMP links use block-based flow control, so the channel MTU comes
 * from the controller's block MTU.
 */
4448 bredr_chan->conn->mtu = hdev->block_mtu;
4449 l2cap_logical_cfm(bredr_chan, hchan, 0);
4450 hci_conn_hold(hcon);
4452 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnect Logical Link Complete event handler: tears down the
 * hci_chan that represented the logical link, but only for channels
 * marked as AMP.
 */
4456 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4457 struct sk_buff *skb)
4459 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4460 struct hci_chan *hchan;
4462 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4463 le16_to_cpu(ev->handle), ev->status);
/* NOTE(review): a status check before the lookup appears elided in
 * this chunk -- verify against the full file.
 */
4470 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4471 if (!hchan || !hchan->amp)
4474 amp_destroy_logical_link(hchan, ev->reason);
4477 hci_dev_unlock(hdev);
/* AMP Disconnect Physical Link Complete event handler: closes the
 * AMP physical-link connection and notifies upper layers of the
 * disconnection reason.
 */
4480 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4481 struct sk_buff *skb)
4483 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4484 struct hci_conn *hcon;
4486 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
/* NOTE(review): an early-return on ev->status appears elided here --
 * verify against the full file.
 */
4493 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4494 if (hcon && hcon->type == AMP_LINK) {
4495 hcon->state = BT_CLOSED;
4496 hci_disconn_cfm(hcon, ev->reason);
4500 hci_dev_unlock(hdev);
/* LE Connection Complete event handler: creates or finds the
 * hci_conn for the new LE link, records initiator/responder address
 * information, resolves RPAs to identity addresses, enforces the
 * blacklist, notifies mgmt, and either starts the remote-features
 * exchange or transitions straight to connected.
 * NOTE(review): this chunk is elided -- role checks, 'if (conn)'
 * guards, goto labels and braces visible upstream are missing here.
 * Verify against the full file before modifying.
 */
4504 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4506 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4507 struct hci_conn_params *params;
4508 struct hci_conn *conn;
4509 struct smp_irk *irk;
4512 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4516 /* All controllers implicitly stop advertising in the event of a
4517 * connection, so ensure that the state bit is cleared.
4519 hci_dev_clear_flag(hdev, HCI_LE_ADV);
/* Reuse the connection object created by the connect attempt, or
 * allocate a fresh one for an incoming/white-list connection.
 */
4521 conn = hci_lookup_le_connect(hdev);
4523 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4525 BT_ERR("No memory for new connection");
4529 conn->dst_type = ev->bdaddr_type;
4531 /* If we didn't have a hci_conn object previously
4532 * but we're in master role this must be something
4533 * initiated using a white list. Since white list based
4534 * connections are not "first class citizens" we don't
4535 * have full tracking of them. Therefore, we go ahead
4536 * with a "best effort" approach of determining the
4537 * initiator address based on the HCI_PRIVACY flag.
4540 conn->resp_addr_type = ev->bdaddr_type;
4541 bacpy(&conn->resp_addr, &ev->bdaddr);
4542 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4543 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4544 bacpy(&conn->init_addr, &hdev->rpa);
4546 hci_copy_identity_address(hdev,
4548 &conn->init_addr_type);
/* Outgoing connection completed: stop the LE connect timeout. */
4552 cancel_delayed_work(&conn->le_conn_timeout);
4556 /* Set the responder (our side) address type based on
4557 * the advertising address type.
4559 conn->resp_addr_type = hdev->adv_addr_type;
4560 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4561 bacpy(&conn->resp_addr, &hdev->random_addr);
4563 bacpy(&conn->resp_addr, &hdev->bdaddr);
4565 conn->init_addr_type = ev->bdaddr_type;
4566 bacpy(&conn->init_addr, &ev->bdaddr);
4568 /* For incoming connections, set the default minimum
4569 * and maximum connection interval. They will be used
4570 * to check if the parameters are in range and if not
4571 * trigger the connection update procedure.
4573 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4574 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4577 /* Lookup the identity address from the stored connection
4578 * address and address type.
4580 * When establishing connections to an identity address, the
4581 * connection procedure will store the resolvable random
4582 * address first. Now if it can be converted back into the
4583 * identity address, start using the identity address from
4586 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4588 bacpy(&conn->dst, &irk->bdaddr);
4589 conn->dst_type = irk->addr_type;
/* Failed connection: run the LE-specific failure cleanup. */
4593 hci_le_conn_failed(conn, ev->status);
4597 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4598 addr_type = BDADDR_LE_PUBLIC;
4600 addr_type = BDADDR_LE_RANDOM;
4602 /* Drop the connection if the device is blocked */
4603 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4604 hci_conn_drop(conn);
4608 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4609 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4611 conn->sec_level = BT_SECURITY_LOW;
4612 conn->handle = __le16_to_cpu(ev->handle);
4613 conn->state = BT_CONFIG;
4615 conn->le_conn_interval = le16_to_cpu(ev->interval);
4616 conn->le_conn_latency = le16_to_cpu(ev->latency);
4617 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4619 hci_debugfs_create_conn(conn);
4620 hci_conn_add_sysfs(conn);
4623 /* The remote features procedure is defined for master
4624 * role only. So only in case of an initiated connection
4625 * request the remote features.
4627 * If the local controller supports slave-initiated features
4628 * exchange, then requesting the remote features in slave
4629 * role is possible. Otherwise just transition into the
4630 * connected state without requesting the remote features.
4633 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
4634 struct hci_cp_le_read_remote_features cp;
4636 cp.handle = __cpu_to_le16(conn->handle);
4638 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
4641 hci_conn_hold(conn);
4643 conn->state = BT_CONNECTED;
4644 hci_connect_cfm(conn, ev->status);
4647 hci_connect_cfm(conn, ev->status);
/* A pending auto-connect entry for this device has now served its
 * purpose: drop its ownership of the connection object.
 */
4650 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4653 list_del_init(&params->action);
4655 hci_conn_drop(params->conn);
4656 hci_conn_put(params->conn);
4657 params->conn = NULL;
4662 hci_update_background_scan(hdev);
4663 hci_dev_unlock(hdev);
/* LE Connection Update Complete event handler: refreshes the cached
 * connection interval, latency and supervision timeout on the
 * affected connection.
 * NOTE(review): the status early-return and 'if (conn)' guard appear
 * elided in this chunk -- verify against the full file.
 */
4666 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4667 struct sk_buff *skb)
4669 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4670 struct hci_conn *conn;
4672 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4679 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4681 conn->le_conn_interval = le16_to_cpu(ev->interval);
4682 conn->le_conn_latency = le16_to_cpu(ev->latency);
4683 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4686 hci_dev_unlock(hdev);
4689 /* This function requires the caller holds hdev->lock */
/* Decide whether an advertising report should trigger an outgoing LE
 * connection. Returns the hci_conn created by hci_connect_le() on
 * success, otherwise NULL/none (error paths are partially elided in
 * this chunk).
 * NOTE(review): several returns, 'if (!params)' guards and the
 * default switch arm appear elided -- verify against the full file.
 */
4690 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4692 u8 addr_type, u8 adv_type,
4693 bdaddr_t *direct_rpa)
4695 struct hci_conn *conn;
4696 struct hci_conn_params *params;
4698 /* If the event is not connectable don't proceed further */
4699 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4702 /* Ignore if the device is blocked */
4703 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4706 /* Most controller will fail if we try to create new connections
4707 * while we have an existing one in slave role.
4709 if (hdev->conn_hash.le_num_slave > 0)
4712 /* If we're not connectable only connect devices that we have in
4713 * our pend_le_conns list.
4715 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
4720 if (!params->explicit_connect) {
4721 switch (params->auto_connect) {
4722 case HCI_AUTO_CONN_DIRECT:
4723 /* Only devices advertising with ADV_DIRECT_IND are
4724 * triggering a connection attempt. This is allowing
4725 * incoming connections from slave devices.
4727 if (adv_type != LE_ADV_DIRECT_IND)
4730 case HCI_AUTO_CONN_ALWAYS:
4731 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
4732 * are triggering a connection attempt. This means
4733 * that incoming connectioms from slave device are
4734 * accepted and also outgoing connections to slave
4735 * devices are established when found.
4743 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4744 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
4746 if (!IS_ERR(conn)) {
4747 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
4748 * by higher layer that tried to connect, if no then
4749 * store the pointer since we don't really have any
4750 * other owner of the object besides the params that
4751 * triggered it. This way we can abort the connection if
4752 * the parameters get removed and keep the reference
4753 * count consistent once the connection is established.
4756 if (!params->explicit_connect)
4757 params->conn = hci_conn_get(conn);
4762 switch (PTR_ERR(conn)) {
4764 /* If hci_connect() returns -EBUSY it means there is already
4765 * an LE connection attempt going on. Since controllers don't
4766 * support more than one connection attempt at the time, we
4767 * don't consider this an error case.
4771 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core LE advertising report processing: validates the report type
 * and length, resolves RPAs, optionally triggers a pending LE
 * connection, and emits (or merges/stores for later) mgmt
 * device-found events depending on scan type and pending state.
 * NOTE(review): this chunk is elided -- several declarations (ptr,
 * real_len, flags, match), returns, and braces visible upstream are
 * missing here. Verify against the full file before modifying.
 */
4778 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4779 u8 bdaddr_type, bdaddr_t *direct_addr,
4780 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4782 struct discovery_state *d = &hdev->discovery;
4783 struct smp_irk *irk;
4784 struct hci_conn *conn;
/* Reject anything that is not a known legacy advertising PDU type. */
4791 case LE_ADV_DIRECT_IND:
4792 case LE_ADV_SCAN_IND:
4793 case LE_ADV_NONCONN_IND:
4794 case LE_ADV_SCAN_RSP:
4797 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
4802 if (len > HCI_MAX_AD_LENGTH) {
4803 pr_err_ratelimited("legacy adv larger than 31 bytes");
4807 /* Find the end of the data in case the report contains padded zero
4808 * bytes at the end causing an invalid length value.
4810 * When data is NULL, len is 0 so there is no need for extra ptr
4811 * check as 'ptr < data + 0' is already false in such case.
4813 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
4814 if (ptr + 1 + *ptr > data + len)
4818 real_len = ptr - data;
4820 /* Adjust for actual length */
4821 if (len != real_len) {
4822 BT_ERR_RATELIMITED("%s advertising data length corrected",
4827 /* If the direct address is present, then this report is from
4828 * a LE Direct Advertising Report event. In that case it is
4829 * important to see if the address is matching the local
4830 * controller address.
4833 /* Only resolvable random addresses are valid for these
4834 * kind of reports and others can be ignored.
4836 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4839 /* If the controller is not using resolvable random
4840 * addresses, then this report can be ignored.
4842 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4845 /* If the local IRK of the controller does not match
4846 * with the resolvable random address provided, then
4847 * this report can be ignored.
4849 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4853 /* Check if we need to convert to identity address */
4854 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4856 bdaddr = &irk->bdaddr;
4857 bdaddr_type = irk->addr_type;
4860 /* Check if we have been requested to connect to this device.
4862 * direct_addr is set only for directed advertising reports (it is NULL
4863 * for advertising reports) and is already verified to be RPA above.
4865 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
4867 if (conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
4868 /* Store report for later inclusion by
4869 * mgmt_device_connected
4871 memcpy(conn->le_adv_data, data, len);
4872 conn->le_adv_data_len = len;
4875 /* Passive scanning shouldn't trigger any device found events,
4876 * except for devices marked as CONN_REPORT for which we do send
4877 * device found events.
4879 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4880 if (type == LE_ADV_DIRECT_IND)
4883 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4884 bdaddr, bdaddr_type))
4887 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4888 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4891 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4892 rssi, flags, data, len, NULL, 0);
4896 /* When receiving non-connectable or scannable undirected
4897 * advertising reports, this means that the remote device is
4898 * not connectable and then clearly indicate this in the
4899 * device found event.
4901 * When receiving a scan response, then there is no way to
4902 * know if the remote device is connectable or not. However
4903 * since scan responses are merged with a previously seen
4904 * advertising report, the flags field from that report
4907 * In the really unlikely case that a controller get confused
4908 * and just sends a scan response event, then it is marked as
4909 * not connectable as well.
4911 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4912 type == LE_ADV_SCAN_RSP)
4913 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4917 /* If there's nothing pending either store the data from this
4918 * event or send an immediate device found event if the data
4919 * should not be stored for later.
4921 if (!has_pending_adv_report(hdev)) {
4922 /* If the report will trigger a SCAN_REQ store it for
4925 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4926 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4927 rssi, flags, data, len);
4931 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4932 rssi, flags, data, len, NULL, 0);
4936 /* Check if the pending report is for the same device as the new one */
4937 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4938 bdaddr_type == d->last_adv_addr_type);
4940 /* If the pending data doesn't match this report or this isn't a
4941 * scan response (e.g. we got a duplicate ADV_IND) then force
4942 * sending of the pending data.
4944 if (type != LE_ADV_SCAN_RSP || !match) {
4945 /* Send out whatever is in the cache, but skip duplicates */
4947 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4948 d->last_adv_addr_type, NULL,
4949 d->last_adv_rssi, d->last_adv_flags,
4951 d->last_adv_data_len, NULL, 0);
4953 /* If the new report will trigger a SCAN_REQ store it for
4956 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4957 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4958 rssi, flags, data, len);
4962 /* The advertising reports cannot be merged, so clear
4963 * the pending report and send out a device found event.
4965 clear_pending_adv_report(hdev);
4966 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4967 rssi, flags, data, len, NULL, 0);
4971 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4972 * the new event is a SCAN_RSP. We can therefore proceed with
4973 * sending a merged device found event.
4975 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4976 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4977 d->last_adv_data, d->last_adv_data_len, data, len);
4978 clear_pending_adv_report(hdev);
/* LE Advertising Report meta-event handler: walks the packed list of
 * reports in the skb, bounds-checks each entry against the skb tail
 * (guarding against malicious length fields), and feeds valid ones
 * to process_adv_report(). The RSSI byte trails each report's data.
 */
4981 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4983 u8 num_reports = skb->data[0];
4984 void *ptr = &skb->data[1];
4988 while (num_reports--) {
4989 struct hci_ev_le_advertising_info *ev = ptr;
/* Header of the next report must fit entirely inside the skb. */
4992 if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
4993 bt_dev_err(hdev, "Malicious advertising data.");
4997 if (ev->length <= HCI_MAX_AD_LENGTH &&
4998 ev->data + ev->length <= skb_tail_pointer(skb)) {
4999 rssi = ev->data[ev->length];
5000 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5001 ev->bdaddr_type, NULL, 0, rssi,
5002 ev->data, ev->length);
5004 bt_dev_err(hdev, "Dropping invalid advertising data");
/* Advance past the header, the AD payload, and the RSSI byte. */
5007 ptr += sizeof(*ev) + ev->length + 1;
5010 hci_dev_unlock(hdev);
/* LE Read Remote Features Complete event handler: stores the remote
 * LE feature page and finishes connection setup for links still in
 * BT_CONFIG, tolerating error 0x1a from peers that don't support
 * slave-initiated feature exchange.
 */
5013 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5014 struct sk_buff *skb)
5016 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5017 struct hci_conn *conn;
5019 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5023 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5026 memcpy(conn->features[0], ev->features, 8);
5028 if (conn->state == BT_CONFIG) {
5031 /* If the local controller supports slave-initiated
5032 * features exchange, but the remote controller does
5033 * not, then it is possible that the error code 0x1a
5034 * for unsupported remote feature gets returned.
5036 * In this specific case, allow the connection to
5037 * transition into connected state and mark it as
5040 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5041 !conn->out && ev->status == 0x1a)
5044 status = ev->status;
5046 conn->state = BT_CONNECTED;
5047 hci_connect_cfm(conn, status);
5048 hci_conn_drop(conn);
5052 hci_dev_unlock(hdev);
/* LE Long Term Key Request event handler: looks up a stored LTK for
 * the connection, validates EDiv/Rand against the key type (both
 * must be zero for Secure Connections keys), and replies with the
 * key or a negative reply when no matching key exists.
 * NOTE(review): 'goto not_found' targets and the 'not_found:' label
 * line appear elided in this chunk -- verify against the full file.
 */
5055 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5057 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5058 struct hci_cp_le_ltk_reply cp;
5059 struct hci_cp_le_ltk_neg_reply neg;
5060 struct hci_conn *conn;
5061 struct smp_ltk *ltk;
5063 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5067 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5071 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5075 if (smp_ltk_is_sc(ltk)) {
5076 /* With SC both EDiv and Rand are set to zero */
5077 if (ev->ediv || ev->rand)
5080 /* For non-SC keys check that EDiv and Rand match */
5081 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Copy the key and zero-pad up to the full reply size. */
5085 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5086 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5087 cp.handle = cpu_to_le16(conn->handle);
5089 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5091 conn->enc_key_size = ltk->enc_size;
5093 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5095 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5096 * temporary key used to encrypt a connection following
5097 * pairing. It is used during the Encrypted Session Setup to
5098 * distribute the keys. Later, security can be re-established
5099 * using a distributed LTK.
5101 if (ltk->type == SMP_STK) {
5102 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5103 list_del_rcu(&ltk->list);
5104 kfree_rcu(ltk, rcu);
5106 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5109 hci_dev_unlock(hdev);
/* Negative-reply path: echo the handle back unmodified. */
5114 neg.handle = ev->handle;
5115 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5116 hci_dev_unlock(hdev);
/* Helper: send an LE Connection Parameter Request negative reply for
 * the given handle.
 * NOTE(review): the reason parameter/assignment lines appear elided
 * in this chunk -- verify against the full file.
 */
5119 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5122 struct hci_cp_le_conn_param_req_neg_reply cp;
5124 cp.handle = cpu_to_le16(handle);
5127 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* LE Remote Connection Parameter Request event handler: validates
 * the requested parameters, rejects unknown handles or out-of-range
 * values with a negative reply, updates stored per-device parameters
 * when acting as master, notifies mgmt, and finally accepts the
 * request by echoing the parameters back.
 * NOTE(review): 'store_hint' handling and some braces appear elided
 * in this chunk -- verify against the full file.
 */
5131 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5132 struct sk_buff *skb)
5134 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5135 struct hci_cp_le_conn_param_req_reply cp;
5136 struct hci_conn *hcon;
5137 u16 handle, min, max, latency, timeout;
5139 handle = le16_to_cpu(ev->handle);
5140 min = le16_to_cpu(ev->interval_min);
5141 max = le16_to_cpu(ev->interval_max);
5142 latency = le16_to_cpu(ev->latency);
5143 timeout = le16_to_cpu(ev->timeout);
5145 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5146 if (!hcon || hcon->state != BT_CONNECTED)
5147 return send_conn_param_neg_reply(hdev, handle,
5148 HCI_ERROR_UNKNOWN_CONN_ID);
5150 if (hci_check_conn_params(min, max, latency, timeout))
5151 return send_conn_param_neg_reply(hdev, handle,
5152 HCI_ERROR_INVALID_LL_PARAMS);
5154 if (hcon->role == HCI_ROLE_MASTER) {
5155 struct hci_conn_params *params;
5160 params = hci_conn_params_lookup(hdev, &hcon->dst,
/* Remember the peer's preferred parameters for future connections. */
5163 params->conn_min_interval = min;
5164 params->conn_max_interval = max;
5165 params->conn_latency = latency;
5166 params->supervision_timeout = timeout;
5172 hci_dev_unlock(hdev);
5174 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5175 store_hint, min, max, latency, timeout);
/* Accept: reply with the requested parameters unchanged (values are
 * still little-endian from the event, so no conversion is needed).
 */
5178 cp.handle = ev->handle;
5179 cp.interval_min = ev->interval_min;
5180 cp.interval_max = ev->interval_max;
5181 cp.latency = ev->latency;
5182 cp.timeout = ev->timeout;
5186 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* LE Direct Advertising Report meta-event handler: validates the
 * report count against the skb length, then forwards each
 * fixed-size report (no AD data, direct address present) to
 * process_adv_report().
 */
5189 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5190 struct sk_buff *skb)
5192 u8 num_reports = skb->data[0];
5193 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
/* All reports are fixed size; reject truncated or empty events. */
5195 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
5200 for (; num_reports; num_reports--, ev++)
5201 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5202 ev->bdaddr_type, &ev->direct_addr,
5203 ev->direct_addr_type, ev->rssi, NULL, 0);
5205 hci_dev_unlock(hdev);
/* LE Meta event demultiplexer: strips the meta-event header and
 * dispatches the skb to the matching LE sub-event handler.
 */
5208 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5210 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5212 skb_pull(skb, sizeof(*le_ev));
5214 switch (le_ev->subevent) {
5215 case HCI_EV_LE_CONN_COMPLETE:
5216 hci_le_conn_complete_evt(hdev, skb);
5219 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5220 hci_le_conn_update_complete_evt(hdev, skb);
5223 case HCI_EV_LE_ADVERTISING_REPORT:
5224 hci_le_adv_report_evt(hdev, skb);
5227 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5228 hci_le_remote_feat_complete_evt(hdev, skb);
5231 case HCI_EV_LE_LTK_REQ:
5232 hci_le_ltk_request_evt(hdev, skb);
5235 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5236 hci_le_remote_conn_param_req_evt(hdev, skb);
5239 case HCI_EV_LE_DIRECT_ADV_REPORT:
5240 hci_le_direct_adv_report_evt(hdev, skb);
/* Checks whether the given skb is the Command Complete event that
 * terminates the request for 'opcode' (or the expected 'event').
 * Returns true when the skb matches and has been advanced past the
 * headers so the caller can hand the remaining payload to the
 * request-completion callback.
 * NOTE(review): several 'return true/false' lines appear elided in
 * this chunk -- verify against the full file.
 */
5248 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5249 u8 event, struct sk_buff *skb)
5251 struct hci_ev_cmd_complete *ev;
5252 struct hci_event_hdr *hdr;
5257 if (skb->len < sizeof(*hdr)) {
5258 BT_ERR("Too short HCI event");
5262 hdr = (void *) skb->data;
5263 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* If the request specified a terminating event, match on that. */
5266 if (hdr->evt != event)
5271 /* Check if request ended in Command Status - no way to retreive
5272 * any extra parameters in this case.
5274 if (hdr->evt == HCI_EV_CMD_STATUS)
5277 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5278 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
5282 if (skb->len < sizeof(*ev)) {
5283 BT_ERR("Too short cmd_complete event");
5287 ev = (void *) skb->data;
5288 skb_pull(skb, sizeof(*ev));
5290 if (opcode != __le16_to_cpu(ev->opcode)) {
5291 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5292 __le16_to_cpu(ev->opcode));
5299 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5301 struct hci_event_hdr *hdr = (void *) skb->data;
5302 hci_req_complete_t req_complete = NULL;
5303 hci_req_complete_skb_t req_complete_skb = NULL;
5304 struct sk_buff *orig_skb = NULL;
5305 u8 status = 0, event = hdr->evt, req_evt = 0;
5306 u16 opcode = HCI_OP_NOP;
5309 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
5313 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5314 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5315 opcode = __le16_to_cpu(cmd_hdr->opcode);
5316 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5321 /* If it looks like we might end up having to call
5322 * req_complete_skb, store a pristine copy of the skb since the
5323 * various handlers may modify the original one through
5324 * skb_pull() calls, etc.
5326 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5327 event == HCI_EV_CMD_COMPLETE)
5328 orig_skb = skb_clone(skb, GFP_KERNEL);
5330 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5333 case HCI_EV_INQUIRY_COMPLETE:
5334 hci_inquiry_complete_evt(hdev, skb);
5337 case HCI_EV_INQUIRY_RESULT:
5338 hci_inquiry_result_evt(hdev, skb);
5341 case HCI_EV_CONN_COMPLETE:
5342 hci_conn_complete_evt(hdev, skb);
5345 case HCI_EV_CONN_REQUEST:
5346 hci_conn_request_evt(hdev, skb);
5349 case HCI_EV_DISCONN_COMPLETE:
5350 hci_disconn_complete_evt(hdev, skb);
5353 case HCI_EV_AUTH_COMPLETE:
5354 hci_auth_complete_evt(hdev, skb);
5357 case HCI_EV_REMOTE_NAME:
5358 hci_remote_name_evt(hdev, skb);
5361 case HCI_EV_ENCRYPT_CHANGE:
5362 hci_encrypt_change_evt(hdev, skb);
5365 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5366 hci_change_link_key_complete_evt(hdev, skb);
5369 case HCI_EV_REMOTE_FEATURES:
5370 hci_remote_features_evt(hdev, skb);
5373 case HCI_EV_CMD_COMPLETE:
5374 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5375 &req_complete, &req_complete_skb);
5378 case HCI_EV_CMD_STATUS:
5379 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5383 case HCI_EV_HARDWARE_ERROR:
5384 hci_hardware_error_evt(hdev, skb);
5387 case HCI_EV_ROLE_CHANGE:
5388 hci_role_change_evt(hdev, skb);
5391 case HCI_EV_NUM_COMP_PKTS:
5392 hci_num_comp_pkts_evt(hdev, skb);
5395 case HCI_EV_MODE_CHANGE:
5396 hci_mode_change_evt(hdev, skb);
5399 case HCI_EV_PIN_CODE_REQ:
5400 hci_pin_code_request_evt(hdev, skb);
5403 case HCI_EV_LINK_KEY_REQ:
5404 hci_link_key_request_evt(hdev, skb);
5407 case HCI_EV_LINK_KEY_NOTIFY:
5408 hci_link_key_notify_evt(hdev, skb);
5411 case HCI_EV_CLOCK_OFFSET:
5412 hci_clock_offset_evt(hdev, skb);
5415 case HCI_EV_PKT_TYPE_CHANGE:
5416 hci_pkt_type_change_evt(hdev, skb);
5419 case HCI_EV_PSCAN_REP_MODE:
5420 hci_pscan_rep_mode_evt(hdev, skb);
5423 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5424 hci_inquiry_result_with_rssi_evt(hdev, skb);
5427 case HCI_EV_REMOTE_EXT_FEATURES:
5428 hci_remote_ext_features_evt(hdev, skb);
5431 case HCI_EV_SYNC_CONN_COMPLETE:
5432 hci_sync_conn_complete_evt(hdev, skb);
5435 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5436 hci_extended_inquiry_result_evt(hdev, skb);
5439 case HCI_EV_KEY_REFRESH_COMPLETE:
5440 hci_key_refresh_complete_evt(hdev, skb);
5443 case HCI_EV_IO_CAPA_REQUEST:
5444 hci_io_capa_request_evt(hdev, skb);
5447 case HCI_EV_IO_CAPA_REPLY:
5448 hci_io_capa_reply_evt(hdev, skb);
5451 case HCI_EV_USER_CONFIRM_REQUEST:
5452 hci_user_confirm_request_evt(hdev, skb);
5455 case HCI_EV_USER_PASSKEY_REQUEST:
5456 hci_user_passkey_request_evt(hdev, skb);
5459 case HCI_EV_USER_PASSKEY_NOTIFY:
5460 hci_user_passkey_notify_evt(hdev, skb);
5463 case HCI_EV_KEYPRESS_NOTIFY:
5464 hci_keypress_notify_evt(hdev, skb);
5467 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5468 hci_simple_pair_complete_evt(hdev, skb);
5471 case HCI_EV_REMOTE_HOST_FEATURES:
5472 hci_remote_host_features_evt(hdev, skb);
5475 case HCI_EV_LE_META:
5476 hci_le_meta_evt(hdev, skb);
5479 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5480 hci_remote_oob_data_request_evt(hdev, skb);
5483 #if IS_ENABLED(CONFIG_BT_HS)
5484 case HCI_EV_CHANNEL_SELECTED:
5485 hci_chan_selected_evt(hdev, skb);
5488 case HCI_EV_PHY_LINK_COMPLETE:
5489 hci_phy_link_complete_evt(hdev, skb);
5492 case HCI_EV_LOGICAL_LINK_COMPLETE:
5493 hci_loglink_complete_evt(hdev, skb);
5496 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5497 hci_disconn_loglink_complete_evt(hdev, skb);
5500 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5501 hci_disconn_phylink_complete_evt(hdev, skb);
5505 case HCI_EV_NUM_COMP_BLOCKS:
5506 hci_num_comp_blocks_evt(hdev, skb);
5510 BT_DBG("%s event 0x%2.2x", hdev->name, event);
5515 req_complete(hdev, status, opcode);
5516 } else if (req_complete_skb) {
5517 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
5518 kfree_skb(orig_skb);
5521 req_complete_skb(hdev, status, opcode, orig_skb);
5525 kfree_skb(orig_skb);
5527 hdev->stat.evt_rx++;