/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
47 __u8 status = *((__u8 *) skb->data);
49 BT_DBG("%s status 0x%2.2x", hdev->name, status);
51 /* It is possible that we receive Inquiry Complete event right
52 * before we receive Inquiry Cancel Command Complete event, in
53 * which case the latter event should have status of Command
54 * Disallowed (0x0c). This should not be treated as error, since
55 * we actually achieve what Inquiry Cancel wants to achieve,
56 * which is to end the last Inquiry session.
58 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
59 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
68 clear_bit(HCI_INQUIRY, &hdev->flags);
69 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
70 wake_up_bit(&hdev->flags, HCI_INQUIRY);
73 /* Set discovery state to stopped if we're not doing LE active
76 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
77 hdev->le_scan_type != LE_SCAN_ACTIVE)
78 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
81 hci_conn_check_pending(hdev);
84 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
86 __u8 status = *((__u8 *) skb->data);
88 BT_DBG("%s status 0x%2.2x", hdev->name, status);
93 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
96 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
98 __u8 status = *((__u8 *) skb->data);
100 BT_DBG("%s status 0x%2.2x", hdev->name, status);
105 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
107 hci_conn_check_pending(hdev);
110 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
113 BT_DBG("%s", hdev->name);
116 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
118 struct hci_rp_role_discovery *rp = (void *) skb->data;
119 struct hci_conn *conn;
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
130 conn->role = rp->role;
132 hci_dev_unlock(hdev);
135 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
137 struct hci_rp_read_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
140 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
147 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 conn->link_policy = __le16_to_cpu(rp->policy);
151 hci_dev_unlock(hdev);
154 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156 struct hci_rp_write_link_policy *rp = (void *) skb->data;
157 struct hci_conn *conn;
160 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
171 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
173 conn->link_policy = get_unaligned_le16(sent + 2);
175 hci_dev_unlock(hdev);
178 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
181 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
183 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
188 hdev->link_policy = __le16_to_cpu(rp->policy);
191 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
194 __u8 status = *((__u8 *) skb->data);
197 BT_DBG("%s status 0x%2.2x", hdev->name, status);
202 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
206 hdev->link_policy = get_unaligned_le16(sent);
209 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
211 __u8 status = *((__u8 *) skb->data);
213 BT_DBG("%s status 0x%2.2x", hdev->name, status);
215 clear_bit(HCI_RESET, &hdev->flags);
220 /* Reset all non-persistent flags */
221 hci_dev_clear_volatile_flags(hdev);
223 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
225 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
226 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
228 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
229 hdev->adv_data_len = 0;
231 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
232 hdev->scan_rsp_data_len = 0;
234 hdev->le_scan_type = LE_SCAN_PASSIVE;
236 hdev->ssp_debug_mode = 0;
238 hci_bdaddr_list_clear(&hdev->le_white_list);
241 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
244 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
245 struct hci_cp_read_stored_link_key *sent;
247 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
249 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
253 if (!rp->status && sent->read_all == 0x01) {
254 hdev->stored_max_keys = rp->max_keys;
255 hdev->stored_num_keys = rp->num_keys;
259 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
262 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
264 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
269 if (rp->num_keys <= hdev->stored_num_keys)
270 hdev->stored_num_keys -= rp->num_keys;
272 hdev->stored_num_keys = 0;
275 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
277 __u8 status = *((__u8 *) skb->data);
280 BT_DBG("%s status 0x%2.2x", hdev->name, status);
282 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
288 if (hci_dev_test_flag(hdev, HCI_MGMT))
289 mgmt_set_local_name_complete(hdev, sent, status);
291 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
293 hci_dev_unlock(hdev);
296 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
298 struct hci_rp_read_local_name *rp = (void *) skb->data;
300 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
305 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
306 hci_dev_test_flag(hdev, HCI_CONFIG))
307 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
310 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
312 __u8 status = *((__u8 *) skb->data);
315 BT_DBG("%s status 0x%2.2x", hdev->name, status);
317 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
324 __u8 param = *((__u8 *) sent);
326 if (param == AUTH_ENABLED)
327 set_bit(HCI_AUTH, &hdev->flags);
329 clear_bit(HCI_AUTH, &hdev->flags);
332 if (hci_dev_test_flag(hdev, HCI_MGMT))
333 mgmt_auth_enable_complete(hdev, status);
335 hci_dev_unlock(hdev);
338 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
340 __u8 status = *((__u8 *) skb->data);
344 BT_DBG("%s status 0x%2.2x", hdev->name, status);
349 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
353 param = *((__u8 *) sent);
356 set_bit(HCI_ENCRYPT, &hdev->flags);
358 clear_bit(HCI_ENCRYPT, &hdev->flags);
361 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
363 __u8 status = *((__u8 *) skb->data);
367 BT_DBG("%s status 0x%2.2x", hdev->name, status);
369 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
373 param = *((__u8 *) sent);
378 hdev->discov_timeout = 0;
382 if (param & SCAN_INQUIRY)
383 set_bit(HCI_ISCAN, &hdev->flags);
385 clear_bit(HCI_ISCAN, &hdev->flags);
387 if (param & SCAN_PAGE)
388 set_bit(HCI_PSCAN, &hdev->flags);
390 clear_bit(HCI_PSCAN, &hdev->flags);
393 hci_dev_unlock(hdev);
396 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
398 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
400 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
405 memcpy(hdev->dev_class, rp->dev_class, 3);
407 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
408 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
411 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
413 __u8 status = *((__u8 *) skb->data);
416 BT_DBG("%s status 0x%2.2x", hdev->name, status);
418 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
425 memcpy(hdev->dev_class, sent, 3);
427 if (hci_dev_test_flag(hdev, HCI_MGMT))
428 mgmt_set_class_of_dev_complete(hdev, sent, status);
430 hci_dev_unlock(hdev);
433 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
435 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
438 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
443 setting = __le16_to_cpu(rp->voice_setting);
445 if (hdev->voice_setting == setting)
448 hdev->voice_setting = setting;
450 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
453 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
456 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
459 __u8 status = *((__u8 *) skb->data);
463 BT_DBG("%s status 0x%2.2x", hdev->name, status);
468 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
472 setting = get_unaligned_le16(sent);
474 if (hdev->voice_setting == setting)
477 hdev->voice_setting = setting;
479 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
482 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
485 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
488 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
490 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
495 hdev->num_iac = rp->num_iac;
497 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
500 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
502 __u8 status = *((__u8 *) skb->data);
503 struct hci_cp_write_ssp_mode *sent;
505 BT_DBG("%s status 0x%2.2x", hdev->name, status);
507 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
515 hdev->features[1][0] |= LMP_HOST_SSP;
517 hdev->features[1][0] &= ~LMP_HOST_SSP;
520 if (hci_dev_test_flag(hdev, HCI_MGMT))
521 mgmt_ssp_enable_complete(hdev, sent->mode, status);
524 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
526 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
529 hci_dev_unlock(hdev);
532 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
534 u8 status = *((u8 *) skb->data);
535 struct hci_cp_write_sc_support *sent;
537 BT_DBG("%s status 0x%2.2x", hdev->name, status);
539 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
547 hdev->features[1][0] |= LMP_HOST_SC;
549 hdev->features[1][0] &= ~LMP_HOST_SC;
552 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
554 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
556 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
559 hci_dev_unlock(hdev);
562 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
564 struct hci_rp_read_local_version *rp = (void *) skb->data;
566 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
571 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
572 hci_dev_test_flag(hdev, HCI_CONFIG)) {
573 hdev->hci_ver = rp->hci_ver;
574 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
575 hdev->lmp_ver = rp->lmp_ver;
576 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
577 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
581 static void hci_cc_read_local_commands(struct hci_dev *hdev,
584 struct hci_rp_read_local_commands *rp = (void *) skb->data;
586 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
591 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
592 hci_dev_test_flag(hdev, HCI_CONFIG))
593 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
596 static void hci_cc_read_local_features(struct hci_dev *hdev,
599 struct hci_rp_read_local_features *rp = (void *) skb->data;
601 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
606 memcpy(hdev->features, rp->features, 8);
608 /* Adjust default settings according to features
609 * supported by device. */
611 if (hdev->features[0][0] & LMP_3SLOT)
612 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
614 if (hdev->features[0][0] & LMP_5SLOT)
615 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
617 if (hdev->features[0][1] & LMP_HV2) {
618 hdev->pkt_type |= (HCI_HV2);
619 hdev->esco_type |= (ESCO_HV2);
622 if (hdev->features[0][1] & LMP_HV3) {
623 hdev->pkt_type |= (HCI_HV3);
624 hdev->esco_type |= (ESCO_HV3);
627 if (lmp_esco_capable(hdev))
628 hdev->esco_type |= (ESCO_EV3);
630 if (hdev->features[0][4] & LMP_EV4)
631 hdev->esco_type |= (ESCO_EV4);
633 if (hdev->features[0][4] & LMP_EV5)
634 hdev->esco_type |= (ESCO_EV5);
636 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
637 hdev->esco_type |= (ESCO_2EV3);
639 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
640 hdev->esco_type |= (ESCO_3EV3);
642 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
643 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
646 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
649 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
651 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
656 if (hdev->max_page < rp->max_page)
657 hdev->max_page = rp->max_page;
659 if (rp->page < HCI_MAX_PAGES)
660 memcpy(hdev->features[rp->page], rp->features, 8);
663 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
666 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
668 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
673 hdev->flow_ctl_mode = rp->mode;
676 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
678 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
680 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
685 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
686 hdev->sco_mtu = rp->sco_mtu;
687 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
688 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
690 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
695 hdev->acl_cnt = hdev->acl_pkts;
696 hdev->sco_cnt = hdev->sco_pkts;
698 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
699 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
702 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
704 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
706 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
711 if (test_bit(HCI_INIT, &hdev->flags))
712 bacpy(&hdev->bdaddr, &rp->bdaddr);
714 if (hci_dev_test_flag(hdev, HCI_SETUP))
715 bacpy(&hdev->setup_addr, &rp->bdaddr);
718 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
721 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
723 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
728 if (test_bit(HCI_INIT, &hdev->flags)) {
729 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
730 hdev->page_scan_window = __le16_to_cpu(rp->window);
734 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
737 u8 status = *((u8 *) skb->data);
738 struct hci_cp_write_page_scan_activity *sent;
740 BT_DBG("%s status 0x%2.2x", hdev->name, status);
745 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
749 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
750 hdev->page_scan_window = __le16_to_cpu(sent->window);
753 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
756 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
758 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
763 if (test_bit(HCI_INIT, &hdev->flags))
764 hdev->page_scan_type = rp->type;
767 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
770 u8 status = *((u8 *) skb->data);
773 BT_DBG("%s status 0x%2.2x", hdev->name, status);
778 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
780 hdev->page_scan_type = *type;
783 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
786 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
788 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
793 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
794 hdev->block_len = __le16_to_cpu(rp->block_len);
795 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
797 hdev->block_cnt = hdev->num_blocks;
799 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
800 hdev->block_cnt, hdev->block_len);
803 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
805 struct hci_rp_read_clock *rp = (void *) skb->data;
806 struct hci_cp_read_clock *cp;
807 struct hci_conn *conn;
809 BT_DBG("%s", hdev->name);
811 if (skb->len < sizeof(*rp))
819 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
823 if (cp->which == 0x00) {
824 hdev->clock = le32_to_cpu(rp->clock);
828 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
830 conn->clock = le32_to_cpu(rp->clock);
831 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
835 hci_dev_unlock(hdev);
838 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
841 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
843 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
848 hdev->amp_status = rp->amp_status;
849 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
850 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
851 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
852 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
853 hdev->amp_type = rp->amp_type;
854 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
855 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
856 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
857 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
860 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
863 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
865 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
870 hdev->inq_tx_power = rp->tx_power;
873 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
875 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
876 struct hci_cp_pin_code_reply *cp;
877 struct hci_conn *conn;
879 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
883 if (hci_dev_test_flag(hdev, HCI_MGMT))
884 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
889 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
893 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
895 conn->pin_length = cp->pin_len;
898 hci_dev_unlock(hdev);
901 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
903 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
905 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
909 if (hci_dev_test_flag(hdev, HCI_MGMT))
910 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
913 hci_dev_unlock(hdev);
916 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
919 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
921 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
926 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
927 hdev->le_pkts = rp->le_max_pkt;
929 hdev->le_cnt = hdev->le_pkts;
931 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
934 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
937 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
939 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
944 memcpy(hdev->le_features, rp->features, 8);
947 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
950 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
952 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
957 hdev->adv_tx_power = rp->tx_power;
960 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
962 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
964 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
968 if (hci_dev_test_flag(hdev, HCI_MGMT))
969 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
972 hci_dev_unlock(hdev);
975 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
978 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
980 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
984 if (hci_dev_test_flag(hdev, HCI_MGMT))
985 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
986 ACL_LINK, 0, rp->status);
988 hci_dev_unlock(hdev);
991 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
993 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
995 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
999 if (hci_dev_test_flag(hdev, HCI_MGMT))
1000 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1003 hci_dev_unlock(hdev);
1006 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1007 struct sk_buff *skb)
1009 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1011 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1015 if (hci_dev_test_flag(hdev, HCI_MGMT))
1016 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1017 ACL_LINK, 0, rp->status);
1019 hci_dev_unlock(hdev);
1022 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1023 struct sk_buff *skb)
1025 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1027 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1031 struct sk_buff *skb)
1033 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1035 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1038 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1040 __u8 status = *((__u8 *) skb->data);
1043 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1048 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1054 bacpy(&hdev->random_addr, sent);
1056 hci_dev_unlock(hdev);
1059 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1061 __u8 *sent, status = *((__u8 *) skb->data);
1063 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1068 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1074 /* If we're doing connection initiation as peripheral. Set a
1075 * timeout in case something goes wrong.
1078 struct hci_conn *conn;
1080 hci_dev_set_flag(hdev, HCI_LE_ADV);
1082 conn = hci_lookup_le_connect(hdev);
1084 queue_delayed_work(hdev->workqueue,
1085 &conn->le_conn_timeout,
1086 conn->conn_timeout);
1088 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1091 hci_dev_unlock(hdev);
1094 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1096 struct hci_cp_le_set_scan_param *cp;
1097 __u8 status = *((__u8 *) skb->data);
1099 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1104 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1110 hdev->le_scan_type = cp->type;
1112 hci_dev_unlock(hdev);
1115 static bool has_pending_adv_report(struct hci_dev *hdev)
1117 struct discovery_state *d = &hdev->discovery;
1119 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1122 static void clear_pending_adv_report(struct hci_dev *hdev)
1124 struct discovery_state *d = &hdev->discovery;
1126 bacpy(&d->last_adv_addr, BDADDR_ANY);
1127 d->last_adv_data_len = 0;
1130 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1131 u8 bdaddr_type, s8 rssi, u32 flags,
1134 struct discovery_state *d = &hdev->discovery;
1136 if (len > HCI_MAX_AD_LENGTH)
1139 bacpy(&d->last_adv_addr, bdaddr);
1140 d->last_adv_addr_type = bdaddr_type;
1141 d->last_adv_rssi = rssi;
1142 d->last_adv_flags = flags;
1143 memcpy(d->last_adv_data, data, len);
1144 d->last_adv_data_len = len;
1147 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1148 struct sk_buff *skb)
1150 struct hci_cp_le_set_scan_enable *cp;
1151 __u8 status = *((__u8 *) skb->data);
1153 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1158 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1164 switch (cp->enable) {
1165 case LE_SCAN_ENABLE:
1166 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1167 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1168 clear_pending_adv_report(hdev);
1171 case LE_SCAN_DISABLE:
1172 /* We do this here instead of when setting DISCOVERY_STOPPED
1173 * since the latter would potentially require waiting for
1174 * inquiry to stop too.
1176 if (has_pending_adv_report(hdev)) {
1177 struct discovery_state *d = &hdev->discovery;
1179 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1180 d->last_adv_addr_type, NULL,
1181 d->last_adv_rssi, d->last_adv_flags,
1183 d->last_adv_data_len, NULL, 0);
1186 /* Cancel this timer so that we don't try to disable scanning
1187 * when it's already disabled.
1189 cancel_delayed_work(&hdev->le_scan_disable);
1191 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1193 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1194 * interrupted scanning due to a connect request. Mark
1195 * therefore discovery as stopped. If this was not
1196 * because of a connect request advertising might have
1197 * been disabled because of active scanning, so
1198 * re-enable it again if necessary.
1200 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1201 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1202 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1203 hdev->discovery.state == DISCOVERY_FINDING)
1204 hci_req_reenable_advertising(hdev);
1209 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1213 hci_dev_unlock(hdev);
1216 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1217 struct sk_buff *skb)
1219 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1221 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1226 hdev->le_white_list_size = rp->size;
1229 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1230 struct sk_buff *skb)
1232 __u8 status = *((__u8 *) skb->data);
1234 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1239 hci_bdaddr_list_clear(&hdev->le_white_list);
1242 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1243 struct sk_buff *skb)
1245 struct hci_cp_le_add_to_white_list *sent;
1246 __u8 status = *((__u8 *) skb->data);
1248 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1253 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1257 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1261 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1262 struct sk_buff *skb)
1264 struct hci_cp_le_del_from_white_list *sent;
1265 __u8 status = *((__u8 *) skb->data);
1267 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1272 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1276 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1280 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1281 struct sk_buff *skb)
1283 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1285 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1290 memcpy(hdev->le_states, rp->le_states, 8);
1293 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1294 struct sk_buff *skb)
1296 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1298 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1303 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1304 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1307 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1308 struct sk_buff *skb)
1310 struct hci_cp_le_write_def_data_len *sent;
1311 __u8 status = *((__u8 *) skb->data);
1313 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1318 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1322 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1323 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1326 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1327 struct sk_buff *skb)
1329 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1331 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1336 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1337 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1338 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1339 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1342 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1343 struct sk_buff *skb)
1345 struct hci_cp_write_le_host_supported *sent;
1346 __u8 status = *((__u8 *) skb->data);
1348 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1353 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1360 hdev->features[1][0] |= LMP_HOST_LE;
1361 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1363 hdev->features[1][0] &= ~LMP_HOST_LE;
1364 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1365 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1369 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1371 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1373 hci_dev_unlock(hdev);
1376 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1378 struct hci_cp_le_set_adv_param *cp;
1379 u8 status = *((u8 *) skb->data);
1381 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1386 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1391 hdev->adv_addr_type = cp->own_address_type;
1392 hci_dev_unlock(hdev);
1395 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1397 struct hci_rp_read_rssi *rp = (void *) skb->data;
1398 struct hci_conn *conn;
1400 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1407 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1409 conn->rssi = rp->rssi;
1411 hci_dev_unlock(hdev);
1414 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1416 struct hci_cp_read_tx_power *sent;
1417 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1418 struct hci_conn *conn;
1420 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1425 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1431 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1435 switch (sent->type) {
1437 conn->tx_power = rp->tx_power;
1440 conn->max_tx_power = rp->tx_power;
1445 hci_dev_unlock(hdev);
1448 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1450 u8 status = *((u8 *) skb->data);
1453 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1458 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1460 hdev->ssp_debug_mode = *mode;
1463 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1465 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1468 hci_conn_check_pending(hdev);
1472 set_bit(HCI_INQUIRY, &hdev->flags);
1475 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1477 struct hci_cp_create_conn *cp;
1478 struct hci_conn *conn;
1480 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1482 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1488 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1490 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1493 if (conn && conn->state == BT_CONNECT) {
1494 if (status != 0x0c || conn->attempt > 2) {
1495 conn->state = BT_CLOSED;
1496 hci_connect_cfm(conn, status);
1499 conn->state = BT_CONNECT2;
1503 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1506 BT_ERR("No memory for new connection");
1510 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Add_SCO_Connection. On error, finds
 * the parent ACL by the handle we sent and closes/notifies the attached
 * SCO link.
 * NOTE(review): extraction gaps — the status==0 early return, !cp guard,
 * hci_dev_lock() and the acl->link dereference lines are missing here.
 */
1513 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1515 struct hci_cp_add_sco *cp;
1516 struct hci_conn *acl, *sco;
1519 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1524 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1528 handle = __le16_to_cpu(cp->handle);
1530 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
/* The handle in the command is the ACL link the SCO rides on. */
1534 acl = hci_conn_hash_lookup_handle(hdev, handle);
1538 sco->state = BT_CLOSED;
1540 hci_connect_cfm(sco, status);
1545 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Authentication_Requested. On error,
 * a connection still in BT_CONFIG is reported to upper layers and its
 * reference dropped.
 * NOTE(review): extraction gaps — status/!cp guards and hci_dev_lock()
 * are missing from this span.
 */
1548 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1550 struct hci_cp_auth_requested *cp;
1551 struct hci_conn *conn;
1553 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1558 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1564 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1566 if (conn->state == BT_CONFIG) {
1567 hci_connect_cfm(conn, status);
1568 hci_conn_drop(conn);
1572 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Set_Connection_Encryption. Mirrors
 * hci_cs_auth_requested: on error, a BT_CONFIG connection is reported
 * to upper layers and its reference dropped.
 * NOTE(review): extraction gaps — status/!cp guards and hci_dev_lock()
 * are missing from this span.
 */
1575 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1577 struct hci_cp_set_conn_encrypt *cp;
1578 struct hci_conn *conn;
1580 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1585 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1591 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1593 if (conn->state == BT_CONFIG) {
1594 hci_connect_cfm(conn, status);
1595 hci_conn_drop(conn);
1599 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection in BT_CONFIG still needs an
 * explicit authentication request before it can be reported connected.
 * Returns nonzero when authentication should be requested (the actual
 * return statements fall in extraction gaps in this span).
 */
1602 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1603 struct hci_conn *conn)
/* Only outgoing links that are still configuring are candidates. */
1605 if (conn->state != BT_CONFIG || !conn->out)
/* SDP-level security never requires authentication. */
1608 if (conn->pending_sec_level == BT_SECURITY_SDP)
1611 /* Only request authentication for SSP connections or non-SSP
1612 * devices with sec_level MEDIUM or HIGH or if MITM protection
1615 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1616 conn->pending_sec_level != BT_SECURITY_FIPS &&
1617 conn->pending_sec_level != BT_SECURITY_HIGH &&
1618 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue an HCI_Remote_Name_Request for one inquiry-cache entry, copying
 * the page-scan parameters and clock offset learned during inquiry so
 * the controller can page the device efficiently.
 * Returns the result of hci_send_cmd() (0 on success).
 */
1624 static int hci_resolve_name(struct hci_dev *hdev,
1625 struct inquiry_entry *e)
1627 struct hci_cp_remote_name_req cp;
1629 memset(&cp, 0, sizeof(cp));
1631 bacpy(&cp.bdaddr, &e->data.bdaddr);
1632 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1633 cp.pscan_mode = e->data.pscan_mode;
1634 cp.clock_offset = e->data.clock_offset;
1636 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Start resolving the next device name from the discovery resolve list.
 * Returns true when a name request was successfully queued (the entry is
 * then moved to NAME_PENDING); false when the list is empty or sending
 * failed. The return statements fall in extraction gaps in this span.
 */
1639 static bool hci_resolve_next_name(struct hci_dev *hdev)
1641 struct discovery_state *discov = &hdev->discovery;
1642 struct inquiry_entry *e;
1644 if (list_empty(&discov->resolve))
/* BDADDR_ANY: take whichever cached entry still needs its name. */
1647 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1651 if (hci_resolve_name(hdev, e) == 0) {
1652 e->name_state = NAME_PENDING;
/* Process a resolved (or failed) remote name during discovery: report
 * the device as connected to mgmt if appropriate, record the name result
 * in the inquiry cache, and either continue resolving the next name or
 * mark discovery stopped.
 * @conn may be NULL or not-yet-connected; @name is NULL on failure.
 * NOTE(review): extraction gaps — several guards/gotos and the
 * discov_complete label line are missing from this span.
 */
1659 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1660 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1662 struct discovery_state *discov = &hdev->discovery;
1663 struct inquiry_entry *e;
1665 /* Update the mgmt connected state if necessary. Be careful with
1666 * conn objects that exist but are not (yet) connected however.
1667 * Only those in BT_CONFIG or BT_CONNECTED states can be
1668 * considered connected.
1671 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1672 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1673 mgmt_device_connected(hdev, conn, 0, name, name_len)
1675 if (discov->state == DISCOVERY_STOPPED)
1678 if (discov->state == DISCOVERY_STOPPING)
1679 goto discov_complete;
1681 if (discov->state != DISCOVERY_RESOLVING)
1684 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1685 /* If the device was not found in a list of found devices names of which
1686 * are pending. there is no need to continue resolving a next name as it
1687 * will be done upon receiving another Remote Name Request Complete
/* Success: record the name and forward it to mgmt with the cached RSSI. */
1694 e->name_state = NAME_KNOWN;
1695 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1696 e->data.rssi, name, name_len);
1698 e->name_state = NAME_NOT_KNOWN;
1701 if (hci_resolve_next_name(hdev))
1705 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI_Remote_Name_Request. On failure, the
 * pending-name bookkeeping is flushed via hci_check_pending_name(); then,
 * if the connection still needs authentication, an Auth_Requested command
 * is issued (guarded by HCI_CONN_AUTH_PEND to avoid duplicates).
 * NOTE(review): extraction gaps — the !status early return, !cp/!conn
 * guards and hci_dev_lock() are missing from this span.
 */
1708 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1710 struct hci_cp_remote_name_req *cp;
1711 struct hci_conn *conn;
1713 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1715 /* If successful wait for the name req complete event before
1716 * checking for the need to do authentication */
1720 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1726 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1728 if (hci_dev_test_flag(hdev, HCI_MGMT))
/* name=NULL, len=0 signals resolution failure to the discovery code. */
1729 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1734 if (!hci_outgoing_auth_needed(hdev, conn))
1737 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1738 struct hci_cp_auth_requested auth_cp;
1740 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1742 auth_cp.handle = __cpu_to_le16(conn->handle);
1743 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1744 sizeof(auth_cp), &auth_cp);
1748 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Read_Remote_Supported_Features. On
 * error, a BT_CONFIG connection is reported to upper layers and dropped.
 * NOTE(review): extraction gaps — status/!cp guards and hci_dev_lock()
 * are missing from this span.
 */
1751 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1753 struct hci_cp_read_remote_features *cp;
1754 struct hci_conn *conn;
1756 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1761 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1767 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1769 if (conn->state == BT_CONFIG) {
1770 hci_connect_cfm(conn, status);
1771 hci_conn_drop(conn);
1775 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Read_Remote_Extended_Features. Same
 * error path as hci_cs_read_remote_features: notify and drop a
 * connection still in BT_CONFIG.
 * NOTE(review): extraction gaps — status/!cp guards and hci_dev_lock()
 * are missing from this span.
 */
1778 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1780 struct hci_cp_read_remote_ext_features *cp;
1781 struct hci_conn *conn;
1783 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1788 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1794 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1796 if (conn->state == BT_CONFIG) {
1797 hci_connect_cfm(conn, status);
1798 hci_conn_drop(conn);
1802 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Setup_Synchronous_Connection. On error,
 * the parent ACL is found by the handle we sent and the attached SCO
 * link is closed and reported — parallels hci_cs_add_sco above.
 * NOTE(review): extraction gaps — status==0 early return, !cp guard,
 * hci_dev_lock() and the acl->link dereference are missing here.
 */
1805 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1807 struct hci_cp_setup_sync_conn *cp;
1808 struct hci_conn *acl, *sco;
1811 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1816 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1820 handle = __le16_to_cpu(cp->handle);
1822 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1826 acl = hci_conn_hash_lookup_handle(hdev, handle);
1830 sco->state = BT_CLOSED;
1832 hci_connect_cfm(sco, status);
1837 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Sniff_Mode. On error, clears the
 * mode-change pending flag and, if a SCO setup was waiting on the mode
 * change, runs it now with the error status.
 * NOTE(review): extraction gaps — status/!cp guards and hci_dev_lock()
 * are missing from this span.
 */
1840 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1842 struct hci_cp_sniff_mode *cp;
1843 struct hci_conn *conn;
1845 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1850 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1856 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1858 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1860 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1861 hci_sco_setup(conn, status);
1864 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Exit_Sniff_Mode. Identical error
 * handling to hci_cs_sniff_mode: clear the pending mode change and run
 * any deferred SCO setup.
 * NOTE(review): extraction gaps — status/!cp guards and hci_dev_lock()
 * are missing from this span.
 */
1867 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1869 struct hci_cp_exit_sniff_mode *cp;
1870 struct hci_conn *conn;
1872 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1877 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1883 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1885 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1887 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1888 hci_sco_setup(conn, status);
1891 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Disconnect. On error, tells mgmt that
 * the disconnect attempt for this connection failed.
 * NOTE(review): extraction gaps — the BT_DBG line, status/!cp/!conn
 * guards and hci_dev_lock() are missing from this span.
 */
1894 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1896 struct hci_cp_disconnect *cp;
1897 struct hci_conn *conn;
1902 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1908 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1910 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1911 conn->dst_type, status);
1913 hci_dev_unlock(hdev);
/* Command Status handler for HCI_LE_Create_Connection. On success,
 * records the initiator/responder address pair needed later by SMP and,
 * for non-whitelist connects, arms the LE connection timeout (LE has no
 * page-timeout equivalent). Failures are handled elsewhere (see the
 * in-code comment about hci_le_conn_failed).
 * NOTE(review): extraction gaps — status/!cp/!conn guards and
 * hci_dev_lock() are missing from this span.
 */
1916 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1918 struct hci_cp_le_create_conn *cp;
1919 struct hci_conn *conn;
1921 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1923 /* All connection failure handling is taken care of by the
1924 * hci_le_conn_failed function which is triggered by the HCI
1925 * request completion callbacks used for connecting.
1930 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1936 conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
1937 cp->peer_addr_type);
1941 /* Store the initiator and responder address information which
1942 * is needed for SMP. These values will not change during the
1943 * lifetime of the connection.
1945 conn->init_addr_type = cp->own_address_type;
1946 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1947 bacpy(&conn->init_addr, &hdev->random_addr);
1949 bacpy(&conn->init_addr, &hdev->bdaddr);
1951 conn->resp_addr_type = cp->peer_addr_type;
1952 bacpy(&conn->resp_addr, &cp->peer_addr);
1954 /* We don't want the connection attempt to stick around
1955 * indefinitely since LE doesn't have a page timeout concept
1956 * like BR/EDR. Set a timer for any connection that doesn't use
1957 * the white list for connecting.
1959 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1960 queue_delayed_work(conn->hdev->workqueue,
1961 &conn->le_conn_timeout,
1962 conn->conn_timeout);
1965 hci_dev_unlock(hdev);
/* Command Status handler for HCI_LE_Read_Remote_Features. On error, a
 * BT_CONFIG connection is reported to upper layers and dropped —
 * parallels the BR/EDR hci_cs_read_remote_features above.
 * NOTE(review): extraction gaps — status/!cp guards and hci_dev_lock()
 * are missing from this span.
 */
1968 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
1970 struct hci_cp_le_read_remote_features *cp;
1971 struct hci_conn *conn;
1973 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1978 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
1984 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1986 if (conn->state == BT_CONFIG) {
1987 hci_connect_cfm(conn, status);
1988 hci_conn_drop(conn);
1992 hci_dev_unlock(hdev);
/* Command Status handler for HCI_LE_Start_Encryption. On error, a still
 * connected link is disconnected with HCI_ERROR_AUTH_FAILURE, since
 * encryption could not even be started.
 * NOTE(review): extraction gaps — status/!cp/!conn guards and
 * hci_dev_lock() are missing from this span.
 */
1995 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1997 struct hci_cp_le_start_enc *cp;
1998 struct hci_conn *conn;
2000 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2007 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2011 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2015 if (conn->state != BT_CONNECTED)
2018 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2019 hci_conn_drop(conn);
2022 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Switch_Role. On error, clears the
 * role-switch pending flag on the ACL connection so further role
 * switches can be attempted.
 * NOTE(review): extraction gaps — status/!cp/!conn guards and
 * hci_dev_lock() are missing from this span.
 */
2025 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2027 struct hci_cp_switch_role *cp;
2028 struct hci_conn *conn;
2030 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2035 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2041 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2043 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2045 hci_dev_unlock(hdev);
/* Inquiry Complete event handler. Clears HCI_INQUIRY (waking any
 * waiters), resumes pending connection attempts, and — when mgmt-driven
 * discovery is active — either starts resolving cached device names or
 * transitions discovery to STOPPED (subject to a simultaneous LE scan;
 * see the in-code comments).
 * NOTE(review): extraction gaps — hci_dev_lock() and several early
 * return/goto lines are missing from this span.
 */
2048 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2050 __u8 status = *((__u8 *) skb->data);
2051 struct discovery_state *discov = &hdev->discovery;
2052 struct inquiry_entry *e;
2054 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2056 hci_conn_check_pending(hdev);
2058 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2061 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2062 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2064 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2069 if (discov->state != DISCOVERY_FINDING)
2072 if (list_empty(&discov->resolve)) {
2073 /* When BR/EDR inquiry is active and no LE scanning is in
2074 * progress, then change discovery state to indicate completion.
2076 * When running LE scanning and BR/EDR inquiry simultaneously
2077 * and the LE scan already finished, then change the discovery
2078 * state to indicate completion.
2080 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2081 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2082 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Names still to resolve: kick off the first request if possible. */
2086 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2087 if (e && hci_resolve_name(hdev, e) == 0) {
2088 e->name_state = NAME_PENDING;
2089 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2091 /* When BR/EDR inquiry is active and no LE scanning is in
2092 * progress, then change discovery state to indicate completion.
2094 * When running LE scanning and BR/EDR inquiry simultaneously
2095 * and the LE scan already finished, then change the discovery
2096 * state to indicate completion.
2098 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2099 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2100 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2104 hci_dev_unlock(hdev);
/* Inquiry Result event handler. Validates the response count against
 * the skb length, then for each result copies the page-scan/class/clock
 * data into the inquiry cache (RSSI unavailable in this event format)
 * and reports the device to mgmt. Results are ignored while periodic
 * inquiry is active.
 * NOTE(review): extraction gaps — hci_dev_lock() and the u32 flags
 * declaration are missing from this span.
 */
2107 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2109 struct inquiry_data data;
/* First byte is num_rsp; the info structs follow it. */
2110 struct inquiry_info *info = (void *) (skb->data + 1);
2111 int num_rsp = *((__u8 *) skb->data);
2113 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2115 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2118 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2123 for (; num_rsp; num_rsp--, info++) {
2126 bacpy(&data.bdaddr, &info->bdaddr);
2127 data.pscan_rep_mode = info->pscan_rep_mode;
2128 data.pscan_period_mode = info->pscan_period_mode;
2129 data.pscan_mode = info->pscan_mode;
2130 memcpy(data.dev_class, info->dev_class, 3);
2131 data.clock_offset = info->clock_offset;
/* Standard Inquiry Result carries no RSSI and no SSP capability. */
2132 data.rssi = HCI_RSSI_INVALID;
2133 data.ssp_mode = 0x00;
2135 flags = hci_inquiry_cache_update(hdev, &data, false);
2137 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2138 info->dev_class, HCI_RSSI_INVALID,
2139 flags, NULL, 0, NULL, 0);
2142 hci_dev_unlock(hdev);
/* Connection Complete event handler. Resolves the hci_conn for the
 * peer (falling back from SCO to ESCO lookup for SCO links), then on
 * success: records the handle, moves ACL links to BT_CONFIG (and others
 * to BT_CONNECTED), registers debugfs/sysfs entries, mirrors the
 * device-wide auth/encrypt flags onto the connection, kicks off a
 * remote-features read for ACL, and fixes the packet type for incoming
 * pre-2.0 connections. On failure the connection is closed and mgmt
 * notified. Finally, deferred SCO setup and connect confirmations run.
 * NOTE(review): extraction gaps — hci_dev_lock(), several guards,
 * status checks and the sizeof(cp)/&cp arguments of two hci_send_cmd
 * calls are missing from this span.
 */
2145 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2147 struct hci_ev_conn_complete *ev = (void *) skb->data;
2148 struct hci_conn *conn;
2150 BT_DBG("%s", hdev->name);
2154 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2156 if (ev->link_type != SCO_LINK)
/* A SCO request may have been sent as ESCO; retry the lookup. */
2159 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2163 conn->type = SCO_LINK;
2167 conn->handle = __le16_to_cpu(ev->handle);
2169 if (conn->type == ACL_LINK) {
2170 conn->state = BT_CONFIG;
2171 hci_conn_hold(conn);
/* Incoming legacy (non-SSP) link with no stored key: keep it
 * alive long enough for pairing to happen.
 */
2173 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2174 !hci_find_link_key(hdev, &ev->bdaddr))
2175 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2177 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2179 conn->state = BT_CONNECTED;
2181 hci_debugfs_create_conn(conn);
2182 hci_conn_add_sysfs(conn);
2184 if (test_bit(HCI_AUTH, &hdev->flags))
2185 set_bit(HCI_CONN_AUTH, &conn->flags);
2187 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2188 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2190 /* Get remote features */
2191 if (conn->type == ACL_LINK) {
2192 struct hci_cp_read_remote_features cp;
2193 cp.handle = ev->handle;
2194 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2197 hci_req_update_scan(hdev);
2200 /* Set packet type for incoming connection */
2201 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2202 struct hci_cp_change_conn_ptype cp;
2203 cp.handle = ev->handle;
2204 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2205 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Failure path: close and let mgmt know the connect failed. */
2209 conn->state = BT_CLOSED;
2210 if (conn->type == ACL_LINK)
2211 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2212 conn->dst_type, ev->status);
2215 if (conn->type == ACL_LINK)
2216 hci_sco_setup(conn, ev->status);
2219 hci_connect_cfm(conn, ev->status);
2221 } else if (ev->link_type != ACL_LINK)
2222 hci_connect_cfm(conn, ev->status);
2225 hci_dev_unlock(hdev);
2227 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from @bdaddr by sending
 * HCI_Reject_Connection_Request with reason "bad address" (the reason
 * used for blacklist/non-connectable rejections below).
 */
2230 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2232 struct hci_cp_reject_conn_req cp;
2234 bacpy(&cp.bdaddr, bdaddr);
2235 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2236 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event handler. Decides whether to accept an
 * incoming connection: protocol layers must allow it (HCI_LM_ACCEPT),
 * the peer must not be blacklisted, and under mgmt control the device
 * must be connectable or whitelisted — otherwise the request is
 * rejected. Accepted requests update the inquiry cache's device class,
 * create/reuse a hci_conn, and either answer immediately with
 * Accept_Connection_Request (ACL, or SCO without eSCO support) or
 * Accept_Synchronous_Connection_Request, or defer to upper layers
 * (BT_CONNECT2 + hci_connect_cfm) when HCI_PROTO_DEFER is set.
 * NOTE(review): extraction gaps — the __u8 flags declaration,
 * hci_dev_lock() and several guard/return lines are missing here.
 */
2239 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2241 struct hci_ev_conn_request *ev = (void *) skb->data;
2242 int mask = hdev->link_mode;
2243 struct inquiry_entry *ie;
2244 struct hci_conn *conn;
2247 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2250 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2253 if (!(mask & HCI_LM_ACCEPT)) {
2254 hci_reject_conn(hdev, &ev->bdaddr);
2258 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2260 hci_reject_conn(hdev, &ev->bdaddr);
2264 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2265 * connection. These features are only touched through mgmt so
2266 * only do the checks if HCI_MGMT is set.
2268 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2269 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2270 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2272 hci_reject_conn(hdev, &ev->bdaddr);
2276 /* Connection accepted */
/* Keep the cached device class current for discovery consumers. */
2280 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2282 memcpy(ie->data.dev_class, ev->dev_class, 3);
2284 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2287 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2290 BT_ERR("No memory for new connection");
2291 hci_dev_unlock(hdev);
2296 memcpy(conn->dev_class, ev->dev_class, 3);
2298 hci_dev_unlock(hdev);
2300 if (ev->link_type == ACL_LINK ||
2301 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2302 struct hci_cp_accept_conn_req cp;
2303 conn->state = BT_CONNECT;
2305 bacpy(&cp.bdaddr, &ev->bdaddr);
2307 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2308 cp.role = 0x00; /* Become master */
2310 cp.role = 0x01; /* Remain slave */
2312 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2313 } else if (!(flags & HCI_PROTO_DEFER)) {
2314 struct hci_cp_accept_sync_conn_req cp;
2315 conn->state = BT_CONNECT;
2317 bacpy(&cp.bdaddr, &ev->bdaddr);
2318 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* Fixed 8 kB/s bandwidth; latency/retransmission left to the
 * controller ("don't care" values).
 */
2320 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2321 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2322 cp.max_latency = cpu_to_le16(0xffff);
2323 cp.content_format = cpu_to_le16(hdev->voice_setting);
2324 cp.retrans_effort = 0xff;
2326 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
/* HCI_PROTO_DEFER: let the upper layer decide before answering. */
2329 conn->state = BT_CONNECT2;
2330 hci_connect_cfm(conn, 0);
/* Map an HCI disconnect reason code to the coarser mgmt disconnect
 * reason reported to userspace; unrecognized codes map to
 * MGMT_DEV_DISCONN_UNKNOWN.
 * NOTE(review): the "switch (err) {" and "default:" lines fall in
 * extraction gaps in this span.
 */
2334 static u8 hci_to_mgmt_reason(u8 err)
2337 case HCI_ERROR_CONNECTION_TIMEOUT:
2338 return MGMT_DEV_DISCONN_TIMEOUT;
2339 case HCI_ERROR_REMOTE_USER_TERM:
2340 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2341 case HCI_ERROR_REMOTE_POWER_OFF:
2342 return MGMT_DEV_DISCONN_REMOTE;
2343 case HCI_ERROR_LOCAL_HOST_TERM:
2344 return MGMT_DEV_DISCONN_LOCAL_HOST;
2346 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event handler. On failure, reports the failed
 * disconnect to mgmt. On success: marks the connection closed, reports
 * the disconnect (with a reason translated via hci_to_mgmt_reason, or
 * AUTH_FAILURE if that flag was set), performs ACL-specific cleanup
 * (link-key removal, scan update), re-queues auto-connect params for
 * background scanning, notifies protocols, and re-enables LE
 * advertising if this connection had implicitly disabled it.
 * NOTE(review): extraction gaps — u8 reason/type declarations,
 * hci_dev_lock(), !conn guard, several break/unlock/goto lines and the
 * conn cleanup tail are missing from this span.
 */
2350 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2352 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2354 struct hci_conn_params *params;
2355 struct hci_conn *conn;
2356 bool mgmt_connected;
2359 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2363 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2368 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2369 conn->dst_type, ev->status);
2373 conn->state = BT_CLOSED;
2375 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2377 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2378 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2380 reason = hci_to_mgmt_reason(ev->reason);
2382 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2383 reason, mgmt_connected);
2385 if (conn->type == ACL_LINK) {
2386 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2387 hci_remove_link_key(hdev, &conn->dst);
2389 hci_req_update_scan(hdev);
2392 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2394 switch (params->auto_connect) {
2395 case HCI_AUTO_CONN_LINK_LOSS:
/* Link-loss entries only reconnect after a real timeout. */
2396 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2400 case HCI_AUTO_CONN_DIRECT:
2401 case HCI_AUTO_CONN_ALWAYS:
2402 list_del_init(&params->action);
2403 list_add(&params->action, &hdev->pend_le_conns);
2404 hci_update_background_scan(hdev);
2414 hci_disconn_cfm(conn, ev->reason);
2417 /* Re-enable advertising if necessary, since it might
2418 * have been disabled by the connection. From the
2419 * HCI_LE_Set_Advertise_Enable command description in
2420 * the core specification (v4.0):
2421 * "The Controller shall continue advertising until the Host
2422 * issues an LE_Set_Advertise_Enable command with
2423 * Advertising_Enable set to 0x00 (Advertising is disabled)
2424 * or until a connection is created or until the Advertising
2425 * is timed out due to Directed Advertising."
2427 if (type == LE_LINK)
2428 hci_req_reenable_advertising(hdev);
2431 hci_dev_unlock(hdev);
/* Authentication Complete event handler. On success, records the
 * authenticated state (refusing re-auth of legacy non-SSP devices) and
 * promotes the security level; on failure, flags key-missing errors and
 * reports mgmt auth failure. Then, depending on connection state,
 * either continues the BT_CONFIG sequence (start encryption for SSP, or
 * confirm/drop) or services a pending encryption request.
 * NOTE(review): extraction gaps — hci_dev_lock(), !conn guard, several
 * else/brace lines and the hci_send_cmd trailing arguments are missing
 * from this span.
 */
2434 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2436 struct hci_ev_auth_complete *ev = (void *) skb->data;
2437 struct hci_conn *conn;
2439 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2443 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2448 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2450 if (!hci_conn_ssp_enabled(conn) &&
2451 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2452 BT_INFO("re-auth of legacy device is not possible.");
2454 set_bit(HCI_CONN_AUTH, &conn->flags);
2455 conn->sec_level = conn->pending_sec_level;
/* Failure path: remember key-missing so mgmt can report it. */
2458 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2459 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2461 mgmt_auth_failed(conn, ev->status);
2464 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2465 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2467 if (conn->state == BT_CONFIG) {
2468 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2469 struct hci_cp_set_conn_encrypt cp;
2470 cp.handle = ev->handle;
2472 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2475 conn->state = BT_CONNECTED;
2476 hci_connect_cfm(conn, ev->status);
2477 hci_conn_drop(conn);
2480 hci_auth_cfm(conn, ev->status);
2482 hci_conn_hold(conn);
2483 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2484 hci_conn_drop(conn);
2487 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2489 struct hci_cp_set_conn_encrypt cp;
2490 cp.handle = ev->handle;
2492 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2495 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2496 hci_encrypt_cfm(conn, ev->status);
2501 hci_dev_unlock(hdev);
/* Remote Name Request Complete event handler. Feeds the resolved (or
 * failed) name into the discovery/mgmt bookkeeping via
 * hci_check_pending_name(), then — like hci_cs_remote_name_req — issues
 * an Authentication_Requested if the outgoing connection still needs
 * auth, guarded by HCI_CONN_AUTH_PEND.
 * NOTE(review): extraction gaps — hci_dev_lock(), the goto after the
 * mgmt branch and the !conn guard are missing from this span.
 */
2504 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2506 struct hci_ev_remote_name *ev = (void *) skb->data;
2507 struct hci_conn *conn;
2509 BT_DBG("%s", hdev->name);
2511 hci_conn_check_pending(hdev);
2515 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2517 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2520 if (ev->status == 0)
2521 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2522 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2524 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2530 if (!hci_outgoing_auth_needed(hdev, conn))
2533 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2534 struct hci_cp_auth_requested cp;
2536 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2538 cp.handle = __cpu_to_le16(conn->handle);
2539 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2543 hci_dev_unlock(hdev);
/* Completion callback for the HCI_Read_Encryption_Key_Size request
 * issued from hci_encrypt_change_evt. Validates the response, stores
 * the reported key size on the connection (falling back to the maximum
 * HCI_LINK_KEY_SIZE when the read failed), and finally delivers the
 * deferred encrypt confirmation to upper layers.
 * NOTE(review): extraction gaps — u16 handle declaration, hci_dev_lock()
 * and the !conn/status guards are missing from this span.
 */
2546 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2547 u16 opcode, struct sk_buff *skb)
2549 const struct hci_rp_read_enc_key_size *rp;
2550 struct hci_conn *conn;
2553 BT_DBG("%s status 0x%02x", hdev->name, status);
2555 if (!skb || skb->len < sizeof(*rp)) {
2556 BT_ERR("%s invalid HCI Read Encryption Key Size response",
2561 rp = (void *)skb->data;
2562 handle = le16_to_cpu(rp->handle);
2566 conn = hci_conn_hash_lookup_handle(hdev, handle);
2570 /* If we fail to read the encryption key size, assume maximum
2571 * (which is the same we do also when this HCI command isn't
2575 BT_ERR("%s failed to read key size for handle %u", hdev->name,
2577 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2579 conn->enc_key_size = rp->key_size;
2582 hci_encrypt_cfm(conn, 0);
2585 hci_dev_unlock(hdev);
/* Encryption Change event handler. On encryption-on: sets AUTH/ENCRYPT
 * (encryption implies authentication), promotes the security level,
 * flags FIPS for P-256 authenticated keys and AES-CCM for BR/EDR
 * encrypt==0x02 or any LE link. On encryption-off the flags are
 * cleared. An LE encryption failure also marks the RPA expired. Failed
 * link-mode checks are forced to AUTH_FAILURE; failures on connected
 * links notify upper layers and disconnect. For newly encrypted ACL
 * links the actual key size is queried asynchronously (see
 * read_enc_key_size_complete) when the controller supports it.
 * NOTE(review): extraction gaps — hci_dev_lock(), !conn guard, the
 * ev->encrypt branch headers and several goto/brace lines are missing
 * from this span.
 */
2588 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2590 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2591 struct hci_conn *conn;
2593 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2597 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2603 /* Encryption implies authentication */
2604 set_bit(HCI_CONN_AUTH, &conn->flags);
2605 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2606 conn->sec_level = conn->pending_sec_level;
2608 /* P-256 authentication key implies FIPS */
2609 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2610 set_bit(HCI_CONN_FIPS, &conn->flags);
2612 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2613 conn->type == LE_LINK)
2614 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2616 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2617 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2621 /* We should disregard the current RPA and generate a new one
2622 * whenever the encryption procedure fails.
2624 if (ev->status && conn->type == LE_LINK)
2625 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2627 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2629 /* Check link security requirements are met */
2630 if (!hci_conn_check_link_mode(conn))
2631 ev->status = HCI_ERROR_AUTH_FAILURE;
2633 if (ev->status && conn->state == BT_CONNECTED) {
2634 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2635 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2637 /* Notify upper layers so they can cleanup before
2640 hci_encrypt_cfm(conn, ev->status);
2641 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2642 hci_conn_drop(conn);
2646 /* Try reading the encryption key size for encrypted ACL links */
2647 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
2648 struct hci_cp_read_enc_key_size cp;
2649 struct hci_request req;
2651 /* Only send HCI_Read_Encryption_Key_Size if the
2652 * controller really supports it. If it doesn't, assume
2653 * the default size (16).
2655 if (!(hdev->commands[20] & 0x10)) {
2656 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2660 hci_req_init(&req, hdev);
2662 cp.handle = cpu_to_le16(conn->handle);
2663 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
2665 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
2666 BT_ERR("Sending HCI Read Encryption Key Size failed");
2667 conn->enc_key_size = HCI_LINK_KEY_SIZE;
/* Synchronous notify path (key-size read not pending). */
2675 hci_encrypt_cfm(conn, ev->status);
2678 hci_dev_unlock(hdev);
/* Change Connection Link Key Complete event handler. On success marks
 * the connection SECURE, then clears the auth-pending flag and notifies
 * upper layers of the key change.
 * NOTE(review): extraction gaps — hci_dev_lock(), the !conn guard and
 * the status check around HCI_CONN_SECURE are missing from this span.
 */
2681 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2682 struct sk_buff *skb)
2684 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2685 struct hci_conn *conn;
2687 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2691 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2694 set_bit(HCI_CONN_SECURE, &conn->flags);
2696 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2698 hci_key_change_cfm(conn, ev->status);
2701 hci_dev_unlock(hdev);
/* Read Remote Supported Features Complete event handler. Stores page 0
 * of the remote feature mask, then (for BT_CONFIG connections) chains
 * to a Read_Remote_Extended_Features when both sides support it,
 * requests the remote name for mgmt (or reports connected directly),
 * and completes the connection setup if no further authentication is
 * needed.
 * NOTE(review): extraction gaps — hci_dev_lock(), the !conn guard, the
 * status check before the memcpy, the sizeof(cp)/&cp arguments of one
 * hci_send_cmd and the goto after the ext-features branch are missing.
 */
2704 static void hci_remote_features_evt(struct hci_dev *hdev,
2705 struct sk_buff *skb)
2707 struct hci_ev_remote_features *ev = (void *) skb->data;
2708 struct hci_conn *conn;
2710 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2714 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2719 memcpy(conn->features[0], ev->features, 8);
2721 if (conn->state != BT_CONFIG)
2724 if (!ev->status && lmp_ext_feat_capable(hdev) &&
2725 lmp_ext_feat_capable(conn)) {
2726 struct hci_cp_read_remote_ext_features cp;
2727 cp.handle = ev->handle;
/* page 1 carries SSP host support; fetched before reporting. */
2729 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2734 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2735 struct hci_cp_remote_name_req cp;
2736 memset(&cp, 0, sizeof(cp));
2737 bacpy(&cp.bdaddr, &conn->dst);
2738 cp.pscan_rep_mode = 0x02;
2739 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2740 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2741 mgmt_device_connected(hdev, conn, 0, NULL, 0);
2743 if (!hci_outgoing_auth_needed(hdev, conn)) {
2744 conn->state = BT_CONNECTED;
2745 hci_connect_cfm(conn, ev->status);
2746 hci_conn_drop(conn);
2750 hci_dev_unlock(hdev);
/* Command Complete event dispatcher. Extracts the completed opcode and
 * its status byte, strips the event header, and fans out to the
 * per-command hci_cc_* handlers. Afterwards it cancels the command
 * timeout (except for NOP), refreshes the outstanding-command credit
 * from ev->ncmd (unless a reset is in flight), completes any pending
 * hci_request, and re-arms the command work queue.
 * @opcode/@status are out-parameters consumed by the caller;
 * @req_complete/@req_complete_skb are filled by hci_req_cmd_complete().
 * NOTE(review): extraction gaps — the "switch (*opcode) {" header,
 * every "break;", the default: label, the closing brace of the switch
 * and the trailing argument of hci_req_cmd_complete() are missing from
 * this extracted span.
 */
2753 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
2754 u16 *opcode, u8 *status,
2755 hci_req_complete_t *req_complete,
2756 hci_req_complete_skb_t *req_complete_skb)
2758 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2760 *opcode = __le16_to_cpu(ev->opcode);
/* Status is the first byte of the return parameters after the header. */
2761 *status = skb->data[sizeof(*ev)];
2763 skb_pull(skb, sizeof(*ev));
/* Dispatch on the completed opcode. */
2766 case HCI_OP_INQUIRY_CANCEL:
2767 hci_cc_inquiry_cancel(hdev, skb, status);
2770 case HCI_OP_PERIODIC_INQ:
2771 hci_cc_periodic_inq(hdev, skb);
2774 case HCI_OP_EXIT_PERIODIC_INQ:
2775 hci_cc_exit_periodic_inq(hdev, skb);
2778 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2779 hci_cc_remote_name_req_cancel(hdev, skb);
2782 case HCI_OP_ROLE_DISCOVERY:
2783 hci_cc_role_discovery(hdev, skb);
2786 case HCI_OP_READ_LINK_POLICY:
2787 hci_cc_read_link_policy(hdev, skb);
2790 case HCI_OP_WRITE_LINK_POLICY:
2791 hci_cc_write_link_policy(hdev, skb);
2794 case HCI_OP_READ_DEF_LINK_POLICY:
2795 hci_cc_read_def_link_policy(hdev, skb);
2798 case HCI_OP_WRITE_DEF_LINK_POLICY:
2799 hci_cc_write_def_link_policy(hdev, skb);
2803 hci_cc_reset(hdev, skb);
2806 case HCI_OP_READ_STORED_LINK_KEY:
2807 hci_cc_read_stored_link_key(hdev, skb);
2810 case HCI_OP_DELETE_STORED_LINK_KEY:
2811 hci_cc_delete_stored_link_key(hdev, skb);
2814 case HCI_OP_WRITE_LOCAL_NAME:
2815 hci_cc_write_local_name(hdev, skb);
2818 case HCI_OP_READ_LOCAL_NAME:
2819 hci_cc_read_local_name(hdev, skb);
2822 case HCI_OP_WRITE_AUTH_ENABLE:
2823 hci_cc_write_auth_enable(hdev, skb);
2826 case HCI_OP_WRITE_ENCRYPT_MODE:
2827 hci_cc_write_encrypt_mode(hdev, skb);
2830 case HCI_OP_WRITE_SCAN_ENABLE:
2831 hci_cc_write_scan_enable(hdev, skb);
2834 case HCI_OP_READ_CLASS_OF_DEV:
2835 hci_cc_read_class_of_dev(hdev, skb);
2838 case HCI_OP_WRITE_CLASS_OF_DEV:
2839 hci_cc_write_class_of_dev(hdev, skb);
2842 case HCI_OP_READ_VOICE_SETTING:
2843 hci_cc_read_voice_setting(hdev, skb);
2846 case HCI_OP_WRITE_VOICE_SETTING:
2847 hci_cc_write_voice_setting(hdev, skb);
2850 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2851 hci_cc_read_num_supported_iac(hdev, skb);
2854 case HCI_OP_WRITE_SSP_MODE:
2855 hci_cc_write_ssp_mode(hdev, skb);
2858 case HCI_OP_WRITE_SC_SUPPORT:
2859 hci_cc_write_sc_support(hdev, skb);
2862 case HCI_OP_READ_LOCAL_VERSION:
2863 hci_cc_read_local_version(hdev, skb);
2866 case HCI_OP_READ_LOCAL_COMMANDS:
2867 hci_cc_read_local_commands(hdev, skb);
2870 case HCI_OP_READ_LOCAL_FEATURES:
2871 hci_cc_read_local_features(hdev, skb);
2874 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2875 hci_cc_read_local_ext_features(hdev, skb);
2878 case HCI_OP_READ_BUFFER_SIZE:
2879 hci_cc_read_buffer_size(hdev, skb);
2882 case HCI_OP_READ_BD_ADDR:
2883 hci_cc_read_bd_addr(hdev, skb);
2886 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2887 hci_cc_read_page_scan_activity(hdev, skb);
2890 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2891 hci_cc_write_page_scan_activity(hdev, skb);
2894 case HCI_OP_READ_PAGE_SCAN_TYPE:
2895 hci_cc_read_page_scan_type(hdev, skb);
2898 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2899 hci_cc_write_page_scan_type(hdev, skb);
2902 case HCI_OP_READ_DATA_BLOCK_SIZE:
2903 hci_cc_read_data_block_size(hdev, skb);
2906 case HCI_OP_READ_FLOW_CONTROL_MODE:
2907 hci_cc_read_flow_control_mode(hdev, skb);
2910 case HCI_OP_READ_LOCAL_AMP_INFO:
2911 hci_cc_read_local_amp_info(hdev, skb);
2914 case HCI_OP_READ_CLOCK:
2915 hci_cc_read_clock(hdev, skb);
2918 case HCI_OP_READ_INQ_RSP_TX_POWER:
2919 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2922 case HCI_OP_PIN_CODE_REPLY:
2923 hci_cc_pin_code_reply(hdev, skb);
2926 case HCI_OP_PIN_CODE_NEG_REPLY:
2927 hci_cc_pin_code_neg_reply(hdev, skb);
2930 case HCI_OP_READ_LOCAL_OOB_DATA:
2931 hci_cc_read_local_oob_data(hdev, skb);
2934 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2935 hci_cc_read_local_oob_ext_data(hdev, skb);
2938 case HCI_OP_LE_READ_BUFFER_SIZE:
2939 hci_cc_le_read_buffer_size(hdev, skb);
2942 case HCI_OP_LE_READ_LOCAL_FEATURES:
2943 hci_cc_le_read_local_features(hdev, skb);
2946 case HCI_OP_LE_READ_ADV_TX_POWER:
2947 hci_cc_le_read_adv_tx_power(hdev, skb);
2950 case HCI_OP_USER_CONFIRM_REPLY:
2951 hci_cc_user_confirm_reply(hdev, skb);
2954 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2955 hci_cc_user_confirm_neg_reply(hdev, skb);
2958 case HCI_OP_USER_PASSKEY_REPLY:
2959 hci_cc_user_passkey_reply(hdev, skb);
2962 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2963 hci_cc_user_passkey_neg_reply(hdev, skb);
2966 case HCI_OP_LE_SET_RANDOM_ADDR:
2967 hci_cc_le_set_random_addr(hdev, skb);
2970 case HCI_OP_LE_SET_ADV_ENABLE:
2971 hci_cc_le_set_adv_enable(hdev, skb);
2974 case HCI_OP_LE_SET_SCAN_PARAM:
2975 hci_cc_le_set_scan_param(hdev, skb);
2978 case HCI_OP_LE_SET_SCAN_ENABLE:
2979 hci_cc_le_set_scan_enable(hdev, skb);
2982 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2983 hci_cc_le_read_white_list_size(hdev, skb);
2986 case HCI_OP_LE_CLEAR_WHITE_LIST:
2987 hci_cc_le_clear_white_list(hdev, skb);
2990 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2991 hci_cc_le_add_to_white_list(hdev, skb);
2994 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2995 hci_cc_le_del_from_white_list(hdev, skb);
2998 case HCI_OP_LE_READ_SUPPORTED_STATES:
2999 hci_cc_le_read_supported_states(hdev, skb);
3002 case HCI_OP_LE_READ_DEF_DATA_LEN:
3003 hci_cc_le_read_def_data_len(hdev, skb);
3006 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3007 hci_cc_le_write_def_data_len(hdev, skb);
3010 case HCI_OP_LE_READ_MAX_DATA_LEN:
3011 hci_cc_le_read_max_data_len(hdev, skb);
3014 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3015 hci_cc_write_le_host_supported(hdev, skb);
3018 case HCI_OP_LE_SET_ADV_PARAM:
3019 hci_cc_set_adv_param(hdev, skb);
3022 case HCI_OP_READ_RSSI:
3023 hci_cc_read_rssi(hdev, skb);
3026 case HCI_OP_READ_TX_POWER:
3027 hci_cc_read_tx_power(hdev, skb);
3030 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3031 hci_cc_write_ssp_debug_mode(hdev, skb);
/* Unhandled opcodes are only logged. */
3035 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3039 if (*opcode != HCI_OP_NOP)
3040 cancel_delayed_work(&hdev->cmd_timer);
/* Refill the command credit unless a reset is being processed. */
3042 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3043 atomic_set(&hdev->cmd_cnt, 1);
3045 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3048 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3049 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Command Status event.
 *
 * Pulls the event header off the skb, reports the opcode/status back to the
 * caller through *opcode / *status, dispatches per-opcode command-status
 * handlers, and performs the shared command bookkeeping (command timer,
 * credit accounting, request completion, and kicking the command work).
 *
 * NOTE(review): this capture is line-elided; the switch braces, break
 * statements and the default label are not visible here.
 */
3052 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3053 u16 *opcode, u8 *status,
3054 hci_req_complete_t *req_complete,
3055 hci_req_complete_skb_t *req_complete_skb)
3057 struct hci_ev_cmd_status *ev = (void *) skb->data;
/* Consume the event header so only parameters remain in the skb. */
3059 skb_pull(skb, sizeof(*ev));
3061 *opcode = __le16_to_cpu(ev->opcode);
3062 *status = ev->status;
/* Per-opcode command-status handlers; each receives only the status byte. */
3065 case HCI_OP_INQUIRY:
3066 hci_cs_inquiry(hdev, ev->status);
3069 case HCI_OP_CREATE_CONN:
3070 hci_cs_create_conn(hdev, ev->status);
3073 case HCI_OP_DISCONNECT:
3074 hci_cs_disconnect(hdev, ev->status);
3077 case HCI_OP_ADD_SCO:
3078 hci_cs_add_sco(hdev, ev->status);
3081 case HCI_OP_AUTH_REQUESTED:
3082 hci_cs_auth_requested(hdev, ev->status);
3085 case HCI_OP_SET_CONN_ENCRYPT:
3086 hci_cs_set_conn_encrypt(hdev, ev->status);
3089 case HCI_OP_REMOTE_NAME_REQ:
3090 hci_cs_remote_name_req(hdev, ev->status);
3093 case HCI_OP_READ_REMOTE_FEATURES:
3094 hci_cs_read_remote_features(hdev, ev->status);
3097 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3098 hci_cs_read_remote_ext_features(hdev, ev->status);
3101 case HCI_OP_SETUP_SYNC_CONN:
3102 hci_cs_setup_sync_conn(hdev, ev->status);
3105 case HCI_OP_SNIFF_MODE:
3106 hci_cs_sniff_mode(hdev, ev->status);
3109 case HCI_OP_EXIT_SNIFF_MODE:
3110 hci_cs_exit_sniff_mode(hdev, ev->status);
3113 case HCI_OP_SWITCH_ROLE:
3114 hci_cs_switch_role(hdev, ev->status);
3117 case HCI_OP_LE_CREATE_CONN:
3118 hci_cs_le_create_conn(hdev, ev->status);
3121 case HCI_OP_LE_READ_REMOTE_FEATURES:
3122 hci_cs_le_read_remote_features(hdev, ev->status);
3125 case HCI_OP_LE_START_ENC:
3126 hci_cs_le_start_enc(hdev, ev->status);
3130 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
/* A real command completed, so the command-timeout watchdog can be
 * cancelled; HCI_OP_NOP status events carry no such guarantee.
 */
3134 if (*opcode != HCI_OP_NOP)
3135 cancel_delayed_work(&hdev->cmd_timer);
/* Refill the command credit unless a reset is in flight (HCI_RESET). */
3137 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3138 atomic_set(&hdev->cmd_cnt, 1);
3140 /* Indicate request completion if the command failed. Also, if
3141 * we're not waiting for a special event and we get a success
3142 * command status we should try to flag the request as completed
3143 * (since for this kind of commands there will not be a command
/* NOTE(review): the leading "if (ev->status || ..." line is elided from
 * this capture; only the second half of the condition is visible.
 */
3147 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3148 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
/* If credits are available and commands are queued, resume sending. */
3151 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3152 queue_work(hdev->workqueue, &hdev->cmd_work);
3155 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3157 struct hci_ev_hardware_error *ev = (void *) skb->data;
3159 hdev->hw_error_code = ev->code;
3161 queue_work(hdev->req_workqueue, &hdev->error_reset);
3164 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3166 struct hci_ev_role_change *ev = (void *) skb->data;
3167 struct hci_conn *conn;
3169 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3173 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3176 conn->role = ev->role;
3178 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3180 hci_role_switch_cfm(conn, ev->status, ev->role);
3183 hci_dev_unlock(hdev);
/* Handle the HCI Number of Completed Packets event (packet-based flow
 * control). For every reported handle, returns the completed-packet credits
 * to the per-type transmit budget (ACL / LE / SCO) and finally kicks the TX
 * work so queued traffic can use the freed credits.
 *
 * NOTE(review): this capture is line-elided; per-type case labels, breaks
 * and closing braces of the switch are not visible here.
 */
3186 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3188 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only meaningful for packet-based flow control mode. */
3191 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3192 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Bounds-check: header first, then the num_hndl-sized handle array. */
3196 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3197 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3198 BT_DBG("%s bad parameters", hdev->name);
3202 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3204 for (i = 0; i < ev->num_hndl; i++) {
3205 struct hci_comp_pkts_info *info = &ev->handles[i];
3206 struct hci_conn *conn;
3207 __u16 handle, count;
3209 handle = __le16_to_cpu(info->handle);
3210 count = __le16_to_cpu(info->count);
3212 conn = hci_conn_hash_lookup_handle(hdev, handle);
/* Credit the connection back the packets the controller finished. */
3216 conn->sent -= count;
3218 switch (conn->type) {
3220 hdev->acl_cnt += count;
/* Clamp so credits never exceed the controller-advertised totals. */
3221 if (hdev->acl_cnt > hdev->acl_pkts)
3222 hdev->acl_cnt = hdev->acl_pkts;
/* LE links use the dedicated LE pool when the controller has one,
 * otherwise LE traffic shares the ACL credit pool.
 */
3226 if (hdev->le_pkts) {
3227 hdev->le_cnt += count;
3228 if (hdev->le_cnt > hdev->le_pkts)
3229 hdev->le_cnt = hdev->le_pkts;
3231 hdev->acl_cnt += count;
3232 if (hdev->acl_cnt > hdev->acl_pkts)
3233 hdev->acl_cnt = hdev->acl_pkts;
3238 hdev->sco_cnt += count;
3239 if (hdev->sco_cnt > hdev->sco_pkts)
3240 hdev->sco_cnt = hdev->sco_pkts;
3244 BT_ERR("Unknown type %d conn %p", conn->type, conn);
/* Freed credits may unblock queued frames; schedule the TX work. */
3249 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a connection handle to a hci_conn, dispatching on the device
 * type: primary controllers use the connection hash directly, while AMP
 * controllers go through the channel table (hci_chan_lookup_handle).
 *
 * NOTE(review): the dev_type case labels and the trailing "return NULL;"
 * are elided from this capture.
 */
3252 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3255 struct hci_chan *chan;
3257 switch (hdev->dev_type) {
3259 return hci_conn_hash_lookup_handle(hdev, handle);
3261 chan = hci_chan_lookup_handle(hdev, handle);
3266 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
/* Handle the HCI Number of Completed Data Blocks event (block-based flow
 * control, used by AMP controllers). Mirrors hci_num_comp_pkts_evt() but
 * returns block credits to hdev->block_cnt instead of packet counters,
 * and resolves handles through __hci_conn_lookup_handle() so AMP logical
 * links are found too.
 *
 * NOTE(review): case labels, breaks and closing braces are elided from
 * this capture.
 */
3273 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3275 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* Only valid when the controller runs block-based flow control. */
3278 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3279 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Bounds-check the header and the variable-length handle array. */
3283 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3284 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3285 BT_DBG("%s bad parameters", hdev->name);
3289 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3292 for (i = 0; i < ev->num_hndl; i++) {
3293 struct hci_comp_blocks_info *info = &ev->handles[i];
3294 struct hci_conn *conn = NULL;
3295 __u16 handle, block_count;
3297 handle = __le16_to_cpu(info->handle);
3298 block_count = __le16_to_cpu(info->blocks);
3300 conn = __hci_conn_lookup_handle(hdev, handle);
/* Give the connection back the blocks the controller completed. */
3304 conn->sent -= block_count;
3306 switch (conn->type) {
3309 hdev->block_cnt += block_count;
/* Clamp to the controller-advertised total number of blocks. */
3310 if (hdev->block_cnt > hdev->num_blocks)
3311 hdev->block_cnt = hdev->num_blocks;
3315 BT_ERR("Unknown type %d conn %p", conn->type, conn);
/* Newly freed blocks may unblock queued traffic. */
3320 queue_work(hdev->workqueue, &hdev->tx_work);
3323 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3325 struct hci_ev_mode_change *ev = (void *) skb->data;
3326 struct hci_conn *conn;
3328 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3332 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3334 conn->mode = ev->mode;
3336 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3338 if (conn->mode == HCI_CM_ACTIVE)
3339 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3341 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3344 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3345 hci_sco_setup(conn, ev->status);
3348 hci_dev_unlock(hdev);
/* Handle the HCI PIN Code Request event.
 *
 * Refreshes the disconnect timeout for an established link, rejects the
 * request outright when the device is not bondable and we did not initiate
 * authentication, and otherwise forwards the request to user space via
 * mgmt when management is active.
 *
 * NOTE(review): the declaration/assignment of the "secure" hint and the
 * closing braces are elided from this capture.
 */
3351 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3353 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3354 struct hci_conn *conn;
3356 BT_DBG("%s", hdev->name);
3360 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Extend the idle disconnect window while pairing is in progress; the
 * hold/drop pair only refreshes the timeout without leaking a reference.
 */
3364 if (conn->state == BT_CONNECTED) {
3365 hci_conn_hold(conn);
3366 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3367 hci_conn_drop(conn);
/* Not bondable and remote started it: refuse the PIN request. */
3370 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3371 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3372 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3373 sizeof(ev->bdaddr), &ev->bdaddr);
3374 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
/* High pending security implies a 16-digit (secure) PIN is required. */
3377 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3382 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3386 hci_dev_unlock(hdev);
3389 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3391 if (key_type == HCI_LK_CHANGED_COMBINATION)
3394 conn->pin_length = pin_len;
3395 conn->key_type = key_type;
3398 case HCI_LK_LOCAL_UNIT:
3399 case HCI_LK_REMOTE_UNIT:
3400 case HCI_LK_DEBUG_COMBINATION:
3402 case HCI_LK_COMBINATION:
3404 conn->pending_sec_level = BT_SECURITY_HIGH;
3406 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3408 case HCI_LK_UNAUTH_COMBINATION_P192:
3409 case HCI_LK_UNAUTH_COMBINATION_P256:
3410 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3412 case HCI_LK_AUTH_COMBINATION_P192:
3413 conn->pending_sec_level = BT_SECURITY_HIGH;
3415 case HCI_LK_AUTH_COMBINATION_P256:
3416 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle the HCI Link Key Request event.
 *
 * Looks up a stored link key for the peer and answers with a Link Key
 * Reply when a suitable key exists. Keys are withheld (negative reply)
 * when their security properties do not satisfy the connection's pending
 * requirements.
 *
 * NOTE(review): several flow lines (goto not_found, closing braces, the
 * not_found label) are elided from this capture.
 */
3421 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3423 struct hci_ev_link_key_req *ev = (void *) skb->data;
3424 struct hci_cp_link_key_reply cp;
3425 struct hci_conn *conn;
3426 struct link_key *key;
3428 BT_DBG("%s", hdev->name);
/* Without the management interface, stored-key handling is disabled. */
3430 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3435 key = hci_find_link_key(hdev, &ev->bdaddr);
3437 BT_DBG("%s link key not found for %pMR", hdev->name,
3442 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3445 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3447 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* An unauthenticated key cannot satisfy an authentication (MITM)
 * requirement; 0xff means the remote requirements are still unknown.
 */
3449 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3450 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3451 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3452 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* A short-PIN combination key is too weak for high/FIPS security. */
3456 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3457 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3458 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3459 BT_DBG("%s ignoring key unauthenticated for high security",
3464 conn_set_key(conn, key->type, key->pin_len);
3467 bacpy(&cp.bdaddr, &ev->bdaddr);
3468 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3470 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3472 hci_dev_unlock(hdev);
/* No usable key: tell the controller so pairing can be triggered. */
3477 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3478 hci_dev_unlock(hdev);
/* Handle the HCI Link Key Notification event.
 *
 * Stores the newly created link key via hci_add_link_key(), notifies user
 * space (mgmt_new_link_key), discards debug keys unless explicitly kept,
 * and tracks whether the key should be flushed on disconnect based on the
 * persistence hint.
 *
 * NOTE(review): the local declarations of pin_len/persistent and several
 * branch lines are elided from this capture.
 */
3481 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3483 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3484 struct hci_conn *conn;
3485 struct link_key *key;
3489 BT_DBG("%s", hdev->name);
3493 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the link around long enough for any follow-up traffic; the
 * hold/drop pair only refreshes the disconnect timeout.
 */
3497 hci_conn_hold(conn);
3498 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3499 hci_conn_drop(conn);
3501 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3502 conn_set_key(conn, ev->key_type, conn->pin_length);
3504 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3507 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3508 ev->key_type, pin_len, &persistent);
3512 /* Update connection information since adding the key will have
3513 * fixed up the type in the case of changed combination keys.
3515 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3516 conn_set_key(conn, key->type, key->pin_len);
3518 mgmt_new_link_key(hdev, key, persistent);
3520 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3521 * is set. If it's not set simply remove the key from the kernel
3522 * list (we've still notified user space about it but with
3523 * store_hint being 0).
3525 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3526 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3527 list_del_rcu(&key->list);
3528 kfree_rcu(key, rcu);
/* Persistent keys survive disconnect; non-persistent ones are flagged
 * for flushing when the link goes down.
 */
3533 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3535 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3538 hci_dev_unlock(hdev);
3541 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3543 struct hci_ev_clock_offset *ev = (void *) skb->data;
3544 struct hci_conn *conn;
3546 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3550 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3551 if (conn && !ev->status) {
3552 struct inquiry_entry *ie;
3554 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3556 ie->data.clock_offset = ev->clock_offset;
3557 ie->timestamp = jiffies;
3561 hci_dev_unlock(hdev);
3564 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3566 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3567 struct hci_conn *conn;
3569 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3573 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3574 if (conn && !ev->status)
3575 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3577 hci_dev_unlock(hdev);
3580 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3582 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3583 struct inquiry_entry *ie;
3585 BT_DBG("%s", hdev->name);
3589 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3591 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3592 ie->timestamp = jiffies;
3595 hci_dev_unlock(hdev);
/* Handle the HCI Inquiry Result with RSSI event.
 *
 * The event comes in two wire formats (with or without a pscan_mode byte
 * per response); the format is detected by dividing the payload size by
 * the response count. Each response is folded into the inquiry cache and
 * reported to user space via mgmt_device_found().
 *
 * NOTE(review): the early "!num_rsp" bail-out and several braces are
 * elided from this capture.
 */
3598 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3599 struct sk_buff *skb)
3601 struct inquiry_data data;
3602 int num_rsp = *((__u8 *) skb->data);
3604 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Periodic inquiry results are not forwarded. */
3609 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Per-entry size reveals which of the two formats was sent. */
3614 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3615 struct inquiry_info_with_rssi_and_pscan_mode *info;
3616 info = (void *) (skb->data + 1);
/* Reject truncated events before touching the entries. */
3618 if (skb->len < num_rsp * sizeof(*info) + 1)
3621 for (; num_rsp; num_rsp--, info++) {
3624 bacpy(&data.bdaddr, &info->bdaddr);
3625 data.pscan_rep_mode = info->pscan_rep_mode;
3626 data.pscan_period_mode = info->pscan_period_mode;
3627 data.pscan_mode = info->pscan_mode;
3628 memcpy(data.dev_class, info->dev_class, 3);
3629 data.clock_offset = info->clock_offset;
3630 data.rssi = info->rssi;
3631 data.ssp_mode = 0x00;
3633 flags = hci_inquiry_cache_update(hdev, &data, false);
3635 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3636 info->dev_class, info->rssi,
3637 flags, NULL, 0, NULL, 0);
/* Variant without the pscan_mode byte. */
3640 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3642 if (skb->len < num_rsp * sizeof(*info) + 1)
3645 for (; num_rsp; num_rsp--, info++) {
3648 bacpy(&data.bdaddr, &info->bdaddr);
3649 data.pscan_rep_mode = info->pscan_rep_mode;
3650 data.pscan_period_mode = info->pscan_period_mode;
3651 data.pscan_mode = 0x00;
3652 memcpy(data.dev_class, info->dev_class, 3);
3653 data.clock_offset = info->clock_offset;
3654 data.rssi = info->rssi;
3655 data.ssp_mode = 0x00;
3657 flags = hci_inquiry_cache_update(hdev, &data, false);
3659 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3660 info->dev_class, info->rssi,
3661 flags, NULL, 0, NULL, 0);
3666 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Extended Features Complete event.
 *
 * Stores the reported feature page on the connection, derives the SSP /
 * Secure Connections host-support flags from page 1, and continues the
 * connection setup (remote name request or connect confirmation) when the
 * link is still in BT_CONFIG.
 *
 * NOTE(review): several early-exit/goto lines and braces are elided from
 * this capture.
 */
3669 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3670 struct sk_buff *skb)
3672 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3673 struct hci_conn *conn;
3675 BT_DBG("%s", hdev->name);
3679 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Cache the feature page, bounding the page index first. */
3683 if (ev->page < HCI_MAX_PAGES)
3684 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the host feature bits (SSP / SC host support). */
3686 if (!ev->status && ev->page == 0x01) {
3687 struct inquiry_entry *ie;
3689 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3691 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3693 if (ev->features[0] & LMP_HOST_SSP) {
3694 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3696 /* It is mandatory by the Bluetooth specification that
3697 * Extended Inquiry Results are only used when Secure
3698 * Simple Pairing is enabled, but some devices violate
3701 * To make these devices work, the internal SSP
3702 * enabled flag needs to be cleared if the remote host
3703 * features do not indicate SSP support */
3704 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3707 if (ev->features[0] & LMP_HOST_SC)
3708 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* Everything below only applies while connection setup is ongoing. */
3711 if (conn->state != BT_CONFIG)
/* Without a resolved name yet, fetch it; otherwise report the
 * connection to the management interface.
 */
3714 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3715 struct hci_cp_remote_name_req cp;
3716 memset(&cp, 0, sizeof(cp));
3717 bacpy(&cp.bdaddr, &conn->dst);
3718 cp.pscan_rep_mode = 0x02;
3719 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3720 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3721 mgmt_device_connected(hdev, conn, 0, NULL, 0);
/* No further outgoing authentication needed: setup is complete. */
3723 if (!hci_outgoing_auth_needed(hdev, conn)) {
3724 conn->state = BT_CONNECTED;
3725 hci_connect_cfm(conn, ev->status);
3726 hci_conn_drop(conn);
3730 hci_dev_unlock(hdev);
/* Handle the HCI Synchronous Connection Complete event (SCO/eSCO).
 *
 * Resolves the connection object (falling back from SCO to eSCO lookup),
 * guards against duplicate success events for an already-connected link,
 * and on specific eSCO-setup failures retries with a SCO-compatible packet
 * type before finally confirming the result to upper layers.
 *
 * NOTE(review): several case labels, goto targets and braces are elided
 * from this capture.
 */
3733 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3734 struct sk_buff *skb)
3736 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3737 struct hci_conn *conn;
3739 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3743 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3745 if (ev->link_type == ESCO_LINK)
3748 /* When the link type in the event indicates SCO connection
3749 * and lookup of the connection object fails, then check
3750 * if an eSCO connection object exists.
3752 * The core limits the synchronous connections to either
3753 * SCO or eSCO. The eSCO connection is preferred and tried
3754 * to be setup first and until successfully established,
3755 * the link type will be hinted as eSCO.
3757 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3762 switch (ev->status) {
3764 /* The synchronous connection complete event should only be
3765 * sent once per new connection. Receiving a successful
3766 * complete event when the connection status is already
3767 * BT_CONNECTED means that the device is misbehaving and sent
3768 * multiple complete event packets for the same new connection.
3770 * Registering the device more than once can corrupt kernel
3771 * memory, hence upon detecting this invalid event, we report
3772 * an error and ignore the packet.
3774 if (conn->state == BT_CONNECTED) {
3775 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
3779 conn->handle = __le16_to_cpu(ev->handle);
3780 conn->state = BT_CONNECTED;
3781 conn->type = ev->link_type;
3783 hci_debugfs_create_conn(conn);
3784 hci_conn_add_sysfs(conn);
/* These failure codes typically mean the remote cannot do eSCO with
 * the offered parameters; retry the setup with a downgraded packet
 * type derived from the controller's SCO/eSCO capabilities.
 */
3787 case 0x10: /* Connection Accept Timeout */
3788 case 0x0d: /* Connection Rejected due to Limited Resources */
3789 case 0x11: /* Unsupported Feature or Parameter Value */
3790 case 0x1c: /* SCO interval rejected */
3791 case 0x1a: /* Unsupported Remote Feature */
3792 case 0x1e: /* Invalid LMP Parameters */
3793 case 0x1f: /* Unspecified error */
3794 case 0x20: /* Unsupported LMP Parameter value */
3796 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3797 (hdev->esco_type & EDR_ESCO_MASK);
3798 if (hci_setup_sync(conn, conn->link->handle))
/* Non-retryable failure: tear the connection down. */
3804 conn->state = BT_CLOSED;
3808 hci_connect_cfm(conn, ev->status);
3813 hci_dev_unlock(hdev);
3816 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3820 while (parsed < eir_len) {
3821 u8 field_len = eir[0];
3826 parsed += field_len + 1;
3827 eir += field_len + 1;
/* Handle the HCI Extended Inquiry Result event.
 *
 * Each response carries EIR data in addition to the usual inquiry fields.
 * Responses are folded into the inquiry cache (resolving the name from
 * EIR when mgmt is active) and forwarded to user space together with the
 * significant portion of the EIR blob.
 *
 * NOTE(review): local declarations (flags, name_known, eir_len) and some
 * braces are elided from this capture.
 */
3833 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3834 struct sk_buff *skb)
3836 struct inquiry_data data;
3837 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3838 int num_rsp = *((__u8 *) skb->data);
3841 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Reject empty or truncated events up front. */
3843 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
/* Periodic inquiry results are not forwarded. */
3846 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3851 for (; num_rsp; num_rsp--, info++) {
3855 bacpy(&data.bdaddr, &info->bdaddr);
3856 data.pscan_rep_mode = info->pscan_rep_mode;
3857 data.pscan_period_mode = info->pscan_period_mode;
3858 data.pscan_mode = 0x00;
3859 memcpy(data.dev_class, info->dev_class, 3);
3860 data.clock_offset = info->clock_offset;
3861 data.rssi = info->rssi;
/* Extended inquiry results imply SSP support on the remote. */
3862 data.ssp_mode = 0x01;
/* With mgmt active, check whether the EIR already carries the
 * complete name so a remote name request can be skipped.
 */
3864 if (hci_dev_test_flag(hdev, HCI_MGMT))
3865 name_known = eir_get_data(info->data,
3867 EIR_NAME_COMPLETE, NULL);
3871 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3873 eir_len = eir_get_length(info->data, sizeof(info->data));
3875 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3876 info->dev_class, info->rssi,
3877 flags, info->data, eir_len, NULL, 0);
3880 hci_dev_unlock(hdev);
/* Handle the HCI Encryption Key Refresh Complete event.
 *
 * Only acted upon for LE links (BR/EDR handles this via the auth complete
 * path). Promotes the pending security level, disconnects on failure, and
 * finishes connection setup or confirms authentication depending on the
 * connection state.
 *
 * NOTE(review): goto targets and some braces are elided from this capture.
 */
3883 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3884 struct sk_buff *skb)
3886 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3887 struct hci_conn *conn;
3889 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3890 __le16_to_cpu(ev->handle));
3894 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3898 /* For BR/EDR the necessary steps are taken through the
3899 * auth_complete event.
3901 if (conn->type != LE_LINK)
/* The refresh succeeded or failed; either way the pending level is
 * now the effective one and no encryption change is outstanding.
 */
3905 conn->sec_level = conn->pending_sec_level;
3907 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* A failed refresh on a live link is an authentication failure. */
3909 if (ev->status && conn->state == BT_CONNECTED) {
3910 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3911 hci_conn_drop(conn);
/* Still in setup: complete the connection; otherwise report the
 * authentication result and refresh the disconnect timeout.
 */
3915 if (conn->state == BT_CONFIG) {
3917 conn->state = BT_CONNECTED;
3919 hci_connect_cfm(conn, ev->status);
3920 hci_conn_drop(conn);
3922 hci_auth_cfm(conn, ev->status);
3924 hci_conn_hold(conn);
3925 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3926 hci_conn_drop(conn);
3930 hci_dev_unlock(hdev);
3933 static u8 hci_get_auth_req(struct hci_conn *conn)
3935 /* If remote requests no-bonding follow that lead */
3936 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3937 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3938 return conn->remote_auth | (conn->auth_type & 0x01);
3940 /* If both remote and local have enough IO capabilities, require
3943 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3944 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3945 return conn->remote_auth | 0x01;
3947 /* No MITM protection possible so ignore remote requirement */
3948 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3951 static u8 bredr_oob_data_present(struct hci_conn *conn)
3953 struct hci_dev *hdev = conn->hdev;
3954 struct oob_data *data;
3956 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3960 if (bredr_sc_enabled(hdev)) {
3961 /* When Secure Connections is enabled, then just
3962 * return the present value stored with the OOB
3963 * data. The stored value contains the right present
3964 * information. However it can only be trusted when
3965 * not in Secure Connection Only mode.
3967 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3968 return data->present;
3970 /* When Secure Connections Only mode is enabled, then
3971 * the P-256 values are required. If they are not
3972 * available, then do not declare that OOB data is
3975 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3976 !memcmp(data->hash256, ZERO_KEY, 16))
3982 /* When Secure Connections is not enabled or actually
3983 * not supported by the hardware, then check that if
3984 * P-192 data values are present.
3986 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3987 !memcmp(data->hash192, ZERO_KEY, 16))
/* Handle the HCI IO Capability Request event.
 *
 * When pairing is acceptable (bondable, we initiated, or the remote wants
 * no bonding), answers with an IO Capability Reply carrying our IO
 * capability, derived authentication requirements and OOB-data presence.
 * Otherwise the request is rejected with PAIRING_NOT_ALLOWED.
 *
 * NOTE(review): goto/unlock flow lines and some braces are elided from
 * this capture.
 */
3993 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3995 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3996 struct hci_conn *conn;
3998 BT_DBG("%s", hdev->name);
4002 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the connection alive for the duration of the SSP exchange. */
4006 hci_conn_hold(conn);
4008 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4011 /* Allow pairing if we're pairable, the initiators of the
4012 * pairing or if the remote is not requesting bonding.
4014 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4015 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4016 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4017 struct hci_cp_io_capability_reply cp;
4019 bacpy(&cp.bdaddr, &ev->bdaddr);
4020 /* Change the IO capability from KeyboardDisplay
4021 * to DisplayYesNo as it is not supported by BT spec. */
4022 cp.capability = (conn->io_capability == 0x04) ?
4023 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4025 /* If we are initiators, there is no remote information yet */
4026 if (conn->remote_auth == 0xff) {
4027 /* Request MITM protection if our IO caps allow it
4028 * except for the no-bonding case.
4030 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4031 conn->auth_type != HCI_AT_NO_BONDING)
4032 conn->auth_type |= 0x01;
/* Responder: combine remote and local requirements. */
4034 conn->auth_type = hci_get_auth_req(conn);
4037 /* If we're not bondable, force one of the non-bondable
4038 * authentication requirement values.
4040 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4041 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4043 cp.authentication = conn->auth_type;
4044 cp.oob_data = bredr_oob_data_present(conn);
4046 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
/* Pairing not allowed: reject with an explicit reason. */
4049 struct hci_cp_io_capability_neg_reply cp;
4051 bacpy(&cp.bdaddr, &ev->bdaddr);
4052 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4054 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4059 hci_dev_unlock(hdev);
4062 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4064 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4065 struct hci_conn *conn;
4067 BT_DBG("%s", hdev->name);
4071 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4075 conn->remote_cap = ev->capability;
4076 conn->remote_auth = ev->authentication;
4079 hci_dev_unlock(hdev);
/* Handle the HCI User Confirmation Request event (numeric comparison).
 *
 * Rejects the request when our required security level demands MITM but
 * the remote cannot provide it; auto-accepts (optionally delayed) when
 * neither side needs MITM; otherwise forwards the request to user space
 * with an appropriate confirm hint.
 *
 * NOTE(review): goto targets and some braces are elided from this capture.
 */
4082 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4083 struct sk_buff *skb)
4085 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4086 int loc_mitm, rem_mitm, confirm_hint = 0;
4087 struct hci_conn *conn;
4089 BT_DBG("%s", hdev->name);
4093 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4096 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM protection flag. */
4100 loc_mitm = (conn->auth_type & 0x01);
4101 rem_mitm = (conn->remote_auth & 0x01);
4103 /* If we require MITM but the remote device can't provide that
4104 * (it has NoInputNoOutput) then reject the confirmation
4105 * request. We check the security level here since it doesn't
4106 * necessarily match conn->auth_type.
4108 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4109 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4110 BT_DBG("Rejecting request: remote device can't provide MITM");
4111 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4112 sizeof(ev->bdaddr), &ev->bdaddr);
4116 /* If no side requires MITM protection; auto-accept */
4117 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4118 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4120 /* If we're not the initiators request authorization to
4121 * proceed from user space (mgmt_user_confirm with
4122 * confirm_hint set to 1). The exception is if neither
4123 * side had MITM or if the local IO capability is
4124 * NoInputNoOutput, in which case we do auto-accept
4126 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4127 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4128 (loc_mitm || rem_mitm)) {
4129 BT_DBG("Confirming auto-accept as acceptor");
4134 BT_DBG("Auto-accept of user confirmation with %ums delay",
4135 hdev->auto_accept_delay);
/* Optionally debounce auto-accept via delayed work. */
4137 if (hdev->auto_accept_delay > 0) {
4138 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4139 queue_delayed_work(conn->hdev->workqueue,
4140 &conn->auto_accept_work, delay);
4144 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4145 sizeof(ev->bdaddr), &ev->bdaddr);
/* Ask user space to confirm the passkey comparison. */
4150 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4151 le32_to_cpu(ev->passkey), confirm_hint);
4154 hci_dev_unlock(hdev);
4157 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4158 struct sk_buff *skb)
4160 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4162 BT_DBG("%s", hdev->name);
4164 if (hci_dev_test_flag(hdev, HCI_MGMT))
4165 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4168 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4169 struct sk_buff *skb)
4171 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4172 struct hci_conn *conn;
4174 BT_DBG("%s", hdev->name);
4176 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4180 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4181 conn->passkey_entered = 0;
4183 if (hci_dev_test_flag(hdev, HCI_MGMT))
4184 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4185 conn->dst_type, conn->passkey_notify,
4186 conn->passkey_entered);
/* Handle the HCI Keypress Notification event.
 *
 * Tracks how many passkey digits the remote has entered so far and
 * forwards the updated count to user space via the management interface.
 *
 * NOTE(review): the break/return statement after each case is elided from
 * this capture, so which event types reach the final notification cannot
 * be read off this text alone.
 */
4189 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4191 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4192 struct hci_conn *conn;
4194 BT_DBG("%s", hdev->name);
4196 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Adjust the digit counter according to the keypress type. */
4201 case HCI_KEYPRESS_STARTED:
4202 conn->passkey_entered = 0;
4205 case HCI_KEYPRESS_ENTERED:
4206 conn->passkey_entered++;
4209 case HCI_KEYPRESS_ERASED:
4210 conn->passkey_entered--;
4213 case HCI_KEYPRESS_CLEARED:
4214 conn->passkey_entered = 0;
4217 case HCI_KEYPRESS_COMPLETED:
/* Report the current digit count to user space. */
4221 if (hci_dev_test_flag(hdev, HCI_MGMT))
4222 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4223 conn->dst_type, conn->passkey_notify,
4224 conn->passkey_entered);
4227 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4228 struct sk_buff *skb)
4230 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4231 struct hci_conn *conn;
4233 BT_DBG("%s", hdev->name);
4237 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4241 /* Reset the authentication requirement to unknown */
4242 conn->remote_auth = 0xff;
4244 /* To avoid duplicate auth_failed events to user space we check
4245 * the HCI_CONN_AUTH_PEND flag which will be set if we
4246 * initiated the authentication. A traditional auth_complete
4247 * event gets always produced as initiator and is also mapped to
4248 * the mgmt_auth_failed event */
4249 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4250 mgmt_auth_failed(conn, ev->status);
4252 hci_conn_drop(conn);
4255 hci_dev_unlock(hdev);
4258 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4259 struct sk_buff *skb)
4261 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4262 struct inquiry_entry *ie;
4263 struct hci_conn *conn;
4265 BT_DBG("%s", hdev->name);
4269 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4271 memcpy(conn->features[1], ev->features, 8);
4273 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4275 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4277 hci_dev_unlock(hdev);
/* Handle the HCI Remote OOB Data Request event.
 *
 * Answers with stored OOB data for the peer: the extended (P-192 + P-256)
 * reply when Secure Connections is enabled (zeroing the P-192 values in
 * SC-Only mode), the legacy P-192 reply otherwise, and a negative reply
 * when no data is stored.
 *
 * NOTE(review): goto/unlock flow lines and some braces are elided from
 * this capture.
 */
4280 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4281 struct sk_buff *skb)
4283 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4284 struct oob_data *data;
4286 BT_DBG("%s", hdev->name);
/* OOB data handling is only available with the management interface. */
4290 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4293 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored data for this peer: send a negative reply. */
4295 struct hci_cp_remote_oob_data_neg_reply cp;
4297 bacpy(&cp.bdaddr, &ev->bdaddr);
4298 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4303 if (bredr_sc_enabled(hdev)) {
4304 struct hci_cp_remote_oob_ext_data_reply cp;
4306 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-Only mode the P-192 values must not be used. */
4307 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4308 memset(cp.hash192, 0, sizeof(cp.hash192));
4309 memset(cp.rand192, 0, sizeof(cp.rand192));
4311 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4312 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4314 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4315 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4317 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
/* Legacy (non-SC) controllers only take the P-192 values. */
4320 struct hci_cp_remote_oob_data_reply cp;
4322 bacpy(&cp.bdaddr, &ev->bdaddr);
4323 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4324 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4326 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4331 hci_dev_unlock(hdev);
4334 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event (High Speed / CONFIG_BT_HS only).
 *
 * Looks up the physical-link connection by handle and kicks off reading
 * the local AMP association's final data for it.
 * NOTE(review): the !hcon early-return appears elided in this extraction.
 */
4335 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4337 struct hci_ev_channel_selected *ev = (void *)skb->data;
4338 struct hci_conn *hcon;
4340 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4342 skb_pull(skb, sizeof(*ev));
4344 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4348 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event.
 *
 * On success, marks the AMP connection BT_CONNECTED, copies the peer
 * address from the associated BR/EDR link, registers debugfs/sysfs
 * entries and notifies the AMP manager. Connections without an AMP
 * manager (or with an error status — handling elided here) are bailed
 * out of early under hdev->lock.
 */
4351 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4352 struct sk_buff *skb)
4354 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4355 struct hci_conn *hcon, *bredr_hcon;
4357 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4362 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4364 hci_dev_unlock(hdev);
4368 if (!hcon->amp_mgr) {
4369 hci_dev_unlock(hdev);
4375 hci_dev_unlock(hdev);
/* The AMP link piggybacks on the L2CAP connection of the BR/EDR link. */
4379 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4381 hcon->state = BT_CONNECTED;
4382 bacpy(&hcon->dst, &bredr_hcon->dst);
/* hold + drop arms the disconnect timeout bookkeeping for the link. */
4384 hci_conn_hold(hcon);
4385 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4386 hci_conn_drop(hcon);
4388 hci_debugfs_create_conn(hcon);
4389 hci_conn_add_sysfs(hcon);
/* Tell the AMP manager the physical link is up. */
4391 amp_physical_cfm(bredr_hcon, hcon);
4393 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event.
 *
 * Creates an hci_chan for the physical link, records the logical-link
 * handle, and if an AMP manager with a pending BR/EDR L2CAP channel
 * exists, confirms logical-link setup to L2CAP using the controller's
 * block MTU.
 */
4396 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4398 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4399 struct hci_conn *hcon;
4400 struct hci_chan *hchan;
4401 struct amp_mgr *mgr;
4403 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4404 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4407 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4411 /* Create AMP hchan */
4412 hchan = hci_chan_create(hcon);
4416 hchan->handle = le16_to_cpu(ev->handle);
4419 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4421 mgr = hcon->amp_mgr;
4422 if (mgr && mgr->bredr_chan) {
4423 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4425 l2cap_chan_lock(bredr_chan);
/* AMP data is block-based; switch the L2CAP MTU accordingly. */
4427 bredr_chan->conn->mtu = hdev->block_mtu;
4428 l2cap_logical_cfm(bredr_chan, hchan, 0);
4429 hci_conn_hold(hcon);
4431 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnection Logical Link Complete event.
 *
 * Looks up the logical channel by handle and, if it is an AMP channel,
 * tears down the logical link with the reported reason.
 */
4435 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4436 struct sk_buff *skb)
4438 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4439 struct hci_chan *hchan;
4441 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4442 le16_to_cpu(ev->handle), ev->status);
4449 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
/* Only AMP channels are handled here; plain ACL channels are skipped. */
4450 if (!hchan || !hchan->amp)
4453 amp_destroy_logical_link(hchan, ev->reason);
4456 hci_dev_unlock(hdev);
/* AMP Disconnection Physical Link Complete event.
 *
 * Marks the matching AMP link closed and notifies the disconnect
 * confirmation path with the controller-reported reason.
 */
4459 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4460 struct sk_buff *skb)
4462 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4463 struct hci_conn *hcon;
4465 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4472 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* Guard both a missing connection and a non-AMP link type. */
4473 if (hcon && hcon->type == AMP_LINK) {
4474 hcon->state = BT_CLOSED;
4475 hci_disconn_cfm(hcon, ev->reason);
4479 hci_dev_unlock(hdev);
/* LE Connection Complete event.
 *
 * Establishes (or fails) an LE connection: resolves initiator/responder
 * address types based on role and privacy settings, converts an RPA back
 * to the identity address via the IRK store, enforces the blocklist,
 * notifies mgmt, and either starts the remote-features exchange or goes
 * straight to BT_CONNECTED. Finally, any pending connection parameters
 * for the peer are consumed and background scanning is re-evaluated.
 * Runs under hdev->lock.
 */
4483 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4485 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4486 struct hci_conn_params *params;
4487 struct hci_conn *conn;
4488 struct smp_irk *irk;
4491 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4495 /* All controllers implicitly stop advertising in the event of a
4496 * connection, so ensure that the state bit is cleared.
4498 hci_dev_clear_flag(hdev, HCI_LE_ADV);
4500 conn = hci_lookup_le_connect(hdev);
/* No locally-initiated connect in flight: create a fresh conn object. */
4502 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4504 BT_ERR("No memory for new connection");
4508 conn->dst_type = ev->bdaddr_type;
4510 /* If we didn't have a hci_conn object previously
4511 * but we're in master role this must be something
4512 * initiated using a white list. Since white list based
4513 * connections are not "first class citizens" we don't
4514 * have full tracking of them. Therefore, we go ahead
4515 * with a "best effort" approach of determining the
4516 * initiator address based on the HCI_PRIVACY flag.
4519 conn->resp_addr_type = ev->bdaddr_type;
4520 bacpy(&conn->resp_addr, &ev->bdaddr);
/* With privacy enabled our initiator address is the current RPA;
 * otherwise fall back to the controller's identity address.
 */
4521 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4522 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4523 bacpy(&conn->init_addr, &hdev->rpa);
4525 hci_copy_identity_address(hdev,
4527 &conn->init_addr_type);
/* Connection came through; stop the LE connect timeout timer. */
4531 cancel_delayed_work(&conn->le_conn_timeout);
4535 /* Set the responder (our side) address type based on
4536 * the advertising address type.
4538 conn->resp_addr_type = hdev->adv_addr_type;
4539 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4540 bacpy(&conn->resp_addr, &hdev->random_addr);
4542 bacpy(&conn->resp_addr, &hdev->bdaddr);
4544 conn->init_addr_type = ev->bdaddr_type;
4545 bacpy(&conn->init_addr, &ev->bdaddr);
4547 /* For incoming connections, set the default minimum
4548 * and maximum connection interval. They will be used
4549 * to check if the parameters are in range and if not
4550 * trigger the connection update procedure.
4552 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4553 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4556 /* Lookup the identity address from the stored connection
4557 * address and address type.
4559 * When establishing connections to an identity address, the
4560 * connection procedure will store the resolvable random
4561 * address first. Now if it can be converted back into the
4562 * identity address, start using the identity address from
4565 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4567 bacpy(&conn->dst, &irk->bdaddr);
4568 conn->dst_type = irk->addr_type;
/* Non-zero event status means the connect attempt failed. */
4572 hci_le_conn_failed(conn, ev->status);
4576 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4577 addr_type = BDADDR_LE_PUBLIC;
4579 addr_type = BDADDR_LE_RANDOM;
4581 /* Drop the connection if the device is blocked */
4582 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4583 hci_conn_drop(conn);
/* Only emit the mgmt connected event once per connection. */
4587 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4588 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4590 conn->sec_level = BT_SECURITY_LOW;
4591 conn->handle = __le16_to_cpu(ev->handle);
4592 conn->state = BT_CONFIG;
4594 conn->le_conn_interval = le16_to_cpu(ev->interval);
4595 conn->le_conn_latency = le16_to_cpu(ev->latency);
4596 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4598 hci_debugfs_create_conn(conn);
4599 hci_conn_add_sysfs(conn);
4602 /* The remote features procedure is defined for master
4603 * role only. So only in case of an initiated connection
4604 * request the remote features.
4606 * If the local controller supports slave-initiated features
4607 * exchange, then requesting the remote features in slave
4608 * role is possible. Otherwise just transition into the
4609 * connected state without requesting the remote features.
4612 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
4613 struct hci_cp_le_read_remote_features cp;
4615 cp.handle = __cpu_to_le16(conn->handle);
4617 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
4620 hci_conn_hold(conn);
/* Feature exchange not possible: report connected immediately. */
4622 conn->state = BT_CONNECTED;
4623 hci_connect_cfm(conn, ev->status);
4626 hci_connect_cfm(conn, ev->status);
/* Consume any pending auto-connect entry for this peer so the
 * params no longer hold a reference to the connection object.
 */
4629 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4632 list_del_init(&params->action);
4634 hci_conn_drop(params->conn);
4635 hci_conn_put(params->conn);
4636 params->conn = NULL;
4641 hci_update_background_scan(hdev);
4642 hci_dev_unlock(hdev);
/* LE Connection Update Complete event.
 *
 * Refreshes the cached connection interval, latency and supervision
 * timeout on the affected connection (lookup by handle, under
 * hdev->lock).
 */
4645 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4646 struct sk_buff *skb)
4648 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4649 struct hci_conn *conn;
4651 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4658 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4660 conn->le_conn_interval = le16_to_cpu(ev->interval);
4661 conn->le_conn_latency = le16_to_cpu(ev->latency);
4662 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4665 hci_dev_unlock(hdev);
4668 /* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertising report should trigger an LE
 * connection attempt, and start one if so.
 *
 * Returns the hci_conn created by hci_connect_le() on success, or NULL
 * when no connection should be (or could be) attempted. Only ADV_IND
 * and ADV_DIRECT_IND are connectable; blocked devices, existing slave
 * links, and peers without a matching pend_le_conns entry are skipped.
 */
4669 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4671 u8 addr_type, u8 adv_type,
4672 bdaddr_t *direct_rpa)
4674 struct hci_conn *conn;
4675 struct hci_conn_params *params;
4677 /* If the event is not connectable don't proceed further */
4678 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4681 /* Ignore if the device is blocked */
4682 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4685 /* Most controller will fail if we try to create new connections
4686 * while we have an existing one in slave role.
4688 if (hdev->conn_hash.le_num_slave > 0)
4691 /* If we're not connectable only connect devices that we have in
4692 * our pend_le_conns list.
4694 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
4699 if (!params->explicit_connect) {
4700 switch (params->auto_connect) {
4701 case HCI_AUTO_CONN_DIRECT:
4702 /* Only devices advertising with ADV_DIRECT_IND are
4703 * triggering a connection attempt. This is allowing
4704 * incoming connections from slave devices.
4706 if (adv_type != LE_ADV_DIRECT_IND)
4709 case HCI_AUTO_CONN_ALWAYS:
4710 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
4711 * are triggering a connection attempt. This means
4712 * that incoming connectioms from slave device are
4713 * accepted and also outgoing connections to slave
4714 * devices are established when found.
4722 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4723 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
4725 if (!IS_ERR(conn)) {
4726 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
4727 * by higher layer that tried to connect, if no then
4728 * store the pointer since we don't really have any
4729 * other owner of the object besides the params that
4730 * triggered it. This way we can abort the connection if
4731 * the parameters get removed and keep the reference
4732 * count consistent once the connection is established.
4735 if (!params->explicit_connect)
4736 params->conn = hci_conn_get(conn);
4741 switch (PTR_ERR(conn)) {
4743 /* If hci_connect() returns -EBUSY it means there is already
4744 * an LE connection attempt going on. Since controllers don't
4745 * support more than one connection attempt at the time, we
4746 * don't consider this an error case.
4750 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process a single LE advertising (or directed advertising) report.
 *
 * Validates the report type and length, trims zero-padded AD data,
 * filters directed reports that are not addressed to us, resolves RPAs
 * to identity addresses, optionally triggers a pending connection via
 * check_pending_le_conn(), and finally generates mgmt device-found
 * events — merging ADV_IND/ADV_SCAN_IND reports with a following
 * SCAN_RSP during active scanning.
 *
 * @direct_addr: non-NULL only for LE Direct Advertising Reports.
 */
4757 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4758 u8 bdaddr_type, bdaddr_t *direct_addr,
4759 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4761 struct discovery_state *d = &hdev->discovery;
4762 struct smp_irk *irk;
4763 struct hci_conn *conn;
/* Reject report types the core spec does not define. */
4770 case LE_ADV_DIRECT_IND:
4771 case LE_ADV_SCAN_IND:
4772 case LE_ADV_NONCONN_IND:
4773 case LE_ADV_SCAN_RSP:
4776 BT_ERR_RATELIMITED("Unknown advetising packet type: 0x%02x",
/* Legacy advertising data is capped at 31 bytes by the spec. */
4781 if (len > HCI_MAX_AD_LENGTH) {
4782 pr_err_ratelimited("legacy adv larger than 31 bytes");
4786 /* Find the end of the data in case the report contains padded zero
4787 * bytes at the end causing an invalid length value.
4789 * When data is NULL, len is 0 so there is no need for extra ptr
4790 * check as 'ptr < data + 0' is already false in such case.
4792 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
4793 if (ptr + 1 + *ptr > data + len)
4797 real_len = ptr - data;
4799 /* Adjust for actual length */
4800 if (len != real_len) {
4801 BT_ERR_RATELIMITED("%s advertising data length corrected",
4806 /* If the direct address is present, then this report is from
4807 * a LE Direct Advertising Report event. In that case it is
4808 * important to see if the address is matching the local
4809 * controller address.
4812 /* Only resolvable random addresses are valid for these
4813 * kind of reports and others can be ignored.
4815 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4818 /* If the controller is not using resolvable random
4819 * addresses, then this report can be ignored.
4821 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4824 /* If the local IRK of the controller does not match
4825 * with the resolvable random address provided, then
4826 * this report can be ignored.
4828 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4832 /* Check if we need to convert to identity address */
4833 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4835 bdaddr = &irk->bdaddr;
4836 bdaddr_type = irk->addr_type;
4839 /* Check if we have been requested to connect to this device.
4841 * direct_addr is set only for directed advertising reports (it is NULL
4842 * for advertising reports) and is already verified to be RPA above.
4844 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
/* Stash the AD data on the connection so mgmt_device_connected
 * can include it later.
 */
4846 if (conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
4847 /* Store report for later inclusion by
4848 * mgmt_device_connected
4850 memcpy(conn->le_adv_data, data, len);
4851 conn->le_adv_data_len = len;
4854 /* Passive scanning shouldn't trigger any device found events,
4855 * except for devices marked as CONN_REPORT for which we do send
4856 * device found events.
4858 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4859 if (type == LE_ADV_DIRECT_IND)
4862 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4863 bdaddr, bdaddr_type))
4866 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4867 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4870 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4871 rssi, flags, data, len, NULL, 0);
4875 /* When receiving non-connectable or scannable undirected
4876 * advertising reports, this means that the remote device is
4877 * not connectable and then clearly indicate this in the
4878 * device found event.
4880 * When receiving a scan response, then there is no way to
4881 * know if the remote device is connectable or not. However
4882 * since scan responses are merged with a previously seen
4883 * advertising report, the flags field from that report
4886 * In the really unlikely case that a controller get confused
4887 * and just sends a scan response event, then it is marked as
4888 * not connectable as well.
4890 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4891 type == LE_ADV_SCAN_RSP)
4892 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4896 /* If there's nothing pending either store the data from this
4897 * event or send an immediate device found event if the data
4898 * should not be stored for later.
4900 if (!has_pending_adv_report(hdev)) {
4901 /* If the report will trigger a SCAN_REQ store it for
4904 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4905 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4906 rssi, flags, data, len);
4910 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4911 rssi, flags, data, len, NULL, 0);
4915 /* Check if the pending report is for the same device as the new one */
4916 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4917 bdaddr_type == d->last_adv_addr_type);
4919 /* If the pending data doesn't match this report or this isn't a
4920 * scan response (e.g. we got a duplicate ADV_IND) then force
4921 * sending of the pending data.
4923 if (type != LE_ADV_SCAN_RSP || !match) {
4924 /* Send out whatever is in the cache, but skip duplicates */
4926 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4927 d->last_adv_addr_type, NULL,
4928 d->last_adv_rssi, d->last_adv_flags,
4930 d->last_adv_data_len, NULL, 0);
4932 /* If the new report will trigger a SCAN_REQ store it for
4935 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4936 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4937 rssi, flags, data, len);
4941 /* The advertising reports cannot be merged, so clear
4942 * the pending report and send out a device found event.
4944 clear_pending_adv_report(hdev);
4945 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4946 rssi, flags, data, len, NULL, 0);
4950 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4951 * the new event is a SCAN_RSP. We can therefore proceed with
4952 * sending a merged device found event.
4954 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4955 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4956 d->last_adv_data, d->last_adv_data_len, data, len);
4957 clear_pending_adv_report(hdev);
/* LE Advertising Report event.
 *
 * Walks the variable-length list of reports in the event payload,
 * bounds-checking each entry against the skb tail before handing it to
 * process_adv_report(). The single RSSI byte follows the AD data of
 * each report.
 */
4960 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4962 u8 num_reports = skb->data[0];
4963 void *ptr = &skb->data[1];
4967 while (num_reports--) {
4968 struct hci_ev_le_advertising_info *ev = ptr;
/* A header claiming more reports than the skb holds is malicious
 * or corrupt input — stop parsing entirely.
 */
4971 if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
4972 bt_dev_err(hdev, "Malicious advertising data.");
/* NOTE(review): this check allows ev->data + ev->length to equal
 * skb_tail_pointer(), yet the rssi read below touches
 * ev->data[ev->length] — that looks like a potential one-byte
 * over-read when the rssi byte is missing. Confirm against the
 * upstream fix for this handler.
 */
4976 if (ev->length <= HCI_MAX_AD_LENGTH &&
4977 ev->data + ev->length <= skb_tail_pointer(skb)) {
4978 rssi = ev->data[ev->length];
4979 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4980 ev->bdaddr_type, NULL, 0, rssi,
4981 ev->data, ev->length);
4983 bt_dev_err(hdev, "Dropping invalid advertising data");
/* Advance past this report's header, AD data and rssi byte. */
4986 ptr += sizeof(*ev) + ev->length + 1;
4989 hci_dev_unlock(hdev);
/* LE Read Remote Features Complete event.
 *
 * Caches the remote LE feature page on the connection and, if the
 * connection was still in BT_CONFIG, completes the connect sequence.
 * Error 0x1a (unsupported remote feature) from a slave-initiated
 * exchange is deliberately treated as success.
 */
4992 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
4993 struct sk_buff *skb)
4995 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
4996 struct hci_conn *conn;
4998 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5002 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5005 memcpy(conn->features[0], ev->features, 8);
5007 if (conn->state == BT_CONFIG) {
5010 /* If the local controller supports slave-initiated
5011 * features exchange, but the remote controller does
5012 * not, then it is possible that the error code 0x1a
5013 * for unsupported remote feature gets returned.
5015 * In this specific case, allow the connection to
5016 * transition into connected state and mark it as
5019 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5020 !conn->out && ev->status == 0x1a)
5023 status = ev->status;
5025 conn->state = BT_CONNECTED;
5026 hci_connect_cfm(conn, status);
5027 hci_conn_drop(conn);
5031 hci_dev_unlock(hdev);
/* LE Long Term Key Request event.
 *
 * The controller asks for the LTK to encrypt the link. Looks up the key
 * by peer address/type/role, validates EDiv/Rand (both must be zero for
 * Secure Connections keys, must match the stored values otherwise),
 * sends the positive reply with the key, and updates the connection's
 * pending security level and encryption key size. STKs are one-shot
 * keys and are removed after use. Any failure path sends the negative
 * reply instead.
 */
5034 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5036 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5037 struct hci_cp_le_ltk_reply cp;
5038 struct hci_cp_le_ltk_neg_reply neg;
5039 struct hci_conn *conn;
5040 struct smp_ltk *ltk;
5042 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5046 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5050 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5054 if (smp_ltk_is_sc(ltk)) {
5055 /* With SC both EDiv and Rand are set to zero */
5056 if (ev->ediv || ev->rand)
5059 /* For non-SC keys check that EDiv and Rand match */
5060 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Pad the reply key to full size beyond the negotiated key length. */
5064 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5065 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5066 cp.handle = cpu_to_le16(conn->handle);
5068 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5070 conn->enc_key_size = ltk->enc_size;
5072 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5074 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5075 * temporary key used to encrypt a connection following
5076 * pairing. It is used during the Encrypted Session Setup to
5077 * distribute the keys. Later, security can be re-established
5078 * using a distributed LTK.
5080 if (ltk->type == SMP_STK) {
5081 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5082 list_del_rcu(&ltk->list);
5083 kfree_rcu(ltk, rcu);
5085 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5088 hci_dev_unlock(hdev);
/* Failure path: reject the LTK request for this handle. */
5093 neg.handle = ev->handle;
5094 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5095 hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request Negative Reply for
 * @handle, rejecting the peer's proposed parameters. The rejection
 * reason parameter/assignment appears elided in this extraction —
 * confirm in the full source.
 */
5098 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5101 struct hci_cp_le_conn_param_req_neg_reply cp;
5103 cp.handle = cpu_to_le16(handle);
5106 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* LE Remote Connection Parameter Request event.
 *
 * The peer proposes new connection parameters. Rejects the request if
 * the handle is unknown, the link is not connected, or the parameters
 * fail hci_check_conn_params(). In master role the accepted values are
 * also stored in the peer's hci_conn_params and reported to mgmt as a
 * new-parameter hint. Finally a positive reply echoing the proposed
 * values is sent to the controller.
 */
5110 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5111 struct sk_buff *skb)
5113 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5114 struct hci_cp_le_conn_param_req_reply cp;
5115 struct hci_conn *hcon;
5116 u16 handle, min, max, latency, timeout;
5118 handle = le16_to_cpu(ev->handle);
5119 min = le16_to_cpu(ev->interval_min);
5120 max = le16_to_cpu(ev->interval_max);
5121 latency = le16_to_cpu(ev->latency);
5122 timeout = le16_to_cpu(ev->timeout);
5124 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5125 if (!hcon || hcon->state != BT_CONNECTED)
5126 return send_conn_param_neg_reply(hdev, handle,
5127 HCI_ERROR_UNKNOWN_CONN_ID);
5129 if (hci_check_conn_params(min, max, latency, timeout))
5130 return send_conn_param_neg_reply(hdev, handle,
5131 HCI_ERROR_INVALID_LL_PARAMS);
5133 if (hcon->role == HCI_ROLE_MASTER) {
5134 struct hci_conn_params *params;
/* Persist the accepted parameters for future reconnections. */
5139 params = hci_conn_params_lookup(hdev, &hcon->dst,
5142 params->conn_min_interval = min;
5143 params->conn_max_interval = max;
5144 params->conn_latency = latency;
5145 params->supervision_timeout = timeout;
5151 hci_dev_unlock(hdev);
5153 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5154 store_hint, min, max, latency, timeout);
/* Accept: echo the proposed values back in little-endian form
 * (they are still in wire byte order in the event structure).
 */
5157 cp.handle = ev->handle;
5158 cp.interval_min = ev->interval_min;
5159 cp.interval_max = ev->interval_max;
5160 cp.latency = ev->latency;
5161 cp.timeout = ev->timeout;
5165 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* LE Direct Advertising Report event.
 *
 * Each report in this event has a fixed size (no variable AD data), so
 * a single up-front length check covers the whole array before the
 * reports are forwarded to process_adv_report() with their direct
 * (initiator) address.
 */
5168 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5169 struct sk_buff *skb)
5171 u8 num_reports = skb->data[0];
5172 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
/* Guard against a report count that exceeds the actual payload. */
5174 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
5179 for (; num_reports; num_reports--, ev++)
5180 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5181 ev->bdaddr_type, &ev->direct_addr,
5182 ev->direct_addr_type, ev->rssi, NULL, 0);
5184 hci_dev_unlock(hdev);
/* LE Meta event demultiplexer.
 *
 * Strips the meta-event header and dispatches the skb (now positioned
 * at the subevent payload) to the matching LE subevent handler.
 */
5187 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5189 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5191 skb_pull(skb, sizeof(*le_ev));
5193 switch (le_ev->subevent) {
5194 case HCI_EV_LE_CONN_COMPLETE:
5195 hci_le_conn_complete_evt(hdev, skb);
5198 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5199 hci_le_conn_update_complete_evt(hdev, skb);
5202 case HCI_EV_LE_ADVERTISING_REPORT:
5203 hci_le_adv_report_evt(hdev, skb);
5206 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5207 hci_le_remote_feat_complete_evt(hdev, skb);
5210 case HCI_EV_LE_LTK_REQ:
5211 hci_le_ltk_request_evt(hdev, skb);
5214 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5215 hci_le_remote_conn_param_req_evt(hdev, skb);
5218 case HCI_EV_LE_DIRECT_ADV_REPORT:
5219 hci_le_direct_adv_report_evt(hdev, skb);
/* Check whether @skb is the Command Complete (or terminating event) for
 * the request identified by @opcode/@event.
 *
 * Returns true when the skb should be handed to the request's
 * skb-completion callback: either it matches the request's expected
 * event, it is a Command Status (no extra parameters to extract), or it
 * is a Command Complete whose opcode matches. On true, the skb has been
 * advanced past the event (and cmd_complete) headers.
 */
5227 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5228 u8 event, struct sk_buff *skb)
5230 struct hci_ev_cmd_complete *ev;
5231 struct hci_event_hdr *hdr;
5236 if (skb->len < sizeof(*hdr)) {
5237 BT_ERR("Too short HCI event");
5241 hdr = (void *) skb->data;
5242 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* Requests may wait for a specific event rather than Command
 * Complete; a direct match is accepted as-is.
 */
5245 if (hdr->evt != event)
5250 /* Check if request ended in Command Status - no way to retreive
5251 * any extra parameters in this case.
5253 if (hdr->evt == HCI_EV_CMD_STATUS)
5256 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5257 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
5261 if (skb->len < sizeof(*ev)) {
5262 BT_ERR("Too short cmd_complete event");
5266 ev = (void *) skb->data;
5267 skb_pull(skb, sizeof(*ev));
5269 if (opcode != __le16_to_cpu(ev->opcode)) {
5270 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5271 __le16_to_cpu(ev->opcode));
/* Main HCI event packet dispatcher.
 *
 * Entry point for every HCI event received from the controller. If the
 * event completes the currently outstanding command request, the
 * request-completion callbacks are resolved first. A pristine clone of
 * the skb is kept when a skb-based completion may be needed, because the
 * individual handlers consume the original via skb_pull(). The large
 * switch then routes the event to its specific handler, after which any
 * resolved completion callback is invoked.
 */
5278 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5280 struct hci_event_hdr *hdr = (void *) skb->data;
5281 hci_req_complete_t req_complete = NULL;
5282 hci_req_complete_skb_t req_complete_skb = NULL;
5283 struct sk_buff *orig_skb = NULL;
5284 u8 status = 0, event = hdr->evt, req_evt = 0;
5285 u16 opcode = HCI_OP_NOP;
/* Event code 0x00 is not defined by the spec; warn and drop.
 * (The guarding condition appears elided in this extraction.)
 */
5288 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
/* Resolve the opcode of the in-flight command if this event is the
 * one that command was waiting for.
 */
5292 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5293 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5294 opcode = __le16_to_cpu(cmd_hdr->opcode);
5295 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5300 /* If it looks like we might end up having to call
5301 * req_complete_skb, store a pristine copy of the skb since the
5302 * various handlers may modify the original one through
5303 * skb_pull() calls, etc.
5305 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5306 event == HCI_EV_CMD_COMPLETE)
5307 orig_skb = skb_clone(skb, GFP_KERNEL);
5309 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* Dispatch to the per-event handler. */
5312 case HCI_EV_INQUIRY_COMPLETE:
5313 hci_inquiry_complete_evt(hdev, skb);
5316 case HCI_EV_INQUIRY_RESULT:
5317 hci_inquiry_result_evt(hdev, skb);
5320 case HCI_EV_CONN_COMPLETE:
5321 hci_conn_complete_evt(hdev, skb);
5324 case HCI_EV_CONN_REQUEST:
5325 hci_conn_request_evt(hdev, skb);
5328 case HCI_EV_DISCONN_COMPLETE:
5329 hci_disconn_complete_evt(hdev, skb);
5332 case HCI_EV_AUTH_COMPLETE:
5333 hci_auth_complete_evt(hdev, skb);
5336 case HCI_EV_REMOTE_NAME:
5337 hci_remote_name_evt(hdev, skb);
5340 case HCI_EV_ENCRYPT_CHANGE:
5341 hci_encrypt_change_evt(hdev, skb);
5344 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5345 hci_change_link_key_complete_evt(hdev, skb);
5348 case HCI_EV_REMOTE_FEATURES:
5349 hci_remote_features_evt(hdev, skb);
5352 case HCI_EV_CMD_COMPLETE:
5353 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5354 &req_complete, &req_complete_skb);
5357 case HCI_EV_CMD_STATUS:
5358 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5362 case HCI_EV_HARDWARE_ERROR:
5363 hci_hardware_error_evt(hdev, skb);
5366 case HCI_EV_ROLE_CHANGE:
5367 hci_role_change_evt(hdev, skb);
5370 case HCI_EV_NUM_COMP_PKTS:
5371 hci_num_comp_pkts_evt(hdev, skb);
5374 case HCI_EV_MODE_CHANGE:
5375 hci_mode_change_evt(hdev, skb);
5378 case HCI_EV_PIN_CODE_REQ:
5379 hci_pin_code_request_evt(hdev, skb);
5382 case HCI_EV_LINK_KEY_REQ:
5383 hci_link_key_request_evt(hdev, skb);
5386 case HCI_EV_LINK_KEY_NOTIFY:
5387 hci_link_key_notify_evt(hdev, skb);
5390 case HCI_EV_CLOCK_OFFSET:
5391 hci_clock_offset_evt(hdev, skb);
5394 case HCI_EV_PKT_TYPE_CHANGE:
5395 hci_pkt_type_change_evt(hdev, skb);
5398 case HCI_EV_PSCAN_REP_MODE:
5399 hci_pscan_rep_mode_evt(hdev, skb);
5402 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5403 hci_inquiry_result_with_rssi_evt(hdev, skb);
5406 case HCI_EV_REMOTE_EXT_FEATURES:
5407 hci_remote_ext_features_evt(hdev, skb);
5410 case HCI_EV_SYNC_CONN_COMPLETE:
5411 hci_sync_conn_complete_evt(hdev, skb);
5414 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5415 hci_extended_inquiry_result_evt(hdev, skb);
5418 case HCI_EV_KEY_REFRESH_COMPLETE:
5419 hci_key_refresh_complete_evt(hdev, skb);
5422 case HCI_EV_IO_CAPA_REQUEST:
5423 hci_io_capa_request_evt(hdev, skb);
5426 case HCI_EV_IO_CAPA_REPLY:
5427 hci_io_capa_reply_evt(hdev, skb);
5430 case HCI_EV_USER_CONFIRM_REQUEST:
5431 hci_user_confirm_request_evt(hdev, skb);
5434 case HCI_EV_USER_PASSKEY_REQUEST:
5435 hci_user_passkey_request_evt(hdev, skb);
5438 case HCI_EV_USER_PASSKEY_NOTIFY:
5439 hci_user_passkey_notify_evt(hdev, skb);
5442 case HCI_EV_KEYPRESS_NOTIFY:
5443 hci_keypress_notify_evt(hdev, skb);
5446 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5447 hci_simple_pair_complete_evt(hdev, skb);
5450 case HCI_EV_REMOTE_HOST_FEATURES:
5451 hci_remote_host_features_evt(hdev, skb);
5454 case HCI_EV_LE_META:
5455 hci_le_meta_evt(hdev, skb);
5458 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5459 hci_remote_oob_data_request_evt(hdev, skb);
/* High Speed (AMP) events are only compiled in with CONFIG_BT_HS. */
5462 #if IS_ENABLED(CONFIG_BT_HS)
5463 case HCI_EV_CHANNEL_SELECTED:
5464 hci_chan_selected_evt(hdev, skb);
5467 case HCI_EV_PHY_LINK_COMPLETE:
5468 hci_phy_link_complete_evt(hdev, skb);
5471 case HCI_EV_LOGICAL_LINK_COMPLETE:
5472 hci_loglink_complete_evt(hdev, skb);
5475 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5476 hci_disconn_loglink_complete_evt(hdev, skb);
5479 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5480 hci_disconn_phylink_complete_evt(hdev, skb);
5484 case HCI_EV_NUM_COMP_BLOCKS:
5485 hci_num_comp_blocks_evt(hdev, skb);
5489 BT_DBG("%s event 0x%2.2x", hdev->name, event);
/* Invoke whichever request-completion callback was resolved above;
 * skb-based completion gets the pristine clone, which is validated
 * (and positioned) by hci_get_cmd_complete() first.
 */
5494 req_complete(hdev, status, opcode);
5495 } else if (req_complete_skb) {
5496 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
5497 kfree_skb(orig_skb);
5500 req_complete_skb(hdev, status, opcode, orig_skb);
5504 kfree_skb(orig_skb);
5506 hdev->stat.evt_rx++;