/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_Inquiry_Cancel: clears HCI_INQUIRY, wakes
 * bit-waiters, and moves discovery to STOPPED unless an LE active scan is
 * still running.
 */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
47 __u8 status = *((__u8 *) skb->data);
49 BT_DBG("%s status 0x%2.2x", hdev->name, status);
51 /* It is possible that we receive Inquiry Complete event right
52 * before we receive Inquiry Cancel Command Complete event, in
53 * which case the latter event should have status of Command
54 * Disallowed (0x0c). This should not be treated as error, since
55 * we actually achieve what Inquiry Cancel wants to achieve,
56 * which is to end the last Inquiry session.
/* Benign race: 0x0c (Command Disallowed) with no inquiry in flight is ignored. */
58 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
59 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
68 clear_bit(HCI_INQUIRY, &hdev->flags);
69 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
70 wake_up_bit(&hdev->flags, HCI_INQUIRY);
73 /* Set discovery state to stopped if we're not doing LE active
76 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
77 hdev->le_scan_type != LE_SCAN_ACTIVE)
78 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
81 hci_conn_check_pending(hdev);
/* Command Complete handler for HCI_Periodic_Inquiry_Mode: marks the controller
 * as periodically inquiring (status check line presumably dropped in extraction).
 */
84 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
86 __u8 status = *((__u8 *) skb->data);
88 BT_DBG("%s status 0x%2.2x", hdev->name, status);
93 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete handler for HCI_Exit_Periodic_Inquiry_Mode. */
96 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
98 __u8 status = *((__u8 *) skb->data);
100 BT_DBG("%s status 0x%2.2x", hdev->name, status);
105 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
/* Re-check queued connection attempts now that inquiry has ended. */
107 hci_conn_check_pending(hdev);
/* Command Complete handler for HCI_Remote_Name_Request_Cancel: debug-log only. */
110 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
113 BT_DBG("%s", hdev->name);
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_Role_Discovery: caches the reported role
 * (central/peripheral) on the matching connection, under the hdev lock.
 */
116 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
118 struct hci_rp_role_discovery *rp = (void *) skb->data;
119 struct hci_conn *conn;
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
130 conn->role = rp->role;
132 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Link_Policy_Settings: caches the
 * policy on the connection identified by the returned handle.
 */
135 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
137 struct hci_rp_read_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
140 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
147 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 conn->link_policy = __le16_to_cpu(rp->policy);
151 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Write_Link_Policy_Settings: the new policy
 * is not echoed in the response, so it is read back from the sent command
 * parameters (offset 2 past the handle), which may be unaligned.
 */
154 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156 struct hci_rp_write_link_policy *rp = (void *) skb->data;
157 struct hci_conn *conn;
160 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
171 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
173 conn->link_policy = get_unaligned_le16(sent + 2);
175 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Default_Link_Policy_Settings. */
178 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
181 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
183 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
188 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete handler for HCI_Write_Default_Link_Policy_Settings:
 * value comes from the sent command buffer (unaligned little-endian).
 */
191 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
194 __u8 status = *((__u8 *) skb->data);
197 BT_DBG("%s status 0x%2.2x", hdev->name, status);
202 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
206 hdev->link_policy = get_unaligned_le16(sent);
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_Reset: clears HCI_RESET and reverts all
 * volatile controller state (discovery, TX power caches, advertising and
 * scan-response data, LE scan type, SSP debug mode, LE white list) to its
 * post-reset defaults.
 */
209 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
211 __u8 status = *((__u8 *) skb->data);
213 BT_DBG("%s status 0x%2.2x", hdev->name, status);
215 clear_bit(HCI_RESET, &hdev->flags);
220 /* Reset all non-persistent flags */
221 hci_dev_clear_volatile_flags(hdev);
223 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* TX power caches become unknown until re-read from the controller. */
225 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
226 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
228 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
229 hdev->adv_data_len = 0;
231 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
232 hdev->scan_rsp_data_len = 0;
234 hdev->le_scan_type = LE_SCAN_PASSIVE;
236 hdev->ssp_debug_mode = 0;
238 hci_bdaddr_list_clear(&hdev->le_white_list);
241 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
244 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
245 struct hci_cp_read_stored_link_key *sent;
247 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
249 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
253 if (!rp->status && sent->read_all == 0x01) {
254 hdev->stored_max_keys = rp->max_keys;
255 hdev->stored_num_keys = rp->num_keys;
259 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
262 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
264 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
269 if (rp->num_keys <= hdev->stored_num_keys)
270 hdev->stored_num_keys -= rp->num_keys;
272 hdev->stored_num_keys = 0;
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_Write_Local_Name: notifies mgmt and, on
 * success, caches the name from the sent command (under the hdev lock).
 */
275 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
277 __u8 status = *((__u8 *) skb->data);
280 BT_DBG("%s status 0x%2.2x", hdev->name, status);
282 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
288 if (hci_dev_test_flag(hdev, HCI_MGMT))
289 mgmt_set_local_name_complete(hdev, sent, status);
291 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
293 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Local_Name: the name is only cached
 * during initial setup/config so a user-set name is not overwritten later.
 */
296 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
298 struct hci_rp_read_local_name *rp = (void *) skb->data;
300 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
305 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
306 hci_dev_test_flag(hdev, HCI_CONFIG))
307 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete handler for HCI_Write_Authentication_Enable: mirrors the
 * sent parameter into the HCI_AUTH flag and notifies mgmt.
 */
310 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
312 __u8 status = *((__u8 *) skb->data);
315 BT_DBG("%s status 0x%2.2x", hdev->name, status);
317 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
324 __u8 param = *((__u8 *) sent);
326 if (param == AUTH_ENABLED)
327 set_bit(HCI_AUTH, &hdev->flags);
329 clear_bit(HCI_AUTH, &hdev->flags);
332 if (hci_dev_test_flag(hdev, HCI_MGMT))
333 mgmt_auth_enable_complete(hdev, status);
335 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Write_Encryption_Mode: mirrors the sent
 * parameter into the HCI_ENCRYPT flag.
 */
338 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
340 __u8 status = *((__u8 *) skb->data);
344 BT_DBG("%s status 0x%2.2x", hdev->name, status);
349 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
353 param = *((__u8 *) sent);
356 set_bit(HCI_ENCRYPT, &hdev->flags);
358 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete handler for HCI_Write_Scan_Enable: updates the ISCAN and
 * PSCAN flags from the sent scan-enable bitmask; the discoverable timeout is
 * reset (presumably on failure or scan-off — condition line lost in
 * extraction, TODO confirm against upstream).
 */
361 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
363 __u8 status = *((__u8 *) skb->data);
367 BT_DBG("%s status 0x%2.2x", hdev->name, status);
369 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
373 param = *((__u8 *) sent);
378 hdev->discov_timeout = 0;
382 if (param & SCAN_INQUIRY)
383 set_bit(HCI_ISCAN, &hdev->flags);
385 clear_bit(HCI_ISCAN, &hdev->flags);
387 if (param & SCAN_PAGE)
388 set_bit(HCI_PSCAN, &hdev->flags);
390 clear_bit(HCI_PSCAN, &hdev->flags);
393 hci_dev_unlock(hdev);
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_Read_Class_of_Device: caches the 3-byte
 * class (stored little-endian; byte 2 printed first).
 */
396 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
398 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
400 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
405 memcpy(hdev->dev_class, rp->dev_class, 3);
407 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
408 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete handler for HCI_Write_Class_of_Device: caches the class
 * from the sent command and notifies mgmt (under the hdev lock).
 */
411 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
413 __u8 status = *((__u8 *) skb->data);
416 BT_DBG("%s status 0x%2.2x", hdev->name, status);
418 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
425 memcpy(hdev->dev_class, sent, 3);
427 if (hci_dev_test_flag(hdev, HCI_MGMT))
428 mgmt_set_class_of_dev_complete(hdev, sent, status);
430 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Voice_Setting: caches the value and
 * notifies the driver only when it actually changed.
 */
433 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
435 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
438 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
443 setting = __le16_to_cpu(rp->voice_setting);
445 if (hdev->voice_setting == setting)
448 hdev->voice_setting = setting;
450 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
/* hdev->notify is presumably NULL-checked on a dropped line — TODO confirm. */
453 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete handler for HCI_Write_Voice_Setting: same as above but the
 * value comes from the sent command parameters (unaligned little-endian).
 */
456 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
459 __u8 status = *((__u8 *) skb->data);
463 BT_DBG("%s status 0x%2.2x", hdev->name, status);
468 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
472 setting = get_unaligned_le16(sent);
474 if (hdev->voice_setting == setting)
477 hdev->voice_setting = setting;
479 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
482 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete handler for HCI_Read_Number_of_Supported_IAC. */
485 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
488 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
490 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
495 hdev->num_iac = rp->num_iac;
497 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode: mirrors the
 * sent mode into the cached host-features bit and the HCI_SSP_ENABLED flag,
 * and notifies mgmt (under the hdev lock).
 */
500 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
502 __u8 status = *((__u8 *) skb->data);
503 struct hci_cp_write_ssp_mode *sent;
505 BT_DBG("%s status 0x%2.2x", hdev->name, status);
507 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
515 hdev->features[1][0] |= LMP_HOST_SSP;
517 hdev->features[1][0] &= ~LMP_HOST_SSP;
520 if (hci_dev_test_flag(hdev, HCI_MGMT))
521 mgmt_ssp_enable_complete(hdev, sent->mode, status);
524 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
526 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
529 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support:
 * mirrors the sent support value into the host-features bit and, when mgmt
 * is not active, into the HCI_SC_ENABLED flag.
 */
532 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
534 u8 status = *((u8 *) skb->data);
535 struct hci_cp_write_sc_support *sent;
537 BT_DBG("%s status 0x%2.2x", hdev->name, status);
539 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
547 hdev->features[1][0] |= LMP_HOST_SC;
549 hdev->features[1][0] &= ~LMP_HOST_SC;
552 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
554 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
556 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
559 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Local_Version_Information: versions
 * are only cached during setup/config so runtime responses cannot clobber them.
 */
562 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
564 struct hci_rp_read_local_version *rp = (void *) skb->data;
566 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
571 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
572 hci_dev_test_flag(hdev, HCI_CONFIG)) {
573 hdev->hci_ver = rp->hci_ver;
574 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
575 hdev->lmp_ver = rp->lmp_ver;
576 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
577 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete handler for HCI_Read_Local_Supported_Commands: the command
 * bitmap is only cached during setup/config.
 */
581 static void hci_cc_read_local_commands(struct hci_dev *hdev,
584 struct hci_rp_read_local_commands *rp = (void *) skb->data;
586 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
591 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
592 hci_dev_test_flag(hdev, HCI_CONFIG))
593 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_Read_Local_Supported_Features: caches the
 * feature page and derives the supported ACL packet types and (e)SCO packet
 * types from the individual LMP feature bits.
 */
596 static void hci_cc_read_local_features(struct hci_dev *hdev,
599 struct hci_rp_read_local_features *rp = (void *) skb->data;
601 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
606 memcpy(hdev->features, rp->features, 8);
608 /* Adjust default settings according to features
609 * supported by device. */
611 if (hdev->features[0][0] & LMP_3SLOT)
612 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
614 if (hdev->features[0][0] & LMP_5SLOT)
615 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
617 if (hdev->features[0][1] & LMP_HV2) {
618 hdev->pkt_type |= (HCI_HV2);
619 hdev->esco_type |= (ESCO_HV2);
622 if (hdev->features[0][1] & LMP_HV3) {
623 hdev->pkt_type |= (HCI_HV3);
624 hdev->esco_type |= (ESCO_HV3);
627 if (lmp_esco_capable(hdev))
628 hdev->esco_type |= (ESCO_EV3);
630 if (hdev->features[0][4] & LMP_EV4)
631 hdev->esco_type |= (ESCO_EV4);
633 if (hdev->features[0][4] & LMP_EV5)
634 hdev->esco_type |= (ESCO_EV5);
636 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
637 hdev->esco_type |= (ESCO_2EV3);
639 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
640 hdev->esco_type |= (ESCO_3EV3);
642 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
643 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete handler for HCI_Read_Local_Extended_Features: tracks the
 * highest feature page and caches the returned page (bounds-checked).
 */
646 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
649 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
651 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
656 if (hdev->max_page < rp->max_page)
657 hdev->max_page = rp->max_page;
659 if (rp->page < HCI_MAX_PAGES)
660 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete handler for HCI_Read_Flow_Control_Mode (AMP). */
663 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
666 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
668 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
673 hdev->flow_ctl_mode = rp->mode;
/* Command Complete handler for HCI_Read_Buffer_Size: caches ACL/SCO MTUs and
 * packet counts; quirky controllers get their values fixed up, and the
 * available-credit counters are initialised to the full packet counts.
 */
676 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
678 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
680 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
685 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
686 hdev->sco_mtu = rp->sco_mtu;
687 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
688 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
690 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
695 hdev->acl_cnt = hdev->acl_pkts;
696 hdev->sco_cnt = hdev->sco_pkts;
698 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
699 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete handler for HCI_Read_BD_ADDR: the public address is cached
 * during init; during setup it is additionally saved as the setup address.
 */
702 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
704 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
706 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
711 if (test_bit(HCI_INIT, &hdev->flags))
712 bacpy(&hdev->bdaddr, &rp->bdaddr);
714 if (hci_dev_test_flag(hdev, HCI_SETUP))
715 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_Read_Page_Scan_Activity: interval/window
 * are only cached during init.
 */
718 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
721 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
723 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
728 if (test_bit(HCI_INIT, &hdev->flags)) {
729 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
730 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete handler for HCI_Write_Page_Scan_Activity: values are read
 * back from the sent command parameters.
 */
734 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
737 u8 status = *((u8 *) skb->data);
738 struct hci_cp_write_page_scan_activity *sent;
740 BT_DBG("%s status 0x%2.2x", hdev->name, status);
745 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
749 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
750 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete handler for HCI_Read_Page_Scan_Type (cached during init). */
753 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
756 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
758 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
763 if (test_bit(HCI_INIT, &hdev->flags))
764 hdev->page_scan_type = rp->type;
/* Command Complete handler for HCI_Write_Page_Scan_Type: the single-byte type
 * is read back from the sent command.
 */
767 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
770 u8 status = *((u8 *) skb->data);
773 BT_DBG("%s status 0x%2.2x", hdev->name, status);
778 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
780 hdev->page_scan_type = *type;
/* Command Complete handler for HCI_Read_Data_Block_Size (block-based flow
 * control): caches block MTU/length/count and initialises available blocks.
 */
783 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
786 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
788 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
793 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
794 hdev->block_len = __le16_to_cpu(rp->block_len);
795 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
797 hdev->block_cnt = hdev->num_blocks;
799 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
800 hdev->block_cnt, hdev->block_len);
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_Read_Clock: a "which" of 0x00 means the
 * local clock (stored on hdev); otherwise the piconet clock and accuracy are
 * stored on the matching connection. The response length is validated first.
 */
803 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
805 struct hci_rp_read_clock *rp = (void *) skb->data;
806 struct hci_cp_read_clock *cp;
807 struct hci_conn *conn;
809 BT_DBG("%s", hdev->name);
811 if (skb->len < sizeof(*rp))
819 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
823 if (cp->which == 0x00) {
824 hdev->clock = le32_to_cpu(rp->clock);
828 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
830 conn->clock = le32_to_cpu(rp->clock);
831 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
835 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Local_AMP_Info: caches the full set
 * of AMP controller capabilities (bandwidth, latency, PDU size, flush
 * timeouts, etc.).
 */
838 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
841 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
843 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
848 hdev->amp_status = rp->amp_status;
849 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
850 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
851 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
852 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
853 hdev->amp_type = rp->amp_type;
854 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
855 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
856 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
857 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete handler for HCI_Read_Inquiry_Response_TX_Power_Level. */
860 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
863 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
865 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
870 hdev->inq_tx_power = rp->tx_power;
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_PIN_Code_Request_Reply: notifies mgmt and
 * records the PIN length (from the sent command) on the ACL connection.
 */
873 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
875 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
876 struct hci_cp_pin_code_reply *cp;
877 struct hci_conn *conn;
879 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
883 if (hci_dev_test_flag(hdev, HCI_MGMT))
884 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
889 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
893 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
895 conn->pin_length = cp->pin_len;
898 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_PIN_Code_Request_Negative_Reply. */
901 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
903 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
905 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
909 if (hci_dev_test_flag(hdev, HCI_MGMT))
910 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
913 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_LE_Read_Buffer_Size: caches the LE ACL MTU
 * and packet count and initialises the LE credit counter.
 */
916 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
919 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
921 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
926 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
927 hdev->le_pkts = rp->le_max_pkt;
929 hdev->le_cnt = hdev->le_pkts;
931 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete handler for HCI_LE_Read_Local_Supported_Features. */
934 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
937 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
939 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
944 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete handler for HCI_LE_Read_Advertising_Channel_TX_Power. */
947 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
950 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
952 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
957 hdev->adv_tx_power = rp->tx_power;
/* Command Complete handler for HCI_User_Confirmation_Request_Reply:
 * forwards the result to mgmt.
 */
960 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
962 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
964 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
968 if (hci_dev_test_flag(hdev, HCI_MGMT))
969 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
972 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_User_Confirmation_Request_Negative_Reply. */
975 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
978 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
980 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
984 if (hci_dev_test_flag(hdev, HCI_MGMT))
985 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
986 ACL_LINK, 0, rp->status);
988 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_User_Passkey_Request_Reply. */
991 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
993 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
995 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
999 if (hci_dev_test_flag(hdev, HCI_MGMT))
1000 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1003 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_User_Passkey_Request_Negative_Reply. */
1006 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1007 struct sk_buff *skb)
1009 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1011 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1015 if (hci_dev_test_flag(hdev, HCI_MGMT))
1016 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1017 ACL_LINK, 0, rp->status);
1019 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Local_OOB_Data: only logs here; the
 * mgmt completion presumably happened on dropped lines — TODO confirm.
 */
1022 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1023 struct sk_buff *skb)
1025 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1027 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete handler for HCI_Read_Local_OOB_Extended_Data. */
1030 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1031 struct sk_buff *skb)
1033 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1035 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_LE_Set_Random_Address: caches the address
 * from the sent command (under the hdev lock).
 */
1038 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1040 __u8 status = *((__u8 *) skb->data);
1043 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1048 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1054 bacpy(&hdev->random_addr, sent);
1056 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_LE_Set_Advertise_Enable: toggles the
 * HCI_LE_ADV flag and, when advertising starts while an LE connection is
 * being initiated as peripheral, arms the connection timeout.
 */
1059 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1061 __u8 *sent, status = *((__u8 *) skb->data);
1063 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1068 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1074 /* If we're doing connection initiation as peripheral. Set a
1075 * timeout in case something goes wrong.
1078 struct hci_conn *conn;
1080 hci_dev_set_flag(hdev, HCI_LE_ADV);
1082 conn = hci_lookup_le_connect(hdev);
1084 queue_delayed_work(hdev->workqueue,
1085 &conn->le_conn_timeout,
1086 conn->conn_timeout);
1088 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1091 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_LE_Set_Scan_Parameters: remembers the scan
 * type (active/passive) from the sent command.
 */
1094 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1096 struct hci_cp_le_set_scan_param *cp;
1097 __u8 status = *((__u8 *) skb->data);
1099 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1104 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1110 hdev->le_scan_type = cp->type;
1112 hci_dev_unlock(hdev);
/* True if a merged-but-unreported advertising report is pending (address set). */
1115 static bool has_pending_adv_report(struct hci_dev *hdev)
1117 struct discovery_state *d = &hdev->discovery;
1119 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Forget any pending advertising report. */
1122 static void clear_pending_adv_report(struct hci_dev *hdev)
1124 struct discovery_state *d = &hdev->discovery;
1126 bacpy(&d->last_adv_addr, BDADDR_ANY);
1127 d->last_adv_data_len = 0;
/* Stash an advertising report so it can be merged with a following
 * scan-response before being delivered; oversized data is rejected up front.
 */
1130 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1131 u8 bdaddr_type, s8 rssi, u32 flags,
1134 struct discovery_state *d = &hdev->discovery;
1136 if (len > HCI_MAX_AD_LENGTH)
1139 bacpy(&d->last_adv_addr, bdaddr);
1140 d->last_adv_addr_type = bdaddr_type;
1141 d->last_adv_rssi = rssi;
1142 d->last_adv_flags = flags;
1143 memcpy(d->last_adv_data, data, len);
1144 d->last_adv_data_len = len;
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_LE_Set_Scan_Enable: maintains the
 * HCI_LE_SCAN flag, flushes any pending advertising report on disable, and
 * decides whether discovery should stop or advertising be re-enabled.
 */
1147 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1148 struct sk_buff *skb)
1150 struct hci_cp_le_set_scan_enable *cp;
1151 __u8 status = *((__u8 *) skb->data);
1153 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1158 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1164 switch (cp->enable) {
1165 case LE_SCAN_ENABLE:
1166 hci_dev_set_flag(hdev, HCI_LE_SCAN);
/* Active scans merge ADV_IND with SCAN_RSP, so start with a clean slate. */
1167 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1168 clear_pending_adv_report(hdev);
1171 case LE_SCAN_DISABLE:
1172 /* We do this here instead of when setting DISCOVERY_STOPPED
1173 * since the latter would potentially require waiting for
1174 * inquiry to stop too.
1176 if (has_pending_adv_report(hdev)) {
1177 struct discovery_state *d = &hdev->discovery;
1179 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1180 d->last_adv_addr_type, NULL,
1181 d->last_adv_rssi, d->last_adv_flags,
1183 d->last_adv_data_len, NULL, 0);
1186 /* Cancel this timer so that we don't try to disable scanning
1187 * when it's already disabled.
1189 cancel_delayed_work(&hdev->le_scan_disable);
1191 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1193 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1194 * interrupted scanning due to a connect request. Mark
1195 * therefore discovery as stopped. If this was not
1196 * because of a connect request advertising might have
1197 * been disabled because of active scanning, so
1198 * re-enable it again if necessary.
1200 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1201 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1202 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1203 hdev->discovery.state == DISCOVERY_FINDING)
1204 mgmt_reenable_advertising(hdev);
1209 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1213 hci_dev_unlock(hdev);
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_LE_Read_White_List_Size. */
1216 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1217 struct sk_buff *skb)
1219 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1221 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1226 hdev->le_white_list_size = rp->size;
/* Command Complete handler for HCI_LE_Clear_White_List: mirrors the
 * controller's empty list in the host-side copy.
 */
1229 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1230 struct sk_buff *skb)
1232 __u8 status = *((__u8 *) skb->data);
1234 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1239 hci_bdaddr_list_clear(&hdev->le_white_list);
/* Command Complete handler for HCI_LE_Add_Device_To_White_List: mirrors the
 * addition (address from the sent command) in the host-side copy.
 */
1242 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1243 struct sk_buff *skb)
1245 struct hci_cp_le_add_to_white_list *sent;
1246 __u8 status = *((__u8 *) skb->data);
1248 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1253 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1257 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
/* Command Complete handler for HCI_LE_Remove_Device_From_White_List. */
1261 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1262 struct sk_buff *skb)
1264 struct hci_cp_le_del_from_white_list *sent;
1265 __u8 status = *((__u8 *) skb->data);
1267 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1272 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1276 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
/* Command Complete handler for HCI_LE_Read_Supported_States. */
1280 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1281 struct sk_buff *skb)
1283 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1285 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1290 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete handler for HCI_LE_Read_Suggested_Default_Data_Length. */
1293 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1294 struct sk_buff *skb)
1296 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1298 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1303 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1304 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Command Complete handler for HCI_LE_Write_Suggested_Default_Data_Length:
 * values come from the sent command parameters.
 */
1307 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1308 struct sk_buff *skb)
1310 struct hci_cp_le_write_def_data_len *sent;
1311 __u8 status = *((__u8 *) skb->data);
1313 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1318 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1322 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1323 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete handler for HCI_LE_Read_Maximum_Data_Length. */
1326 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1327 struct sk_buff *skb)
1329 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1331 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1336 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1337 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1338 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1339 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* NOTE(review): lossy extraction — blank/brace/return-only lines are missing; code kept verbatim. */
/* Command Complete handler for HCI_Write_LE_Host_Supported: mirrors the sent
 * LE/simultaneous-BR/EDR support values into the cached host-features bits
 * and the HCI_LE_ENABLED/HCI_ADVERTISING flags.
 */
1342 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1343 struct sk_buff *skb)
1345 struct hci_cp_write_le_host_supported *sent;
1346 __u8 status = *((__u8 *) skb->data);
1348 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1353 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1360 hdev->features[1][0] |= LMP_HOST_LE;
1361 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1363 hdev->features[1][0] &= ~LMP_HOST_LE;
1364 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1365 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1369 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1371 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1373 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_LE_Set_Advertising_Parameters: remembers
 * the own-address type used for advertising.
 */
1376 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1378 struct hci_cp_le_set_adv_param *cp;
1379 u8 status = *((u8 *) skb->data);
1381 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1386 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1391 hdev->adv_addr_type = cp->own_address_type;
1392 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_RSSI: stores the RSSI on the
 * connection identified by the returned handle.
 */
1395 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1397 struct hci_rp_read_rssi *rp = (void *) skb->data;
1398 struct hci_conn *conn;
1400 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1407 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1409 conn->rssi = rp->rssi;
1411 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Transmit_Power_Level: the sent
 * command's type selects whether current or maximum TX power is stored.
 */
1414 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1416 struct hci_cp_read_tx_power *sent;
1417 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1418 struct hci_conn *conn;
1420 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1425 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1431 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1435 switch (sent->type) {
1437 conn->tx_power = rp->tx_power;
1440 conn->max_tx_power = rp->tx_power;
1445 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Write_SSP_Debug_Mode: caches the mode from
 * the sent command.
 */
1448 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1450 u8 status = *((u8 *) skb->data);
1453 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1458 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1460 hdev->ssp_debug_mode = *mode;
1463 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1465 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1468 hci_conn_check_pending(hdev);
1472 set_bit(HCI_INQUIRY, &hdev->flags);
1475 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1477 struct hci_cp_create_conn *cp;
1478 struct hci_conn *conn;
1480 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1482 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1488 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1490 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1493 if (conn && conn->state == BT_CONNECT) {
1494 if (status != 0x0c || conn->attempt > 2) {
1495 conn->state = BT_CLOSED;
1496 hci_connect_cfm(conn, status);
1499 conn->state = BT_CONNECT2;
1503 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1506 BT_ERR("No memory for new connection");
1510 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Add_SCO_Connection: on failure, mark
 * the pending SCO link hanging off the ACL connection as closed.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);

	/* The handle in the command refers to the parent ACL link */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);

	hci_dev_unlock(hdev);
/* Command Status handler for HCI_Authentication_Requested: a failure
 * during connection setup is reported as a failed connect.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			/* Drop the reference held for the auth attempt */
			hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status handler for HCI_Set_Connection_Encryption: a failure
 * during connection setup is reported as a failed connect.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			/* Drop the reference held for the encrypt attempt */
			hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Decide whether an outgoing connection that is still being configured
 * needs authentication before it may be reported as established.
 */
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
	/* Only outgoing links still in the configuration phase qualify */
	if (conn->state != BT_CONFIG || !conn->out)

	/* SDP-only security never requires authentication */
	if (conn->pending_sec_level == BT_SECURITY_SDP)

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested (auth_type bit 0).
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue an HCI_Remote_Name_Request for an inquiry cache entry.
 * Returns the result of hci_send_cmd() (0 on success).
 */
static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	/* Reuse the page-scan parameters learned during inquiry so the
	 * controller can page the remote device faster.
	 */
	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Start name resolution for the next discovered device that still
 * needs its name; returns true when a request was issued.
 */
static bool hci_resolve_next_name(struct hci_dev *hdev)
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (hci_resolve_name(hdev, e) == 0) {
		/* Request sent: mark the entry so it isn't picked again */
		e->name_state = NAME_PENDING;
/* Called when a remote name becomes known (or resolution failed) to
 * update the mgmt state and to continue or finish the discovery
 * name-resolution phase.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of
	 * which are pending, there is no need to continue resolving a next
	 * name as it will be done upon receiving another Remote Name Request
	 * Complete event.
	 */
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
		/* Resolution failed for this entry */
		e->name_state = NAME_NOT_KNOWN;

	if (hci_resolve_next_name(hdev))

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI_Remote_Name_Request. */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Let the discovery logic account for the failed request */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn))

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		/* Record that we initiated authentication (relevant for
		 * the security/SMP logic).
		 */
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);

	hci_dev_unlock(hdev);
/* Command Status handler for HCI_Read_Remote_Supported_Features: a
 * failure during connection setup is reported as a failed connect.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status handler for HCI_Read_Remote_Extended_Features: a
 * failure during connection setup is reported as a failed connect.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status handler for HCI_Setup_Synchronous_Connection: on
 * failure, mark the pending SCO/eSCO link as closed and notify.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);

	/* The handle in the command refers to the parent ACL link */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);

	hci_dev_unlock(hdev);
/* Command Status handler for HCI_Sniff_Mode. */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		/* Mode change is no longer pending once the command fails */
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		/* A SCO setup may have been waiting behind the mode change */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);

	hci_dev_unlock(hdev);
/* Command Status handler for HCI_Exit_Sniff_Mode. */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		/* Mode change is no longer pending once the command fails */
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		/* A SCO setup may have been waiting behind the mode change */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);

	hci_dev_unlock(hdev);
/* Command Status handler for HCI_Disconnect: only failures are handled
 * here; success is processed via the Disconnection Complete event.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

	hci_dev_unlock(hdev);
/* Command Status handler for HCI_LE_Create_Connection: record the
 * address information SMP needs and arm the connection timeout.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);

	conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
				       cp->peer_addr_type);

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

	hci_dev_unlock(hdev);
/* Command Status handler for HCI_LE_Read_Remote_Features: a failure
 * during connection setup is reported as a failed connect.
 */
static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status handler for HCI_LE_Start_Encryption: if the command
 * failed, encryption cannot proceed, so drop the link.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));

	/* Only a live connection can be torn down here */
	if (conn->state != BT_CONNECTED)

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status handler for HCI_Switch_Role: on failure, clear the
 * pending role-switch flag so later requests are not blocked.
 */
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
/* Inquiry Complete event: clear the in-progress flag and, if mgmt
 * discovery is running, either start name resolution or mark the
 * discovery session as finished.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Resume connection attempts deferred while inquiry was active */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))

	if (discov->state != DISCOVERY_FINDING)

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	/* There are names left to resolve: start with the first entry */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
/* Inquiry Result event: add each reported device to the inquiry cache
 * and forward it to mgmt as a found device.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	/* Sanity-check the buffer against the claimed response count */
	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))

	for (; num_rsp; num_rsp--, info++) {

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		/* Plain inquiry results carry no RSSI or SSP information */
		data.rssi = HCI_RSSI_INVALID;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0);

	hci_dev_unlock(hdev);
/* Connection Complete event: finalize an ACL or SCO connection
 * attempt, set up the security flags and start remote feature
 * discovery for ACL links.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);

		if (ev->link_type != SCO_LINK)

		/* Some controllers complete an eSCO setup as a plain SCO
		 * link: retry the lookup as eSCO and fix up the type.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);

		conn->type = SCO_LINK;

	conn->handle = __le16_to_cpu(ev->handle);

	if (conn->type == ACL_LINK) {
		conn->state = BT_CONFIG;
		hci_conn_hold(conn);

		/* Incoming legacy (non-SSP) connections without a stored
		 * link key are about to pair: use the longer timeout.
		 */
		if (!conn->out && !hci_conn_ssp_enabled(conn) &&
		    !hci_find_link_key(hdev, &ev->bdaddr))
			conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		conn->state = BT_CONNECTED;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* Inherit the adapter-wide auth/encrypt settings */
	if (test_bit(HCI_AUTH, &hdev->flags))
		set_bit(HCI_CONN_AUTH, &conn->flags);

	if (test_bit(HCI_ENCRYPT, &hdev->flags))
		set_bit(HCI_CONN_ENCRYPT, &conn->flags);

	/* Get remote features */
	if (conn->type == ACL_LINK) {
		struct hci_cp_read_remote_features cp;
		cp.handle = ev->handle;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,

		hci_update_page_scan(hdev);

	/* Set packet type for incoming connection */
	if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
		struct hci_cp_change_conn_ptype cp;
		cp.handle = ev->handle;
		cp.pkt_type = cpu_to_le16(conn->pkt_type);
		hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),

	/* Connection attempt failed */
	conn->state = BT_CLOSED;
	if (conn->type == ACL_LINK)
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

		hci_connect_cfm(conn, ev->status);

	} else if (ev->link_type != ACL_LINK)
		hci_connect_cfm(conn, ev->status);

	hci_dev_unlock(hdev);

	/* Kick any connection attempts queued behind this one */
	hci_conn_check_pending(hdev);
/* Reject an incoming connection request from the given address. */
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
	struct hci_cp_reject_conn_req cp;

	bacpy(&cp.bdaddr, bdaddr);
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event: apply the accept/reject policy for an
 * incoming BR/EDR connection and, when accepted, answer with the
 * appropriate accept command (or defer to the protocol layer).
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,

	/* Give protocol layers (L2CAP/SCO) a chance to veto or defer */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);

	/* Blacklisted devices are always rejected */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
		hci_reject_conn(hdev, &ev->bdaddr);

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
		hci_reject_conn(hdev, &ev->bdaddr);

	/* Connection accepted */

	/* Refresh the cached device class from the request */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* Default sync-connection parameters: 0x1f40 = 8000
		 * bytes/sec bandwidth in each direction; latency and
		 * retransmission effort left to the controller.
		 */
		cp.tx_bandwidth = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = cpu_to_le32(0x00001f40);
		cp.max_latency = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
		/* Deferred: the protocol layer will accept/reject later */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
/* Map an HCI disconnect reason code to its mgmt-layer equivalent. */
static u8 hci_to_mgmt_reason(u8 err)
	case HCI_ERROR_CONNECTION_TIMEOUT:
		return MGMT_DEV_DISCONN_TIMEOUT;
	case HCI_ERROR_REMOTE_USER_TERM:
	case HCI_ERROR_REMOTE_LOW_RESOURCES:
	case HCI_ERROR_REMOTE_POWER_OFF:
		return MGMT_DEV_DISCONN_REMOTE;
	case HCI_ERROR_LOCAL_HOST_TERM:
		return MGMT_DEV_DISCONN_LOCAL_HOST;
		/* Anything else is reported as unknown */
		return MGMT_DEV_DISCONN_UNKNOWN;
2350 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2352 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2353 u8 reason = hci_to_mgmt_reason(ev->reason);
2354 struct hci_conn_params *params;
2355 struct hci_conn *conn;
2356 bool mgmt_connected;
2359 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2363 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2368 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2369 conn->dst_type, ev->status);
2373 conn->state = BT_CLOSED;
2375 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2376 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2377 reason, mgmt_connected);
2379 if (conn->type == ACL_LINK) {
2380 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2381 hci_remove_link_key(hdev, &conn->dst);
2383 hci_update_page_scan(hdev);
2386 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2388 switch (params->auto_connect) {
2389 case HCI_AUTO_CONN_LINK_LOSS:
2390 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2394 case HCI_AUTO_CONN_DIRECT:
2395 case HCI_AUTO_CONN_ALWAYS:
2396 list_del_init(¶ms->action);
2397 list_add(¶ms->action, &hdev->pend_le_conns);
2398 hci_update_background_scan(hdev);
2408 hci_disconn_cfm(conn, ev->reason);
2411 /* Re-enable advertising if necessary, since it might
2412 * have been disabled by the connection. From the
2413 * HCI_LE_Set_Advertise_Enable command description in
2414 * the core specification (v4.0):
2415 * "The Controller shall continue advertising until the Host
2416 * issues an LE_Set_Advertise_Enable command with
2417 * Advertising_Enable set to 0x00 (Advertising is disabled)
2418 * or until a connection is created or until the Advertising
2419 * is timed out due to Directed Advertising."
2421 if (type == LE_LINK)
2422 mgmt_reenable_advertising(hdev);
2425 hci_dev_unlock(hdev);
/* Authentication Complete event: update the connection's security
 * flags and continue the encryption/connect sequence as appropriate.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

	/* Legacy (pre-SSP) devices cannot re-authenticate an existing
	 * link: treat the link as authenticated at the pending level.
	 */
	if (!hci_conn_ssp_enabled(conn) &&
	    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
		BT_INFO("re-auth of legacy device is not possible.");
		set_bit(HCI_CONN_AUTH, &conn->flags);
		conn->sec_level = conn->pending_sec_level;
		mgmt_auth_failed(conn, ev->status);

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP link authenticated: enable encryption next */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
		hci_encrypt_cfm(conn, ev->status);

	hci_dev_unlock(hdev);
/* Remote Name Request Complete event: feed the name to the discovery
 * logic and, if needed, continue with authentication of the link.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))

	/* ev->name is not guaranteed to be NUL terminated: bound it */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn))

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		/* Record that we initiated the authentication */
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);

	hci_dev_unlock(hdev);
/* Completion callback for the HCI_Read_Encryption_Key_Size request
 * issued when encryption comes up on an ACL link.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	/* Validate the response before touching its fields */
	if (!skb || skb->len < sizeof(*rp)) {
		BT_ERR("%s invalid HCI Read Encryption Key Size response",

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	conn = hci_conn_hash_lookup_handle(hdev, handle);

	/* If we fail to read the encryption key size, assume maximum
	 * (which is the same we do also when this HCI command isn't
	 * supported at all).
	 */
		BT_ERR("%s failed to read key size for handle %u", hdev->name,
		conn->enc_key_size = HCI_LINK_KEY_SIZE;
		conn->enc_key_size = rp->key_size;

	/* Now that the key size is known, signal encryption change */
	hci_encrypt_cfm(conn, 0);

	hci_dev_unlock(hdev);
/* Encryption Change event: update the connection security flags,
 * verify the link meets its security requirements and, for encrypted
 * ACL links, read the negotiated encryption key size.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

		/* Encryption implies authentication */
		set_bit(HCI_CONN_AUTH, &conn->flags);
		set_bit(HCI_CONN_ENCRYPT, &conn->flags);
		conn->sec_level = conn->pending_sec_level;

		/* P-256 authentication key implies FIPS */
		if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
			set_bit(HCI_CONN_FIPS, &conn->flags);

		/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links are
		 * always AES-CCM when encrypted.
		 */
		if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
		    conn->type == LE_LINK)
			set_bit(HCI_CONN_AES_CCM, &conn->flags);
		clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
		clear_bit(HCI_CONN_AES_CCM, &conn->flags);

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		/* Notify upper layers so they can cleanup before
		 * the link is disconnected.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			BT_ERR("Sending HCI Read Encryption Key Size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;

	hci_encrypt_cfm(conn, ev->status);

	hci_dev_unlock(hdev);
/* Change Connection Link Key Complete event. */
static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
		/* New link key established: the link is now secure */
		set_bit(HCI_CONN_SECURE, &conn->flags);

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);

	hci_dev_unlock(hdev);
/* Read Remote Supported Features complete event: store feature page 0
 * and continue connection setup (extended features, remote name,
 * authentication).
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

	memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)

	/* If both sides support extended features, fetch page 1 next */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;

		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,

	/* Otherwise resolve the remote name before reporting the device
	 * as connected to mgmt.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Complete event: dispatch to the per-opcode hci_cc_* handler,
 * report opcode/status back to the caller, and restart the command
 * queue when the controller signals it can accept more commands.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* First byte of the return parameters is always the status */
	*status = skb->data[sizeof(*ev)];

	skb_pull(skb, sizeof(*ev));

	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb, status);

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);

		hci_cc_reset(hdev, skb);

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);

		/* Unhandled opcode: just log it */
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);

	/* HCI_OP_NOP completions keep the command timer running */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* ncmd > 0 means the controller can accept another command */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,

	/* Send the next queued command, if any */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
3038 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3039 u16 *opcode, u8 *status,
3040 hci_req_complete_t *req_complete,
3041 hci_req_complete_skb_t *req_complete_skb)
3043 struct hci_ev_cmd_status *ev = (void *) skb->data;
3045 skb_pull(skb, sizeof(*ev));
3047 *opcode = __le16_to_cpu(ev->opcode);
3048 *status = ev->status;
3051 case HCI_OP_INQUIRY:
3052 hci_cs_inquiry(hdev, ev->status);
3055 case HCI_OP_CREATE_CONN:
3056 hci_cs_create_conn(hdev, ev->status);
3059 case HCI_OP_DISCONNECT:
3060 hci_cs_disconnect(hdev, ev->status);
3063 case HCI_OP_ADD_SCO:
3064 hci_cs_add_sco(hdev, ev->status);
3067 case HCI_OP_AUTH_REQUESTED:
3068 hci_cs_auth_requested(hdev, ev->status);
3071 case HCI_OP_SET_CONN_ENCRYPT:
3072 hci_cs_set_conn_encrypt(hdev, ev->status);
3075 case HCI_OP_REMOTE_NAME_REQ:
3076 hci_cs_remote_name_req(hdev, ev->status);
3079 case HCI_OP_READ_REMOTE_FEATURES:
3080 hci_cs_read_remote_features(hdev, ev->status);
3083 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3084 hci_cs_read_remote_ext_features(hdev, ev->status);
3087 case HCI_OP_SETUP_SYNC_CONN:
3088 hci_cs_setup_sync_conn(hdev, ev->status);
3091 case HCI_OP_SNIFF_MODE:
3092 hci_cs_sniff_mode(hdev, ev->status);
3095 case HCI_OP_EXIT_SNIFF_MODE:
3096 hci_cs_exit_sniff_mode(hdev, ev->status);
3099 case HCI_OP_SWITCH_ROLE:
3100 hci_cs_switch_role(hdev, ev->status);
3103 case HCI_OP_LE_CREATE_CONN:
3104 hci_cs_le_create_conn(hdev, ev->status);
3107 case HCI_OP_LE_READ_REMOTE_FEATURES:
3108 hci_cs_le_read_remote_features(hdev, ev->status);
3111 case HCI_OP_LE_START_ENC:
3112 hci_cs_le_start_enc(hdev, ev->status);
3116 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3120 if (*opcode != HCI_OP_NOP)
3121 cancel_delayed_work(&hdev->cmd_timer);
3123 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3124 atomic_set(&hdev->cmd_cnt, 1);
3126 /* Indicate request completion if the command failed. Also, if
3127 * we're not waiting for a special event and we get a success
3128 * command status we should try to flag the request as completed
3129 * (since for this kind of commands there will not be a command
3133 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3134 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3137 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3138 queue_work(hdev->workqueue, &hdev->cmd_work);
3141 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3143 struct hci_ev_hardware_error *ev = (void *) skb->data;
3145 hdev->hw_error_code = ev->code;
3147 queue_work(hdev->req_workqueue, &hdev->error_reset);
3150 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3152 struct hci_ev_role_change *ev = (void *) skb->data;
3153 struct hci_conn *conn;
3155 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3159 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3162 conn->role = ev->role;
3164 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3166 hci_role_switch_cfm(conn, ev->status, ev->role);
3169 hci_dev_unlock(hdev);
3172 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3174 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3177 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3178 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3182 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3183 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3184 BT_DBG("%s bad parameters", hdev->name);
3188 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3190 for (i = 0; i < ev->num_hndl; i++) {
3191 struct hci_comp_pkts_info *info = &ev->handles[i];
3192 struct hci_conn *conn;
3193 __u16 handle, count;
3195 handle = __le16_to_cpu(info->handle);
3196 count = __le16_to_cpu(info->count);
3198 conn = hci_conn_hash_lookup_handle(hdev, handle);
3202 conn->sent -= count;
3204 switch (conn->type) {
3206 hdev->acl_cnt += count;
3207 if (hdev->acl_cnt > hdev->acl_pkts)
3208 hdev->acl_cnt = hdev->acl_pkts;
3212 if (hdev->le_pkts) {
3213 hdev->le_cnt += count;
3214 if (hdev->le_cnt > hdev->le_pkts)
3215 hdev->le_cnt = hdev->le_pkts;
3217 hdev->acl_cnt += count;
3218 if (hdev->acl_cnt > hdev->acl_pkts)
3219 hdev->acl_cnt = hdev->acl_pkts;
3224 hdev->sco_cnt += count;
3225 if (hdev->sco_cnt > hdev->sco_pkts)
3226 hdev->sco_cnt = hdev->sco_pkts;
3230 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3235 queue_work(hdev->workqueue, &hdev->tx_work);
3238 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3241 struct hci_chan *chan;
3243 switch (hdev->dev_type) {
3245 return hci_conn_hash_lookup_handle(hdev, handle);
3247 chan = hci_chan_lookup_handle(hdev, handle);
3252 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3259 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3261 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3264 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3265 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3269 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3270 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3271 BT_DBG("%s bad parameters", hdev->name);
3275 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3278 for (i = 0; i < ev->num_hndl; i++) {
3279 struct hci_comp_blocks_info *info = &ev->handles[i];
3280 struct hci_conn *conn = NULL;
3281 __u16 handle, block_count;
3283 handle = __le16_to_cpu(info->handle);
3284 block_count = __le16_to_cpu(info->blocks);
3286 conn = __hci_conn_lookup_handle(hdev, handle);
3290 conn->sent -= block_count;
3292 switch (conn->type) {
3295 hdev->block_cnt += block_count;
3296 if (hdev->block_cnt > hdev->num_blocks)
3297 hdev->block_cnt = hdev->num_blocks;
3301 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3306 queue_work(hdev->workqueue, &hdev->tx_work);
3309 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3311 struct hci_ev_mode_change *ev = (void *) skb->data;
3312 struct hci_conn *conn;
3314 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3318 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3320 conn->mode = ev->mode;
3322 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3324 if (conn->mode == HCI_CM_ACTIVE)
3325 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3327 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3330 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3331 hci_sco_setup(conn, ev->status);
3334 hci_dev_unlock(hdev);
3337 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3339 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3340 struct hci_conn *conn;
3342 BT_DBG("%s", hdev->name);
3346 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3350 if (conn->state == BT_CONNECTED) {
3351 hci_conn_hold(conn);
3352 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3353 hci_conn_drop(conn);
3356 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3357 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3358 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3359 sizeof(ev->bdaddr), &ev->bdaddr);
3360 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3363 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3368 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3372 hci_dev_unlock(hdev);
3375 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3377 if (key_type == HCI_LK_CHANGED_COMBINATION)
3380 conn->pin_length = pin_len;
3381 conn->key_type = key_type;
3384 case HCI_LK_LOCAL_UNIT:
3385 case HCI_LK_REMOTE_UNIT:
3386 case HCI_LK_DEBUG_COMBINATION:
3388 case HCI_LK_COMBINATION:
3390 conn->pending_sec_level = BT_SECURITY_HIGH;
3392 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3394 case HCI_LK_UNAUTH_COMBINATION_P192:
3395 case HCI_LK_UNAUTH_COMBINATION_P256:
3396 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3398 case HCI_LK_AUTH_COMBINATION_P192:
3399 conn->pending_sec_level = BT_SECURITY_HIGH;
3401 case HCI_LK_AUTH_COMBINATION_P256:
3402 conn->pending_sec_level = BT_SECURITY_FIPS;
3407 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3409 struct hci_ev_link_key_req *ev = (void *) skb->data;
3410 struct hci_cp_link_key_reply cp;
3411 struct hci_conn *conn;
3412 struct link_key *key;
3414 BT_DBG("%s", hdev->name);
3416 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3421 key = hci_find_link_key(hdev, &ev->bdaddr);
3423 BT_DBG("%s link key not found for %pMR", hdev->name,
3428 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3431 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3433 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3435 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3436 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3437 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3438 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3442 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3443 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3444 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3445 BT_DBG("%s ignoring key unauthenticated for high security",
3450 conn_set_key(conn, key->type, key->pin_len);
3453 bacpy(&cp.bdaddr, &ev->bdaddr);
3454 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3456 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3458 hci_dev_unlock(hdev);
3463 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3464 hci_dev_unlock(hdev);
3467 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3469 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3470 struct hci_conn *conn;
3471 struct link_key *key;
3475 BT_DBG("%s", hdev->name);
3479 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3483 hci_conn_hold(conn);
3484 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3485 hci_conn_drop(conn);
3487 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3488 conn_set_key(conn, ev->key_type, conn->pin_length);
3490 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3493 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3494 ev->key_type, pin_len, &persistent);
3498 /* Update connection information since adding the key will have
3499 * fixed up the type in the case of changed combination keys.
3501 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3502 conn_set_key(conn, key->type, key->pin_len);
3504 mgmt_new_link_key(hdev, key, persistent);
3506 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3507 * is set. If it's not set simply remove the key from the kernel
3508 * list (we've still notified user space about it but with
3509 * store_hint being 0).
3511 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3512 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3513 list_del_rcu(&key->list);
3514 kfree_rcu(key, rcu);
3519 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3521 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3524 hci_dev_unlock(hdev);
3527 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3529 struct hci_ev_clock_offset *ev = (void *) skb->data;
3530 struct hci_conn *conn;
3532 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3536 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3537 if (conn && !ev->status) {
3538 struct inquiry_entry *ie;
3540 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3542 ie->data.clock_offset = ev->clock_offset;
3543 ie->timestamp = jiffies;
3547 hci_dev_unlock(hdev);
3550 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3552 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3553 struct hci_conn *conn;
3555 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3559 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3560 if (conn && !ev->status)
3561 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3563 hci_dev_unlock(hdev);
3566 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3568 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3569 struct inquiry_entry *ie;
3571 BT_DBG("%s", hdev->name);
3575 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3577 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3578 ie->timestamp = jiffies;
3581 hci_dev_unlock(hdev);
3584 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3585 struct sk_buff *skb)
3587 struct inquiry_data data;
3588 int num_rsp = *((__u8 *) skb->data);
3590 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3595 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3600 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3601 struct inquiry_info_with_rssi_and_pscan_mode *info;
3602 info = (void *) (skb->data + 1);
3604 if (skb->len < num_rsp * sizeof(*info) + 1)
3607 for (; num_rsp; num_rsp--, info++) {
3610 bacpy(&data.bdaddr, &info->bdaddr);
3611 data.pscan_rep_mode = info->pscan_rep_mode;
3612 data.pscan_period_mode = info->pscan_period_mode;
3613 data.pscan_mode = info->pscan_mode;
3614 memcpy(data.dev_class, info->dev_class, 3);
3615 data.clock_offset = info->clock_offset;
3616 data.rssi = info->rssi;
3617 data.ssp_mode = 0x00;
3619 flags = hci_inquiry_cache_update(hdev, &data, false);
3621 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3622 info->dev_class, info->rssi,
3623 flags, NULL, 0, NULL, 0);
3626 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3628 if (skb->len < num_rsp * sizeof(*info) + 1)
3631 for (; num_rsp; num_rsp--, info++) {
3634 bacpy(&data.bdaddr, &info->bdaddr);
3635 data.pscan_rep_mode = info->pscan_rep_mode;
3636 data.pscan_period_mode = info->pscan_period_mode;
3637 data.pscan_mode = 0x00;
3638 memcpy(data.dev_class, info->dev_class, 3);
3639 data.clock_offset = info->clock_offset;
3640 data.rssi = info->rssi;
3641 data.ssp_mode = 0x00;
3643 flags = hci_inquiry_cache_update(hdev, &data, false);
3645 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3646 info->dev_class, info->rssi,
3647 flags, NULL, 0, NULL, 0);
3652 hci_dev_unlock(hdev);
3655 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3656 struct sk_buff *skb)
3658 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3659 struct hci_conn *conn;
3661 BT_DBG("%s", hdev->name);
3665 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3669 if (ev->page < HCI_MAX_PAGES)
3670 memcpy(conn->features[ev->page], ev->features, 8);
3672 if (!ev->status && ev->page == 0x01) {
3673 struct inquiry_entry *ie;
3675 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3677 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3679 if (ev->features[0] & LMP_HOST_SSP) {
3680 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3682 /* It is mandatory by the Bluetooth specification that
3683 * Extended Inquiry Results are only used when Secure
3684 * Simple Pairing is enabled, but some devices violate
3687 * To make these devices work, the internal SSP
3688 * enabled flag needs to be cleared if the remote host
3689 * features do not indicate SSP support */
3690 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3693 if (ev->features[0] & LMP_HOST_SC)
3694 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3697 if (conn->state != BT_CONFIG)
3700 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3701 struct hci_cp_remote_name_req cp;
3702 memset(&cp, 0, sizeof(cp));
3703 bacpy(&cp.bdaddr, &conn->dst);
3704 cp.pscan_rep_mode = 0x02;
3705 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3706 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3707 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3709 if (!hci_outgoing_auth_needed(hdev, conn)) {
3710 conn->state = BT_CONNECTED;
3711 hci_connect_cfm(conn, ev->status);
3712 hci_conn_drop(conn);
3716 hci_dev_unlock(hdev);
3719 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3720 struct sk_buff *skb)
3722 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3723 struct hci_conn *conn;
3725 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3729 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3731 if (ev->link_type == ESCO_LINK)
3734 /* When the link type in the event indicates SCO connection
3735 * and lookup of the connection object fails, then check
3736 * if an eSCO connection object exists.
3738 * The core limits the synchronous connections to either
3739 * SCO or eSCO. The eSCO connection is preferred and tried
3740 * to be setup first and until successfully established,
3741 * the link type will be hinted as eSCO.
3743 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3748 switch (ev->status) {
3750 /* The synchronous connection complete event should only be
3751 * sent once per new connection. Receiving a successful
3752 * complete event when the connection status is already
3753 * BT_CONNECTED means that the device is misbehaving and sent
3754 * multiple complete event packets for the same new connection.
3756 * Registering the device more than once can corrupt kernel
3757 * memory, hence upon detecting this invalid event, we report
3758 * an error and ignore the packet.
3760 if (conn->state == BT_CONNECTED) {
3761 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
3765 conn->handle = __le16_to_cpu(ev->handle);
3766 conn->state = BT_CONNECTED;
3767 conn->type = ev->link_type;
3769 hci_debugfs_create_conn(conn);
3770 hci_conn_add_sysfs(conn);
3773 case 0x10: /* Connection Accept Timeout */
3774 case 0x0d: /* Connection Rejected due to Limited Resources */
3775 case 0x11: /* Unsupported Feature or Parameter Value */
3776 case 0x1c: /* SCO interval rejected */
3777 case 0x1a: /* Unsupported Remote Feature */
3778 case 0x1e: /* Invalid LMP Parameters */
3779 case 0x1f: /* Unspecified error */
3780 case 0x20: /* Unsupported LMP Parameter value */
3782 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3783 (hdev->esco_type & EDR_ESCO_MASK);
3784 if (hci_setup_sync(conn, conn->link->handle))
3790 conn->state = BT_CLOSED;
3794 hci_connect_cfm(conn, ev->status);
3799 hci_dev_unlock(hdev);
3802 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3806 while (parsed < eir_len) {
3807 u8 field_len = eir[0];
3812 parsed += field_len + 1;
3813 eir += field_len + 1;
3819 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3820 struct sk_buff *skb)
3822 struct inquiry_data data;
3823 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3824 int num_rsp = *((__u8 *) skb->data);
3827 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3829 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
3832 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3837 for (; num_rsp; num_rsp--, info++) {
3841 bacpy(&data.bdaddr, &info->bdaddr);
3842 data.pscan_rep_mode = info->pscan_rep_mode;
3843 data.pscan_period_mode = info->pscan_period_mode;
3844 data.pscan_mode = 0x00;
3845 memcpy(data.dev_class, info->dev_class, 3);
3846 data.clock_offset = info->clock_offset;
3847 data.rssi = info->rssi;
3848 data.ssp_mode = 0x01;
3850 if (hci_dev_test_flag(hdev, HCI_MGMT))
3851 name_known = eir_has_data_type(info->data,
3857 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3859 eir_len = eir_get_length(info->data, sizeof(info->data));
3861 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3862 info->dev_class, info->rssi,
3863 flags, info->data, eir_len, NULL, 0);
3866 hci_dev_unlock(hdev);
3869 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3870 struct sk_buff *skb)
3872 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3873 struct hci_conn *conn;
3875 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3876 __le16_to_cpu(ev->handle));
3880 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3884 /* For BR/EDR the necessary steps are taken through the
3885 * auth_complete event.
3887 if (conn->type != LE_LINK)
3891 conn->sec_level = conn->pending_sec_level;
3893 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3895 if (ev->status && conn->state == BT_CONNECTED) {
3896 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3897 hci_conn_drop(conn);
3901 if (conn->state == BT_CONFIG) {
3903 conn->state = BT_CONNECTED;
3905 hci_connect_cfm(conn, ev->status);
3906 hci_conn_drop(conn);
3908 hci_auth_cfm(conn, ev->status);
3910 hci_conn_hold(conn);
3911 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3912 hci_conn_drop(conn);
3916 hci_dev_unlock(hdev);
3919 static u8 hci_get_auth_req(struct hci_conn *conn)
3921 /* If remote requests no-bonding follow that lead */
3922 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3923 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3924 return conn->remote_auth | (conn->auth_type & 0x01);
3926 /* If both remote and local have enough IO capabilities, require
3929 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3930 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3931 return conn->remote_auth | 0x01;
3933 /* No MITM protection possible so ignore remote requirement */
3934 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3937 static u8 bredr_oob_data_present(struct hci_conn *conn)
3939 struct hci_dev *hdev = conn->hdev;
3940 struct oob_data *data;
3942 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3946 if (bredr_sc_enabled(hdev)) {
3947 /* When Secure Connections is enabled, then just
3948 * return the present value stored with the OOB
3949 * data. The stored value contains the right present
3950 * information. However it can only be trusted when
3951 * not in Secure Connection Only mode.
3953 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3954 return data->present;
3956 /* When Secure Connections Only mode is enabled, then
3957 * the P-256 values are required. If they are not
3958 * available, then do not declare that OOB data is
3961 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3962 !memcmp(data->hash256, ZERO_KEY, 16))
3968 /* When Secure Connections is not enabled or actually
3969 * not supported by the hardware, then check that if
3970 * P-192 data values are present.
3972 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3973 !memcmp(data->hash192, ZERO_KEY, 16))
3979 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3981 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3982 struct hci_conn *conn;
3984 BT_DBG("%s", hdev->name);
3988 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3992 hci_conn_hold(conn);
3994 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3997 /* Allow pairing if we're pairable, the initiators of the
3998 * pairing or if the remote is not requesting bonding.
4000 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4001 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4002 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4003 struct hci_cp_io_capability_reply cp;
4005 bacpy(&cp.bdaddr, &ev->bdaddr);
4006 /* Change the IO capability from KeyboardDisplay
4007 * to DisplayYesNo as it is not supported by BT spec. */
4008 cp.capability = (conn->io_capability == 0x04) ?
4009 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4011 /* If we are initiators, there is no remote information yet */
4012 if (conn->remote_auth == 0xff) {
4013 /* Request MITM protection if our IO caps allow it
4014 * except for the no-bonding case.
4016 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4017 conn->auth_type != HCI_AT_NO_BONDING)
4018 conn->auth_type |= 0x01;
4020 conn->auth_type = hci_get_auth_req(conn);
4023 /* If we're not bondable, force one of the non-bondable
4024 * authentication requirement values.
4026 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4027 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4029 cp.authentication = conn->auth_type;
4030 cp.oob_data = bredr_oob_data_present(conn);
4032 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4035 struct hci_cp_io_capability_neg_reply cp;
4037 bacpy(&cp.bdaddr, &ev->bdaddr);
4038 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4040 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4045 hci_dev_unlock(hdev);
4048 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4050 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4051 struct hci_conn *conn;
4053 BT_DBG("%s", hdev->name);
4057 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4061 conn->remote_cap = ev->capability;
4062 conn->remote_auth = ev->authentication;
4065 hci_dev_unlock(hdev);
4068 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4069 struct sk_buff *skb)
4071 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4072 int loc_mitm, rem_mitm, confirm_hint = 0;
4073 struct hci_conn *conn;
4075 BT_DBG("%s", hdev->name);
4079 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4082 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4086 loc_mitm = (conn->auth_type & 0x01);
4087 rem_mitm = (conn->remote_auth & 0x01);
4089 /* If we require MITM but the remote device can't provide that
4090 * (it has NoInputNoOutput) then reject the confirmation
4091 * request. We check the security level here since it doesn't
4092 * necessarily match conn->auth_type.
4094 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4095 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4096 BT_DBG("Rejecting request: remote device can't provide MITM");
4097 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4098 sizeof(ev->bdaddr), &ev->bdaddr);
4102 /* If no side requires MITM protection; auto-accept */
4103 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4104 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4106 /* If we're not the initiators request authorization to
4107 * proceed from user space (mgmt_user_confirm with
4108 * confirm_hint set to 1). The exception is if neither
4109 * side had MITM or if the local IO capability is
4110 * NoInputNoOutput, in which case we do auto-accept
4112 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4113 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4114 (loc_mitm || rem_mitm)) {
4115 BT_DBG("Confirming auto-accept as acceptor");
4120 BT_DBG("Auto-accept of user confirmation with %ums delay",
4121 hdev->auto_accept_delay);
4123 if (hdev->auto_accept_delay > 0) {
4124 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4125 queue_delayed_work(conn->hdev->workqueue,
4126 &conn->auto_accept_work, delay);
4130 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4131 sizeof(ev->bdaddr), &ev->bdaddr);
4136 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4137 le32_to_cpu(ev->passkey), confirm_hint);
4140 hci_dev_unlock(hdev);
4143 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4144 struct sk_buff *skb)
4146 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4148 BT_DBG("%s", hdev->name);
4150 if (hci_dev_test_flag(hdev, HCI_MGMT))
4151 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4154 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4155 struct sk_buff *skb)
4157 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4158 struct hci_conn *conn;
4160 BT_DBG("%s", hdev->name);
4162 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4166 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4167 conn->passkey_entered = 0;
4169 if (hci_dev_test_flag(hdev, HCI_MGMT))
4170 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4171 conn->dst_type, conn->passkey_notify,
4172 conn->passkey_entered);
4175 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4177 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4178 struct hci_conn *conn;
4180 BT_DBG("%s", hdev->name);
4182 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4187 case HCI_KEYPRESS_STARTED:
4188 conn->passkey_entered = 0;
4191 case HCI_KEYPRESS_ENTERED:
4192 conn->passkey_entered++;
4195 case HCI_KEYPRESS_ERASED:
4196 conn->passkey_entered--;
4199 case HCI_KEYPRESS_CLEARED:
4200 conn->passkey_entered = 0;
4203 case HCI_KEYPRESS_COMPLETED:
4207 if (hci_dev_test_flag(hdev, HCI_MGMT))
4208 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4209 conn->dst_type, conn->passkey_notify,
4210 conn->passkey_entered);
4213 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4214 struct sk_buff *skb)
4216 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4217 struct hci_conn *conn;
4219 BT_DBG("%s", hdev->name);
4223 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4227 /* Reset the authentication requirement to unknown */
4228 conn->remote_auth = 0xff;
4230 /* To avoid duplicate auth_failed events to user space we check
4231 * the HCI_CONN_AUTH_PEND flag which will be set if we
4232 * initiated the authentication. A traditional auth_complete
4233 * event gets always produced as initiator and is also mapped to
4234 * the mgmt_auth_failed event */
4235 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4236 mgmt_auth_failed(conn, ev->status);
4238 hci_conn_drop(conn);
4241 hci_dev_unlock(hdev);
4244 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4245 struct sk_buff *skb)
4247 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4248 struct inquiry_entry *ie;
4249 struct hci_conn *conn;
4251 BT_DBG("%s", hdev->name);
4255 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4257 memcpy(conn->features[1], ev->features, 8);
4259 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4261 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4263 hci_dev_unlock(hdev);
4266 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4267 struct sk_buff *skb)
4269 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4270 struct oob_data *data;
4272 BT_DBG("%s", hdev->name);
4276 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4279 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4281 struct hci_cp_remote_oob_data_neg_reply cp;
4283 bacpy(&cp.bdaddr, &ev->bdaddr);
4284 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4289 if (bredr_sc_enabled(hdev)) {
4290 struct hci_cp_remote_oob_ext_data_reply cp;
4292 bacpy(&cp.bdaddr, &ev->bdaddr);
4293 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4294 memset(cp.hash192, 0, sizeof(cp.hash192));
4295 memset(cp.rand192, 0, sizeof(cp.rand192));
4297 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4298 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4300 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4301 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4303 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4306 struct hci_cp_remote_oob_data_reply cp;
4308 bacpy(&cp.bdaddr, &ev->bdaddr);
4309 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4310 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4312 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4317 hci_dev_unlock(hdev);
4320 #if IS_ENABLED(CONFIG_BT_HS)
4321 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4323 struct hci_ev_channel_selected *ev = (void *)skb->data;
4324 struct hci_conn *hcon;
4326 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4328 skb_pull(skb, sizeof(*ev));
4330 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4334 amp_read_loc_assoc_final_data(hdev, hcon);
/* Handle the AMP Physical Link Complete event. On success, mark the AMP
 * connection BT_CONNECTED, copy the peer address from the underlying
 * BR/EDR connection, register debugfs/sysfs entries and notify L2CAP via
 * amp_physical_cfm().
 *
 * NOTE(review): original line numbers are non-contiguous — the status/
 * NULL-check branches guarding the early hci_dev_unlock() calls were
 * dropped by extraction.
 */
4337 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4338 struct sk_buff *skb)
4340 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4341 struct hci_conn *hcon, *bredr_hcon;
4343 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4348 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4350 hci_dev_unlock(hdev);
/* A physical link without an AMP manager cannot be completed. */
4354 if (!hcon->amp_mgr) {
4355 hci_dev_unlock(hdev);
4361 hci_dev_unlock(hdev);
/* The BR/EDR connection that carries the AMP signalling channel. */
4365 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4367 hcon->state = BT_CONNECTED;
4368 bacpy(&hcon->dst, &bredr_hcon->dst);
4370 hci_conn_hold(hcon);
4371 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4372 hci_conn_drop(hcon);
4374 hci_debugfs_create_conn(hcon);
4375 hci_conn_add_sysfs(hcon);
4377 amp_physical_cfm(bredr_hcon, hcon);
4379 hci_dev_unlock(hdev);
/* Handle the AMP Logical Link Complete event: create an hci_chan for the
 * physical link, record the logical-link handle, and confirm the logical
 * link to the bridged L2CAP channel (adjusting its MTU to the AMP
 * controller's block MTU).
 *
 * NOTE(review): non-contiguous original line numbers — status checks and
 * the NULL checks on hcon/hchan were dropped by extraction.
 */
4382 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4384 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4385 struct hci_conn *hcon;
4386 struct hci_chan *hchan;
4387 struct amp_mgr *mgr;
4389 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4390 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4393 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4397 /* Create AMP hchan */
4398 hchan = hci_chan_create(hcon);
4402 hchan->handle = le16_to_cpu(ev->handle);
4405 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4407 mgr = hcon->amp_mgr;
4408 if (mgr && mgr->bredr_chan) {
4409 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4411 l2cap_chan_lock(bredr_chan);
/* AMP data path uses block-based flow control; adopt its MTU. */
4413 bredr_chan->conn->mtu = hdev->block_mtu;
4414 l2cap_logical_cfm(bredr_chan, hchan, 0);
4415 hci_conn_hold(hcon);
4417 l2cap_chan_unlock(bredr_chan);
/* Handle the AMP Disconnection Logical Link Complete event: find the AMP
 * channel by its logical-link handle and tear it down with the reason
 * reported by the controller.
 */
4421 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4422 struct sk_buff *skb)
4424 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4425 struct hci_chan *hchan;
4427 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4428 le16_to_cpu(ev->handle), ev->status);
4435 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
/* Only AMP channels are handled here; ignore BR/EDR channels. */
4436 if (!hchan || !hchan->amp)
4439 amp_destroy_logical_link(hchan, ev->reason);
4442 hci_dev_unlock(hdev);
/* Handle the AMP Disconnection Physical Link Complete event: mark the
 * corresponding connection as closed.
 *
 * NOTE(review): the status check and NULL check on hcon are not visible;
 * extraction dropped interior lines (original numbering is non-contiguous).
 */
4445 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4446 struct sk_buff *skb)
4448 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4449 struct hci_conn *hcon;
4451 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4458 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4460 hcon->state = BT_CLOSED;
4464 hci_dev_unlock(hdev);
/* Handle the LE Connection Complete event.
 *
 * Responsibilities visible in this code:
 *  - clear HCI_LE_ADV (advertising implicitly stops on connection);
 *  - find the pending connection object or create a new one;
 *  - fill in initiator/responder address and type depending on role;
 *  - resolve an RPA back to the identity address via the stored IRK;
 *  - on failure, hand off to hci_le_conn_failed();
 *  - drop the connection if the peer is on the reject list;
 *  - set handle/state/connection parameters, notify mgmt and either
 *    request remote features (master, or slave when the controller
 *    supports slave-initiated feature exchange) or go straight to
 *    BT_CONNECTED;
 *  - clear the matching pend_le_conns entry and restart background scan.
 *
 * NOTE(review): original line numbers are non-contiguous — several
 * if/else and goto lines were dropped by extraction, so branch
 * boundaries below are approximate.
 */
4468 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4470 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4471 struct hci_conn_params *params;
4472 struct hci_conn *conn;
4473 struct smp_irk *irk;
4476 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4480 /* All controllers implicitly stop advertising in the event of a
4481 * connection, so ensure that the state bit is cleared.
4483 hci_dev_clear_flag(hdev, HCI_LE_ADV);
4485 conn = hci_lookup_le_connect(hdev);
/* No pending connect object: allocate a fresh one for this link. */
4487 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4489 BT_ERR("No memory for new connection");
4493 conn->dst_type = ev->bdaddr_type;
4495 /* If we didn't have a hci_conn object previously
4496 * but we're in master role this must be something
4497 * initiated using a white list. Since white list based
4498 * connections are not "first class citizens" we don't
4499 * have full tracking of them. Therefore, we go ahead
4500 * with a "best effort" approach of determining the
4501 * initiator address based on the HCI_PRIVACY flag.
4504 conn->resp_addr_type = ev->bdaddr_type;
4505 bacpy(&conn->resp_addr, &ev->bdaddr);
4506 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4507 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4508 bacpy(&conn->init_addr, &hdev->rpa);
4510 hci_copy_identity_address(hdev,
4512 &conn->init_addr_type);
/* An outstanding connect attempt completed: stop its timeout. */
4516 cancel_delayed_work(&conn->le_conn_timeout);
4520 /* Set the responder (our side) address type based on
4521 * the advertising address type.
4523 conn->resp_addr_type = hdev->adv_addr_type;
4524 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4525 bacpy(&conn->resp_addr, &hdev->random_addr);
4527 bacpy(&conn->resp_addr, &hdev->bdaddr);
4529 conn->init_addr_type = ev->bdaddr_type;
4530 bacpy(&conn->init_addr, &ev->bdaddr);
4532 /* For incoming connections, set the default minimum
4533 * and maximum connection interval. They will be used
4534 * to check if the parameters are in range and if not
4535 * trigger the connection update procedure.
4537 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4538 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4541 /* Lookup the identity address from the stored connection
4542 * address and address type.
4544 * When establishing connections to an identity address, the
4545 * connection procedure will store the resolvable random
4546 * address first. Now if it can be converted back into the
4547 * identity address, start using the identity address from
4550 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4552 bacpy(&conn->dst, &irk->bdaddr);
4553 conn->dst_type = irk->addr_type;
/* Non-zero event status: report the failed connection attempt. */
4557 hci_le_conn_failed(conn, ev->status);
4561 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4562 addr_type = BDADDR_LE_PUBLIC;
4564 addr_type = BDADDR_LE_RANDOM;
4566 /* Drop the connection if the device is blocked */
4567 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4568 hci_conn_drop(conn);
/* Tell userspace exactly once per connection. */
4572 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4573 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4575 conn->sec_level = BT_SECURITY_LOW;
4576 conn->handle = __le16_to_cpu(ev->handle);
4577 conn->state = BT_CONFIG;
4579 conn->le_conn_interval = le16_to_cpu(ev->interval);
4580 conn->le_conn_latency = le16_to_cpu(ev->latency);
4581 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4583 hci_debugfs_create_conn(conn);
4584 hci_conn_add_sysfs(conn);
4587 /* The remote features procedure is defined for master
4588 * role only. So only in case of an initiated connection
4589 * request the remote features.
4591 * If the local controller supports slave-initiated features
4592 * exchange, then requesting the remote features in slave
4593 * role is possible. Otherwise just transition into the
4594 * connected state without requesting the remote features.
4597 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
4598 struct hci_cp_le_read_remote_features cp;
4600 cp.handle = __cpu_to_le16(conn->handle);
4602 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
4605 hci_conn_hold(conn);
/* Feature exchange not possible: finish connection setup now. */
4607 conn->state = BT_CONNECTED;
4608 hci_connect_cfm(conn, ev->status);
4611 hci_connect_cfm(conn, ev->status);
/* The pending auto-connect entry has served its purpose; drop it. */
4614 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4617 list_del_init(&params->action);
4619 hci_conn_drop(params->conn);
4620 hci_conn_put(params->conn);
4621 params->conn = NULL;
4626 hci_update_background_scan(hdev);
4627 hci_dev_unlock(hdev);
/* Handle the LE Connection Update Complete event: refresh the cached
 * connection interval, latency and supervision timeout on the matching
 * connection object.
 *
 * NOTE(review): the status and NULL-conn guards are not visible here;
 * extraction dropped interior lines (original numbering is non-contiguous).
 */
4630 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4631 struct sk_buff *skb)
4633 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4634 struct hci_conn *conn;
4636 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4643 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4645 conn->le_conn_interval = le16_to_cpu(ev->interval);
4646 conn->le_conn_latency = le16_to_cpu(ev->latency);
4647 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4650 hci_dev_unlock(hdev);
4653 /* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertising report should trigger an LE
 * connection attempt.
 *
 * Returns the hci_conn created by hci_connect_le() on success, or (from
 * the visible error handling) treats -EBUSY as a non-error since only
 * one LE connection attempt can be outstanding at a time.
 *
 * Early bail-outs visible below: non-connectable advertising types,
 * blocked peers, an existing slave-role connection, and addresses with
 * no matching pend_le_conns entry.
 *
 * NOTE(review): several return statements and case terminators were
 * dropped by extraction (non-contiguous original numbering).
 */
4654 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4656 u8 addr_type, u8 adv_type,
4657 bdaddr_t *direct_rpa)
4659 struct hci_conn *conn;
4660 struct hci_conn_params *params;
4662 /* If the event is not connectable don't proceed further */
4663 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4666 /* Ignore if the device is blocked */
4667 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4670 /* Most controller will fail if we try to create new connections
4671 * while we have an existing one in slave role.
4673 if (hdev->conn_hash.le_num_slave > 0)
4676 /* If we're not connectable only connect devices that we have in
4677 * our pend_le_conns list.
4679 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
4684 if (!params->explicit_connect) {
4685 switch (params->auto_connect) {
4686 case HCI_AUTO_CONN_DIRECT:
4687 /* Only devices advertising with ADV_DIRECT_IND are
4688 * triggering a connection attempt. This is allowing
4689 * incoming connections from slave devices.
4691 if (adv_type != LE_ADV_DIRECT_IND)
4694 case HCI_AUTO_CONN_ALWAYS:
4695 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
4696 * are triggering a connection attempt. This means
4697 * that incoming connectioms from slave device are
4698 * accepted and also outgoing connections to slave
4699 * devices are established when found.
4707 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4708 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
4710 if (!IS_ERR(conn)) {
4711 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
4712 * by higher layer that tried to connect, if no then
4713 * store the pointer since we don't really have any
4714 * other owner of the object besides the params that
4715 * triggered it. This way we can abort the connection if
4716 * the parameters get removed and keep the reference
4717 * count consistent once the connection is established.
4720 if (!params->explicit_connect)
4721 params->conn = hci_conn_get(conn);
4726 switch (PTR_ERR(conn)) {
4728 /* If hci_connect() returns -EBUSY it means there is already
4729 * an LE connection attempt going on. Since controllers don't
4730 * support more than one connection attempt at the time, we
4731 * don't consider this an error case.
4735 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process a single LE advertising (or directed advertising) report.
 *
 * Visible pipeline:
 *  1. reject legacy reports longer than HCI_MAX_AD_LENGTH (31 bytes);
 *  2. trim trailing zero padding to compute the real AD length;
 *  3. for directed reports, only accept RPAs that resolve with our own
 *     IRK while HCI_PRIVACY is set;
 *  4. resolve the advertiser's RPA to an identity address if an IRK is
 *     stored;
 *  5. hand the address to check_pending_le_conn() and, if a connection
 *     was triggered by an ADV_IND, stash the AD data on the conn for the
 *     later mgmt_device_connected();
 *  6. emit/merge mgmt Device Found events, buffering ADV_IND/ADV_SCAN_IND
 *     so a following SCAN_RSP can be merged into one event.
 *
 * NOTE(review): original line numbers are non-contiguous — early returns,
 * some if/else lines and variable declarations (ptr, real_len, flags,
 * match) fell out during extraction.
 */
4742 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4743 u8 bdaddr_type, bdaddr_t *direct_addr,
4744 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4746 struct discovery_state *d = &hdev->discovery;
4747 struct smp_irk *irk;
4748 struct hci_conn *conn;
4753 if (len > HCI_MAX_AD_LENGTH) {
4754 pr_err_ratelimited("legacy adv larger than 31 bytes");
4758 /* Find the end of the data in case the report contains padded zero
4759 * bytes at the end causing an invalid length value.
4761 * When data is NULL, len is 0 so there is no need for extra ptr
4762 * check as 'ptr < data + 0' is already false in such case.
4764 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
4765 if (ptr + 1 + *ptr > data + len)
4769 real_len = ptr - data;
4771 /* Adjust for actual length */
4772 if (len != real_len) {
4773 BT_ERR_RATELIMITED("%s advertising data length corrected",
4778 /* If the direct address is present, then this report is from
4779 * a LE Direct Advertising Report event. In that case it is
4780 * important to see if the address is matching the local
4781 * controller address.
4784 /* Only resolvable random addresses are valid for these
4785 * kind of reports and others can be ignored.
4787 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4790 /* If the controller is not using resolvable random
4791 * addresses, then this report can be ignored.
4793 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4796 /* If the local IRK of the controller does not match
4797 * with the resolvable random address provided, then
4798 * this report can be ignored.
4800 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4804 /* Check if we need to convert to identity address */
4805 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4807 bdaddr = &irk->bdaddr;
4808 bdaddr_type = irk->addr_type;
4811 /* Check if we have been requested to connect to this device.
4813 * direct_addr is set only for directed advertising reports (it is NULL
4814 * for advertising reports) and is already verified to be RPA above.
4816 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
4818 if (conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
4819 /* Store report for later inclusion by
4820 * mgmt_device_connected
4822 memcpy(conn->le_adv_data, data, len);
4823 conn->le_adv_data_len = len;
4826 /* Passive scanning shouldn't trigger any device found events,
4827 * except for devices marked as CONN_REPORT for which we do send
4828 * device found events.
4830 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4831 if (type == LE_ADV_DIRECT_IND)
4834 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4835 bdaddr, bdaddr_type))
4838 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4839 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4842 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4843 rssi, flags, data, len, NULL, 0);
4847 /* When receiving non-connectable or scannable undirected
4848 * advertising reports, this means that the remote device is
4849 * not connectable and then clearly indicate this in the
4850 * device found event.
4852 * When receiving a scan response, then there is no way to
4853 * know if the remote device is connectable or not. However
4854 * since scan responses are merged with a previously seen
4855 * advertising report, the flags field from that report
4858 * In the really unlikely case that a controller get confused
4859 * and just sends a scan response event, then it is marked as
4860 * not connectable as well.
4862 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4863 type == LE_ADV_SCAN_RSP)
4864 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4868 /* If there's nothing pending either store the data from this
4869 * event or send an immediate device found event if the data
4870 * should not be stored for later.
4872 if (!has_pending_adv_report(hdev)) {
4873 /* If the report will trigger a SCAN_REQ store it for
4876 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4877 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4878 rssi, flags, data, len);
4882 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4883 rssi, flags, data, len, NULL, 0);
4887 /* Check if the pending report is for the same device as the new one */
4888 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4889 bdaddr_type == d->last_adv_addr_type);
4891 /* If the pending data doesn't match this report or this isn't a
4892 * scan response (e.g. we got a duplicate ADV_IND) then force
4893 * sending of the pending data.
4895 if (type != LE_ADV_SCAN_RSP || !match) {
4896 /* Send out whatever is in the cache, but skip duplicates */
4898 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4899 d->last_adv_addr_type, NULL,
4900 d->last_adv_rssi, d->last_adv_flags,
4902 d->last_adv_data_len, NULL, 0);
4904 /* If the new report will trigger a SCAN_REQ store it for
4907 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4908 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4909 rssi, flags, data, len);
4913 /* The advertising reports cannot be merged, so clear
4914 * the pending report and send out a device found event.
4916 clear_pending_adv_report(hdev);
4917 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4918 rssi, flags, data, len, NULL, 0);
4922 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4923 * the new event is a SCAN_RSP. We can therefore proceed with
4924 * sending a merged device found event.
4926 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4927 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4928 d->last_adv_data, d->last_adv_data_len, data, len);
4929 clear_pending_adv_report(hdev);
/* Handle the LE Advertising Report event: iterate over the packed
 * reports in the event payload, feeding each valid one (length within
 * the 31-byte legacy limit) to process_adv_report(). The RSSI byte
 * trails each report's AD data.
 */
4932 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4934 u8 num_reports = skb->data[0];
4935 void *ptr = &skb->data[1];
4939 while (num_reports--) {
4940 struct hci_ev_le_advertising_info *ev = ptr;
4943 if (ev->length <= HCI_MAX_AD_LENGTH) {
/* RSSI is the single byte immediately after the AD data. */
4944 rssi = ev->data[ev->length];
4945 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4946 ev->bdaddr_type, NULL, 0, rssi,
4947 ev->data, ev->length);
4949 bt_dev_err(hdev, "Dropping invalid advertising data");
/* Advance past header, AD data and the trailing RSSI byte. */
4952 ptr += sizeof(*ev) + ev->length + 1;
4955 hci_dev_unlock(hdev);
/* Handle the LE Read Remote Features Complete event: store the remote
 * LE feature bits and, if the connection was still in BT_CONFIG, finish
 * connection setup. Error 0x1a (Unsupported Remote Feature) from a
 * slave-initiated exchange is deliberately treated as success.
 *
 * NOTE(review): non-contiguous original numbering — the NULL check on
 * conn and the status-handling branch structure were dropped by
 * extraction.
 */
4958 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
4959 struct sk_buff *skb)
4961 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
4962 struct hci_conn *conn;
4964 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4968 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4971 memcpy(conn->features[0], ev->features, 8);
4973 if (conn->state == BT_CONFIG) {
4976 /* If the local controller supports slave-initiated
4977 * features exchange, but the remote controller does
4978 * not, then it is possible that the error code 0x1a
4979 * for unsupported remote feature gets returned.
4981 * In this specific case, allow the connection to
4982 * transition into connected state and mark it as
4985 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
4986 !conn->out && ev->status == 0x1a)
4989 status = ev->status;
4991 conn->state = BT_CONNECTED;
4992 hci_connect_cfm(conn, status);
4993 hci_conn_drop(conn);
4997 hci_dev_unlock(hdev);
/* Handle the LE Long Term Key Request event: look up the LTK for the
 * peer, validate EDiv/Rand (both must be zero for Secure Connections
 * keys, must match the stored values for legacy keys), and reply with
 * the key padded out to 16 bytes. STKs are single-use and are removed
 * after the reply. On any failure path a negative reply is sent.
 *
 * NOTE(review): non-contiguous original numbering — the NULL checks and
 * the goto-not_found lines were dropped by extraction, so the tail
 * starting at the neg.handle assignment is the not-found path.
 */
5000 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5002 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5003 struct hci_cp_le_ltk_reply cp;
5004 struct hci_cp_le_ltk_neg_reply neg;
5005 struct hci_conn *conn;
5006 struct smp_ltk *ltk;
5008 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5012 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5016 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5020 if (smp_ltk_is_sc(ltk)) {
5021 /* With SC both EDiv and Rand are set to zero */
5022 if (ev->ediv || ev->rand)
5025 /* For non-SC keys check that EDiv and Rand match */
5026 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Zero-pad the key beyond its negotiated encryption size. */
5030 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5031 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5032 cp.handle = cpu_to_le16(conn->handle);
5034 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5036 conn->enc_key_size = ltk->enc_size;
5038 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5040 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5041 * temporary key used to encrypt a connection following
5042 * pairing. It is used during the Encrypted Session Setup to
5043 * distribute the keys. Later, security can be re-established
5044 * using a distributed LTK.
5046 if (ltk->type == SMP_STK) {
5047 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5048 list_del_rcu(&ltk->list);
5049 kfree_rcu(ltk, rcu);
5051 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5054 hci_dev_unlock(hdev);
/* Not-found / mismatch path: decline the key request. */
5059 neg.handle = ev->handle;
5060 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5061 hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request Negative Reply for the
 * given connection handle. The rejection reason is part of the command
 * parameters (its assignment line is not visible in this extraction).
 */
5064 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5067 struct hci_cp_le_conn_param_req_neg_reply cp;
5069 cp.handle = cpu_to_le16(handle);
5072 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle the LE Remote Connection Parameter Request event: validate the
 * requested interval/latency/timeout, reject with UNKNOWN_CONN_ID if the
 * connection is not up or INVALID_LL_PARAMS if out of range, otherwise
 * (when master) remember the new parameters, inform userspace via
 * mgmt_new_conn_param(), and accept the request by echoing the values
 * back in a positive reply.
 */
5076 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5077 struct sk_buff *skb)
5079 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5080 struct hci_cp_le_conn_param_req_reply cp;
5081 struct hci_conn *hcon;
5082 u16 handle, min, max, latency, timeout;
5084 handle = le16_to_cpu(ev->handle);
5085 min = le16_to_cpu(ev->interval_min);
5086 max = le16_to_cpu(ev->interval_max);
5087 latency = le16_to_cpu(ev->latency);
5088 timeout = le16_to_cpu(ev->timeout);
5090 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5091 if (!hcon || hcon->state != BT_CONNECTED)
5092 return send_conn_param_neg_reply(hdev, handle,
5093 HCI_ERROR_UNKNOWN_CONN_ID);
5095 if (hci_check_conn_params(min, max, latency, timeout))
5096 return send_conn_param_neg_reply(hdev, handle,
5097 HCI_ERROR_INVALID_LL_PARAMS);
5099 if (hcon->role == HCI_ROLE_MASTER) {
5100 struct hci_conn_params *params;
/* Persist the accepted parameters so future connections reuse them. */
5105 params = hci_conn_params_lookup(hdev, &hcon->dst,
5108 params->conn_min_interval = min;
5109 params->conn_max_interval = max;
5110 params->conn_latency = latency;
5111 params->supervision_timeout = timeout;
5117 hci_dev_unlock(hdev);
5119 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5120 store_hint, min, max, latency, timeout);
/* Accept: echo the requested values back to the controller. */
5123 cp.handle = ev->handle;
5124 cp.interval_min = ev->interval_min;
5125 cp.interval_max = ev->interval_max;
5126 cp.latency = ev->latency;
5127 cp.timeout = ev->timeout;
5131 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* Handle the LE Direct Advertising Report event: sanity-check that the
 * skb actually contains the advertised number of fixed-size reports,
 * then feed each one (including its direct/initiator address) to
 * process_adv_report(). Direct reports carry no AD data, hence the
 * NULL/0 data arguments.
 */
5134 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5135 struct sk_buff *skb)
5137 u8 num_reports = skb->data[0];
5138 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
/* Guard against truncated events before touching the report array. */
5140 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
5145 for (; num_reports; num_reports--, ev++)
5146 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5147 ev->bdaddr_type, &ev->direct_addr,
5148 ev->direct_addr_type, ev->rssi, NULL, 0);
5150 hci_dev_unlock(hdev);
/* Dispatch an LE Meta event to the handler matching its subevent code.
 * The meta header is pulled off the skb first so each subhandler sees
 * its own event structure at skb->data.
 */
5153 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5155 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5157 skb_pull(skb, sizeof(*le_ev));
5159 switch (le_ev->subevent) {
5160 case HCI_EV_LE_CONN_COMPLETE:
5161 hci_le_conn_complete_evt(hdev, skb);
5164 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5165 hci_le_conn_update_complete_evt(hdev, skb);
5168 case HCI_EV_LE_ADVERTISING_REPORT:
5169 hci_le_adv_report_evt(hdev, skb);
5172 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5173 hci_le_remote_feat_complete_evt(hdev, skb);
5176 case HCI_EV_LE_LTK_REQ:
5177 hci_le_ltk_request_evt(hdev, skb);
5180 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5181 hci_le_remote_conn_param_req_evt(hdev, skb);
5184 case HCI_EV_LE_DIRECT_ADV_REPORT:
5185 hci_le_direct_adv_report_evt(hdev, skb);
/* Check whether the given skb is the Command Complete event matching a
 * request's final opcode/event, so its parameters can be handed to a
 * req_complete_skb callback. Command Status ends the request with no
 * extra parameters; a wrong event type, short event, or mismatched
 * opcode means "not the one we want".
 *
 * NOTE(review): non-contiguous original numbering — the return
 * statements for each branch were dropped by extraction.
 */
5193 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5194 u8 event, struct sk_buff *skb)
5196 struct hci_ev_cmd_complete *ev;
5197 struct hci_event_hdr *hdr;
5202 if (skb->len < sizeof(*hdr)) {
5203 BT_ERR("Too short HCI event");
5207 hdr = (void *) skb->data;
5208 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5211 if (hdr->evt != event)
5216 /* Check if request ended in Command Status - no way to retreive
5217 * any extra parameters in this case.
5219 if (hdr->evt == HCI_EV_CMD_STATUS)
5222 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5223 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
5227 if (skb->len < sizeof(*ev)) {
5228 BT_ERR("Too short cmd_complete event");
5232 ev = (void *) skb->data;
5233 skb_pull(skb, sizeof(*ev));
5235 if (opcode != __le16_to_cpu(ev->opcode)) {
5236 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5237 __le16_to_cpu(ev->opcode));
5244 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5246 struct hci_event_hdr *hdr = (void *) skb->data;
5247 hci_req_complete_t req_complete = NULL;
5248 hci_req_complete_skb_t req_complete_skb = NULL;
5249 struct sk_buff *orig_skb = NULL;
5250 u8 status = 0, event = hdr->evt, req_evt = 0;
5251 u16 opcode = HCI_OP_NOP;
5254 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
5258 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5259 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5260 opcode = __le16_to_cpu(cmd_hdr->opcode);
5261 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5266 /* If it looks like we might end up having to call
5267 * req_complete_skb, store a pristine copy of the skb since the
5268 * various handlers may modify the original one through
5269 * skb_pull() calls, etc.
5271 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5272 event == HCI_EV_CMD_COMPLETE)
5273 orig_skb = skb_clone(skb, GFP_KERNEL);
5275 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5278 case HCI_EV_INQUIRY_COMPLETE:
5279 hci_inquiry_complete_evt(hdev, skb);
5282 case HCI_EV_INQUIRY_RESULT:
5283 hci_inquiry_result_evt(hdev, skb);
5286 case HCI_EV_CONN_COMPLETE:
5287 hci_conn_complete_evt(hdev, skb);
5290 case HCI_EV_CONN_REQUEST:
5291 hci_conn_request_evt(hdev, skb);
5294 case HCI_EV_DISCONN_COMPLETE:
5295 hci_disconn_complete_evt(hdev, skb);
5298 case HCI_EV_AUTH_COMPLETE:
5299 hci_auth_complete_evt(hdev, skb);
5302 case HCI_EV_REMOTE_NAME:
5303 hci_remote_name_evt(hdev, skb);
5306 case HCI_EV_ENCRYPT_CHANGE:
5307 hci_encrypt_change_evt(hdev, skb);
5310 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5311 hci_change_link_key_complete_evt(hdev, skb);
5314 case HCI_EV_REMOTE_FEATURES:
5315 hci_remote_features_evt(hdev, skb);
5318 case HCI_EV_CMD_COMPLETE:
5319 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5320 &req_complete, &req_complete_skb);
5323 case HCI_EV_CMD_STATUS:
5324 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5328 case HCI_EV_HARDWARE_ERROR:
5329 hci_hardware_error_evt(hdev, skb);
5332 case HCI_EV_ROLE_CHANGE:
5333 hci_role_change_evt(hdev, skb);
5336 case HCI_EV_NUM_COMP_PKTS:
5337 hci_num_comp_pkts_evt(hdev, skb);
5340 case HCI_EV_MODE_CHANGE:
5341 hci_mode_change_evt(hdev, skb);
5344 case HCI_EV_PIN_CODE_REQ:
5345 hci_pin_code_request_evt(hdev, skb);
5348 case HCI_EV_LINK_KEY_REQ:
5349 hci_link_key_request_evt(hdev, skb);
5352 case HCI_EV_LINK_KEY_NOTIFY:
5353 hci_link_key_notify_evt(hdev, skb);
5356 case HCI_EV_CLOCK_OFFSET:
5357 hci_clock_offset_evt(hdev, skb);
5360 case HCI_EV_PKT_TYPE_CHANGE:
5361 hci_pkt_type_change_evt(hdev, skb);
5364 case HCI_EV_PSCAN_REP_MODE:
5365 hci_pscan_rep_mode_evt(hdev, skb);
5368 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5369 hci_inquiry_result_with_rssi_evt(hdev, skb);
5372 case HCI_EV_REMOTE_EXT_FEATURES:
5373 hci_remote_ext_features_evt(hdev, skb);
5376 case HCI_EV_SYNC_CONN_COMPLETE:
5377 hci_sync_conn_complete_evt(hdev, skb);
5380 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5381 hci_extended_inquiry_result_evt(hdev, skb);
5384 case HCI_EV_KEY_REFRESH_COMPLETE:
5385 hci_key_refresh_complete_evt(hdev, skb);
5388 case HCI_EV_IO_CAPA_REQUEST:
5389 hci_io_capa_request_evt(hdev, skb);
5392 case HCI_EV_IO_CAPA_REPLY:
5393 hci_io_capa_reply_evt(hdev, skb);
5396 case HCI_EV_USER_CONFIRM_REQUEST:
5397 hci_user_confirm_request_evt(hdev, skb);
5400 case HCI_EV_USER_PASSKEY_REQUEST:
5401 hci_user_passkey_request_evt(hdev, skb);
5404 case HCI_EV_USER_PASSKEY_NOTIFY:
5405 hci_user_passkey_notify_evt(hdev, skb);
5408 case HCI_EV_KEYPRESS_NOTIFY:
5409 hci_keypress_notify_evt(hdev, skb);
5412 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5413 hci_simple_pair_complete_evt(hdev, skb);
5416 case HCI_EV_REMOTE_HOST_FEATURES:
5417 hci_remote_host_features_evt(hdev, skb);
5420 case HCI_EV_LE_META:
5421 hci_le_meta_evt(hdev, skb);
5424 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5425 hci_remote_oob_data_request_evt(hdev, skb);
5428 #if IS_ENABLED(CONFIG_BT_HS)
5429 case HCI_EV_CHANNEL_SELECTED:
5430 hci_chan_selected_evt(hdev, skb);
5433 case HCI_EV_PHY_LINK_COMPLETE:
5434 hci_phy_link_complete_evt(hdev, skb);
5437 case HCI_EV_LOGICAL_LINK_COMPLETE:
5438 hci_loglink_complete_evt(hdev, skb);
5441 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5442 hci_disconn_loglink_complete_evt(hdev, skb);
5445 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5446 hci_disconn_phylink_complete_evt(hdev, skb);
5450 case HCI_EV_NUM_COMP_BLOCKS:
5451 hci_num_comp_blocks_evt(hdev, skb);
5455 BT_DBG("%s event 0x%2.2x", hdev->name, event);
5460 req_complete(hdev, status, opcode);
5461 } else if (req_complete_skb) {
5462 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
5463 kfree_skb(orig_skb);
5466 req_complete_skb(hdev, status, opcode, orig_skb);
5470 kfree_skb(orig_skb);
5472 hdev->stat.evt_rx++;