/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
28 #include <linux/crypto.h>
29 #include <crypto/algapi.h>
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "hci_request.h"
36 #include "hci_debugfs.h"
42 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
43 "\x00\x00\x00\x00\x00\x00\x00\x00"
45 /* Handle HCI Event packets */
47 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
50 __u8 status = *((__u8 *) skb->data);
52 BT_DBG("%s status 0x%2.2x", hdev->name, status);
54 /* It is possible that we receive Inquiry Complete event right
55 * before we receive Inquiry Cancel Command Complete event, in
56 * which case the latter event should have status of Command
57 * Disallowed (0x0c). This should not be treated as error, since
58 * we actually achieve what Inquiry Cancel wants to achieve,
59 * which is to end the last Inquiry session.
61 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
62 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
71 clear_bit(HCI_INQUIRY, &hdev->flags);
72 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
73 wake_up_bit(&hdev->flags, HCI_INQUIRY);
76 /* Set discovery state to stopped if we're not doing LE active
79 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
80 hdev->le_scan_type != LE_SCAN_ACTIVE)
81 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
84 hci_conn_check_pending(hdev);
87 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
89 __u8 status = *((__u8 *) skb->data);
91 BT_DBG("%s status 0x%2.2x", hdev->name, status);
96 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
99 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
101 __u8 status = *((__u8 *) skb->data);
103 BT_DBG("%s status 0x%2.2x", hdev->name, status);
108 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
110 hci_conn_check_pending(hdev);
113 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
116 BT_DBG("%s", hdev->name);
119 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
121 struct hci_rp_role_discovery *rp = (void *) skb->data;
122 struct hci_conn *conn;
124 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
131 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 conn->role = rp->role;
135 hci_dev_unlock(hdev);
138 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 struct hci_rp_read_link_policy *rp = (void *) skb->data;
141 struct hci_conn *conn;
143 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
152 conn->link_policy = __le16_to_cpu(rp->policy);
154 hci_dev_unlock(hdev);
157 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
159 struct hci_rp_write_link_policy *rp = (void *) skb->data;
160 struct hci_conn *conn;
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
168 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
174 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
176 conn->link_policy = get_unaligned_le16(sent + 2);
178 hci_dev_unlock(hdev);
181 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
184 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
186 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
191 hdev->link_policy = __le16_to_cpu(rp->policy);
194 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
197 __u8 status = *((__u8 *) skb->data);
200 BT_DBG("%s status 0x%2.2x", hdev->name, status);
205 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
209 hdev->link_policy = get_unaligned_le16(sent);
212 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
214 __u8 status = *((__u8 *) skb->data);
216 BT_DBG("%s status 0x%2.2x", hdev->name, status);
218 clear_bit(HCI_RESET, &hdev->flags);
223 /* Reset all non-persistent flags */
224 hci_dev_clear_volatile_flags(hdev);
226 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
228 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
229 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
231 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
232 hdev->adv_data_len = 0;
234 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
235 hdev->scan_rsp_data_len = 0;
237 hdev->le_scan_type = LE_SCAN_PASSIVE;
239 hdev->ssp_debug_mode = 0;
241 hci_bdaddr_list_clear(&hdev->le_accept_list);
242 hci_bdaddr_list_clear(&hdev->le_resolv_list);
245 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
248 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
249 struct hci_cp_read_stored_link_key *sent;
251 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
253 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
257 if (!rp->status && sent->read_all == 0x01) {
258 hdev->stored_max_keys = rp->max_keys;
259 hdev->stored_num_keys = rp->num_keys;
263 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
266 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
268 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
273 if (rp->num_keys <= hdev->stored_num_keys)
274 hdev->stored_num_keys -= rp->num_keys;
276 hdev->stored_num_keys = 0;
279 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
281 __u8 status = *((__u8 *) skb->data);
284 BT_DBG("%s status 0x%2.2x", hdev->name, status);
286 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
292 if (hci_dev_test_flag(hdev, HCI_MGMT))
293 mgmt_set_local_name_complete(hdev, sent, status);
295 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
297 hci_dev_unlock(hdev);
300 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
302 struct hci_rp_read_local_name *rp = (void *) skb->data;
304 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
309 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
310 hci_dev_test_flag(hdev, HCI_CONFIG))
311 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
314 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
316 __u8 status = *((__u8 *) skb->data);
319 BT_DBG("%s status 0x%2.2x", hdev->name, status);
321 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
328 __u8 param = *((__u8 *) sent);
330 if (param == AUTH_ENABLED)
331 set_bit(HCI_AUTH, &hdev->flags);
333 clear_bit(HCI_AUTH, &hdev->flags);
336 if (hci_dev_test_flag(hdev, HCI_MGMT))
337 mgmt_auth_enable_complete(hdev, status);
339 hci_dev_unlock(hdev);
342 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
344 __u8 status = *((__u8 *) skb->data);
348 BT_DBG("%s status 0x%2.2x", hdev->name, status);
353 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
357 param = *((__u8 *) sent);
360 set_bit(HCI_ENCRYPT, &hdev->flags);
362 clear_bit(HCI_ENCRYPT, &hdev->flags);
365 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
367 __u8 status = *((__u8 *) skb->data);
371 BT_DBG("%s status 0x%2.2x", hdev->name, status);
373 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
377 param = *((__u8 *) sent);
382 hdev->discov_timeout = 0;
386 if (param & SCAN_INQUIRY)
387 set_bit(HCI_ISCAN, &hdev->flags);
389 clear_bit(HCI_ISCAN, &hdev->flags);
391 if (param & SCAN_PAGE)
392 set_bit(HCI_PSCAN, &hdev->flags);
394 clear_bit(HCI_PSCAN, &hdev->flags);
397 hci_dev_unlock(hdev);
400 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
402 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
404 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
409 memcpy(hdev->dev_class, rp->dev_class, 3);
411 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
412 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
415 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
417 __u8 status = *((__u8 *) skb->data);
420 BT_DBG("%s status 0x%2.2x", hdev->name, status);
422 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
429 memcpy(hdev->dev_class, sent, 3);
431 if (hci_dev_test_flag(hdev, HCI_MGMT))
432 mgmt_set_class_of_dev_complete(hdev, sent, status);
434 hci_dev_unlock(hdev);
437 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
439 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
442 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
447 setting = __le16_to_cpu(rp->voice_setting);
449 if (hdev->voice_setting == setting)
452 hdev->voice_setting = setting;
454 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
457 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
460 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
463 __u8 status = *((__u8 *) skb->data);
467 BT_DBG("%s status 0x%2.2x", hdev->name, status);
472 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
476 setting = get_unaligned_le16(sent);
478 if (hdev->voice_setting == setting)
481 hdev->voice_setting = setting;
483 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
486 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
489 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
492 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
494 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
499 hdev->num_iac = rp->num_iac;
501 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
504 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
506 __u8 status = *((__u8 *) skb->data);
507 struct hci_cp_write_ssp_mode *sent;
509 BT_DBG("%s status 0x%2.2x", hdev->name, status);
511 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
519 hdev->features[1][0] |= LMP_HOST_SSP;
521 hdev->features[1][0] &= ~LMP_HOST_SSP;
524 if (hci_dev_test_flag(hdev, HCI_MGMT))
525 mgmt_ssp_enable_complete(hdev, sent->mode, status);
528 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
530 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
533 hci_dev_unlock(hdev);
536 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
538 u8 status = *((u8 *) skb->data);
539 struct hci_cp_write_sc_support *sent;
541 BT_DBG("%s status 0x%2.2x", hdev->name, status);
543 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
551 hdev->features[1][0] |= LMP_HOST_SC;
553 hdev->features[1][0] &= ~LMP_HOST_SC;
556 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
558 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
560 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
563 hci_dev_unlock(hdev);
566 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
568 struct hci_rp_read_local_version *rp = (void *) skb->data;
570 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
575 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
576 hci_dev_test_flag(hdev, HCI_CONFIG)) {
577 hdev->hci_ver = rp->hci_ver;
578 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
579 hdev->lmp_ver = rp->lmp_ver;
580 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
581 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
585 static void hci_cc_read_local_commands(struct hci_dev *hdev,
588 struct hci_rp_read_local_commands *rp = (void *) skb->data;
590 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
595 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
596 hci_dev_test_flag(hdev, HCI_CONFIG))
597 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
600 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
603 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
604 struct hci_conn *conn;
606 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
613 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
615 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
617 hci_dev_unlock(hdev);
620 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
623 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
624 struct hci_conn *conn;
627 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
632 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
638 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
640 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
642 hci_dev_unlock(hdev);
645 static void hci_cc_read_local_features(struct hci_dev *hdev,
648 struct hci_rp_read_local_features *rp = (void *) skb->data;
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
655 memcpy(hdev->features, rp->features, 8);
657 /* Adjust default settings according to features
658 * supported by device. */
660 if (hdev->features[0][0] & LMP_3SLOT)
661 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
663 if (hdev->features[0][0] & LMP_5SLOT)
664 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
666 if (hdev->features[0][1] & LMP_HV2) {
667 hdev->pkt_type |= (HCI_HV2);
668 hdev->esco_type |= (ESCO_HV2);
671 if (hdev->features[0][1] & LMP_HV3) {
672 hdev->pkt_type |= (HCI_HV3);
673 hdev->esco_type |= (ESCO_HV3);
676 if (lmp_esco_capable(hdev))
677 hdev->esco_type |= (ESCO_EV3);
679 if (hdev->features[0][4] & LMP_EV4)
680 hdev->esco_type |= (ESCO_EV4);
682 if (hdev->features[0][4] & LMP_EV5)
683 hdev->esco_type |= (ESCO_EV5);
685 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
686 hdev->esco_type |= (ESCO_2EV3);
688 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
689 hdev->esco_type |= (ESCO_3EV3);
691 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
692 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
695 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
698 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
700 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
705 if (hdev->max_page < rp->max_page)
706 hdev->max_page = rp->max_page;
708 if (rp->page < HCI_MAX_PAGES)
709 memcpy(hdev->features[rp->page], rp->features, 8);
712 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
715 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
717 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
722 hdev->flow_ctl_mode = rp->mode;
725 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
727 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
729 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
734 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
735 hdev->sco_mtu = rp->sco_mtu;
736 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
737 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
739 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
744 hdev->acl_cnt = hdev->acl_pkts;
745 hdev->sco_cnt = hdev->sco_pkts;
747 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
748 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
751 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
753 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
755 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
760 if (test_bit(HCI_INIT, &hdev->flags))
761 bacpy(&hdev->bdaddr, &rp->bdaddr);
763 if (hci_dev_test_flag(hdev, HCI_SETUP))
764 bacpy(&hdev->setup_addr, &rp->bdaddr);
767 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
770 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
772 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
777 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
778 hci_dev_test_flag(hdev, HCI_CONFIG)) {
779 hdev->pairing_opts = rp->pairing_opts;
780 hdev->max_enc_key_size = rp->max_key_size;
784 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
787 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
789 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
794 if (test_bit(HCI_INIT, &hdev->flags)) {
795 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
796 hdev->page_scan_window = __le16_to_cpu(rp->window);
800 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
803 u8 status = *((u8 *) skb->data);
804 struct hci_cp_write_page_scan_activity *sent;
806 BT_DBG("%s status 0x%2.2x", hdev->name, status);
811 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
815 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
816 hdev->page_scan_window = __le16_to_cpu(sent->window);
819 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
822 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
824 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
829 if (test_bit(HCI_INIT, &hdev->flags))
830 hdev->page_scan_type = rp->type;
833 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
836 u8 status = *((u8 *) skb->data);
839 BT_DBG("%s status 0x%2.2x", hdev->name, status);
844 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
846 hdev->page_scan_type = *type;
849 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
852 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
854 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
859 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
860 hdev->block_len = __le16_to_cpu(rp->block_len);
861 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
863 hdev->block_cnt = hdev->num_blocks;
865 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
866 hdev->block_cnt, hdev->block_len);
869 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
871 struct hci_rp_read_clock *rp = (void *) skb->data;
872 struct hci_cp_read_clock *cp;
873 struct hci_conn *conn;
875 BT_DBG("%s", hdev->name);
877 if (skb->len < sizeof(*rp))
885 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
889 if (cp->which == 0x00) {
890 hdev->clock = le32_to_cpu(rp->clock);
894 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
896 conn->clock = le32_to_cpu(rp->clock);
897 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
901 hci_dev_unlock(hdev);
904 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
907 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
909 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
914 hdev->amp_status = rp->amp_status;
915 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
916 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
917 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
918 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
919 hdev->amp_type = rp->amp_type;
920 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
921 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
922 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
923 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
926 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
929 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
931 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
936 hdev->inq_tx_power = rp->tx_power;
939 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
942 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
944 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
949 hdev->err_data_reporting = rp->err_data_reporting;
952 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
955 __u8 status = *((__u8 *)skb->data);
956 struct hci_cp_write_def_err_data_reporting *cp;
958 BT_DBG("%s status 0x%2.2x", hdev->name, status);
963 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
967 hdev->err_data_reporting = cp->err_data_reporting;
970 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
972 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
973 struct hci_cp_pin_code_reply *cp;
974 struct hci_conn *conn;
976 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
980 if (hci_dev_test_flag(hdev, HCI_MGMT))
981 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
986 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
990 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
992 conn->pin_length = cp->pin_len;
995 hci_dev_unlock(hdev);
998 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1000 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1002 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1006 if (hci_dev_test_flag(hdev, HCI_MGMT))
1007 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1010 hci_dev_unlock(hdev);
1013 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1014 struct sk_buff *skb)
1016 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1018 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1023 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1024 hdev->le_pkts = rp->le_max_pkt;
1026 hdev->le_cnt = hdev->le_pkts;
1028 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1031 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1032 struct sk_buff *skb)
1034 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1036 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1041 memcpy(hdev->le_features, rp->features, 8);
1044 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1045 struct sk_buff *skb)
1047 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1049 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1054 hdev->adv_tx_power = rp->tx_power;
1057 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1059 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1061 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1065 if (hci_dev_test_flag(hdev, HCI_MGMT))
1066 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1069 hci_dev_unlock(hdev);
1072 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1073 struct sk_buff *skb)
1075 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1077 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1081 if (hci_dev_test_flag(hdev, HCI_MGMT))
1082 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1083 ACL_LINK, 0, rp->status);
1085 hci_dev_unlock(hdev);
1088 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1090 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1092 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1096 if (hci_dev_test_flag(hdev, HCI_MGMT))
1097 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1100 hci_dev_unlock(hdev);
1103 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1104 struct sk_buff *skb)
1106 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1108 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1112 if (hci_dev_test_flag(hdev, HCI_MGMT))
1113 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1114 ACL_LINK, 0, rp->status);
1116 hci_dev_unlock(hdev);
1119 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1120 struct sk_buff *skb)
1122 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1124 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1127 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1128 struct sk_buff *skb)
1130 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1132 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1135 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1137 __u8 status = *((__u8 *) skb->data);
1140 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1145 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1151 bacpy(&hdev->random_addr, sent);
1153 hci_dev_unlock(hdev);
1156 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1158 __u8 status = *((__u8 *) skb->data);
1159 struct hci_cp_le_set_default_phy *cp;
1161 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1166 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1172 hdev->le_tx_def_phys = cp->tx_phys;
1173 hdev->le_rx_def_phys = cp->rx_phys;
1175 hci_dev_unlock(hdev);
1178 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1179 struct sk_buff *skb)
1181 __u8 status = *((__u8 *) skb->data);
1182 struct hci_cp_le_set_adv_set_rand_addr *cp;
1183 struct adv_info *adv_instance;
1188 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1194 if (!hdev->cur_adv_instance) {
1195 /* Store in hdev for instance 0 (Set adv and Directed advs) */
1196 bacpy(&hdev->random_addr, &cp->bdaddr);
1198 adv_instance = hci_find_adv_instance(hdev,
1199 hdev->cur_adv_instance);
1201 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1204 hci_dev_unlock(hdev);
1207 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1209 __u8 *sent, status = *((__u8 *) skb->data);
1211 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1216 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1222 /* If we're doing connection initiation as peripheral. Set a
1223 * timeout in case something goes wrong.
1226 struct hci_conn *conn;
1228 hci_dev_set_flag(hdev, HCI_LE_ADV);
1230 conn = hci_lookup_le_connect(hdev);
1232 queue_delayed_work(hdev->workqueue,
1233 &conn->le_conn_timeout,
1234 conn->conn_timeout);
1236 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1239 hci_dev_unlock(hdev);
1242 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1243 struct sk_buff *skb)
1245 struct hci_cp_le_set_ext_adv_enable *cp;
1246 __u8 status = *((__u8 *) skb->data);
1248 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1253 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1260 struct hci_conn *conn;
1262 hci_dev_set_flag(hdev, HCI_LE_ADV);
1264 conn = hci_lookup_le_connect(hdev);
1266 queue_delayed_work(hdev->workqueue,
1267 &conn->le_conn_timeout,
1268 conn->conn_timeout);
1270 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1273 hci_dev_unlock(hdev);
1276 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1278 struct hci_cp_le_set_scan_param *cp;
1279 __u8 status = *((__u8 *) skb->data);
1281 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1286 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1292 hdev->le_scan_type = cp->type;
1294 hci_dev_unlock(hdev);
1297 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1298 struct sk_buff *skb)
1300 struct hci_cp_le_set_ext_scan_params *cp;
1301 __u8 status = *((__u8 *) skb->data);
1302 struct hci_cp_le_scan_phy_params *phy_param;
1304 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1309 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1313 phy_param = (void *)cp->data;
1317 hdev->le_scan_type = phy_param->type;
1319 hci_dev_unlock(hdev);
1322 static bool has_pending_adv_report(struct hci_dev *hdev)
1324 struct discovery_state *d = &hdev->discovery;
1326 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1329 static void clear_pending_adv_report(struct hci_dev *hdev)
1331 struct discovery_state *d = &hdev->discovery;
1333 bacpy(&d->last_adv_addr, BDADDR_ANY);
1334 d->last_adv_data_len = 0;
1337 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1338 u8 bdaddr_type, s8 rssi, u32 flags,
1341 struct discovery_state *d = &hdev->discovery;
1343 if (len > HCI_MAX_AD_LENGTH)
1346 bacpy(&d->last_adv_addr, bdaddr);
1347 d->last_adv_addr_type = bdaddr_type;
1348 d->last_adv_rssi = rssi;
1349 d->last_adv_flags = flags;
1350 memcpy(d->last_adv_data, data, len);
1351 d->last_adv_data_len = len;
1354 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1359 case LE_SCAN_ENABLE:
1360 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1361 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1362 clear_pending_adv_report(hdev);
1365 case LE_SCAN_DISABLE:
1366 /* We do this here instead of when setting DISCOVERY_STOPPED
1367 * since the latter would potentially require waiting for
1368 * inquiry to stop too.
1370 if (has_pending_adv_report(hdev)) {
1371 struct discovery_state *d = &hdev->discovery;
1373 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1374 d->last_adv_addr_type, NULL,
1375 d->last_adv_rssi, d->last_adv_flags,
1377 d->last_adv_data_len, NULL, 0);
1380 /* Cancel this timer so that we don't try to disable scanning
1381 * when it's already disabled.
1383 cancel_delayed_work(&hdev->le_scan_disable);
1385 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1387 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1388 * interrupted scanning due to a connect request. Mark
1389 * therefore discovery as stopped. If this was not
1390 * because of a connect request advertising might have
1391 * been disabled because of active scanning, so
1392 * re-enable it again if necessary.
1394 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1395 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1396 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1397 hdev->discovery.state == DISCOVERY_FINDING)
1398 hci_req_reenable_advertising(hdev);
1403 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1408 hci_dev_unlock(hdev);
1411 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1412 struct sk_buff *skb)
1414 struct hci_cp_le_set_scan_enable *cp;
1415 __u8 status = *((__u8 *) skb->data);
1417 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1422 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1426 le_set_scan_enable_complete(hdev, cp->enable);
1429 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1430 struct sk_buff *skb)
1432 struct hci_cp_le_set_ext_scan_enable *cp;
1433 __u8 status = *((__u8 *) skb->data);
1435 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1440 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1444 le_set_scan_enable_complete(hdev, cp->enable);
1447 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1448 struct sk_buff *skb)
1450 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1452 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1458 hdev->le_num_of_adv_sets = rp->num_of_sets;
1461 static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
1462 struct sk_buff *skb)
1464 struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
1466 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1471 hdev->le_accept_list_size = rp->size;
1474 static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
1475 struct sk_buff *skb)
1477 __u8 status = *((__u8 *) skb->data);
1479 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1484 hci_bdaddr_list_clear(&hdev->le_accept_list);
1487 static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
1488 struct sk_buff *skb)
1490 struct hci_cp_le_add_to_accept_list *sent;
1491 __u8 status = *((__u8 *) skb->data);
1493 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1498 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1502 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1506 static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
1507 struct sk_buff *skb)
1509 struct hci_cp_le_del_from_accept_list *sent;
1510 __u8 status = *((__u8 *) skb->data);
1512 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1517 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1521 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
/* Command Complete handler for LE Read Supported States: copies the 8-byte
 * LE state bitmask reported by the controller into hdev->le_states.
 */
1525 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1526 struct sk_buff *skb)
1528 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1530 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1535 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete handler for LE Read Default Data Length: caches the
 * controller's default TX octets/time (converted from little-endian).
 */
1538 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1539 struct sk_buff *skb)
1541 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1543 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1548 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1549 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Command Complete handler for LE Write Default Data Length: on completion,
 * adopts the values from the sent command as the new cached defaults.
 */
1552 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1553 struct sk_buff *skb)
1555 struct hci_cp_le_write_def_data_len *sent;
1556 __u8 status = *((__u8 *) skb->data);
1558 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1563 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1567 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1568 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete handler for LE Add Device To Resolving List: mirrors the
 * sent entry (address, type, peer IRK) into the host-side resolving list.
 * NOTE(review): the final argument of hci_bdaddr_list_add_with_irk()
 * (presumably the local IRK) falls on a line missing from this excerpt.
 */
1571 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1572 struct sk_buff *skb)
1574 struct hci_cp_le_add_to_resolv_list *sent;
1575 __u8 status = *((__u8 *) skb->data);
1577 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1582 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1586 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1587 sent->bdaddr_type, sent->peer_irk,
/* Command Complete handler for LE Remove Device From Resolving List: removes
 * the sent entry from the host-side resolving list.
 */
1591 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1592 struct sk_buff *skb)
1594 struct hci_cp_le_del_from_resolv_list *sent;
1595 __u8 status = *((__u8 *) skb->data);
1597 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1602 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1606 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
/* Command Complete handler for LE Clear Resolving List: empties the host-side
 * copy to match the controller.
 */
1610 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1611 struct sk_buff *skb)
1613 __u8 status = *((__u8 *) skb->data);
1615 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1620 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete handler for LE Read Resolving List Size: caches the
 * controller's resolving-list capacity.
 */
1623 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1624 struct sk_buff *skb)
1626 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1628 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1633 hdev->le_resolv_list_size = rp->size;
/* Command Complete handler for LE Set Address Resolution Enable: tracks the
 * controller's link-layer RPA-resolution state via HCI_LL_RPA_RESOLUTION.
 * NOTE(review): the branch that selects set vs. clear (presumably testing
 * *sent) falls on lines missing from this excerpt.
 */
1636 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1637 struct sk_buff *skb)
1639 __u8 *sent, status = *((__u8 *) skb->data);
1641 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1646 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1653 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1655 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1657 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Maximum Data Length: caches the
 * controller's maximum TX/RX octets and times (little-endian converted).
 */
1660 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1661 struct sk_buff *skb)
1663 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1665 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1670 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1671 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1672 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1673 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete handler for Write LE Host Supported: synchronizes the
 * host-feature bits (LMP_HOST_LE / LMP_HOST_LE_BREDR) and the HCI_LE_ENABLED /
 * HCI_ADVERTISING flags with what was requested in the sent command.
 * NOTE(review): the conditions selecting each branch (presumably sent->le and
 * sent->simul) fall on lines missing from this excerpt.
 */
1676 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1677 struct sk_buff *skb)
1679 struct hci_cp_write_le_host_supported *sent;
1680 __u8 status = *((__u8 *) skb->data);
1682 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1687 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1694 hdev->features[1][0] |= LMP_HOST_LE;
1695 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1697 hdev->features[1][0] &= ~LMP_HOST_LE;
1698 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1699 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1703 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1705 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1707 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Advertising Parameters: remembers which
 * own-address type the controller was told to advertise with.
 */
1710 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1712 struct hci_cp_le_set_adv_param *cp;
1713 u8 status = *((u8 *) skb->data);
1715 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1720 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1725 hdev->adv_addr_type = cp->own_address_type;
1726 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Extended Advertising Parameters:
 * records the own-address type and the tx_power the controller selected —
 * in hdev itself for instance 0, otherwise in the matching adv_info — then
 * refreshes the advertising data now that tx_power is known.
 */
1729 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1731 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1732 struct hci_cp_le_set_ext_adv_params *cp;
1733 struct adv_info *adv_instance;
1735 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1740 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1745 hdev->adv_addr_type = cp->own_addr_type;
1746 if (!hdev->cur_adv_instance) {
1747 /* Store in hdev for instance 0 */
1748 hdev->adv_tx_power = rp->tx_power;
1750 adv_instance = hci_find_adv_instance(hdev,
1751 hdev->cur_adv_instance);
1753 adv_instance->tx_power = rp->tx_power;
1755 /* Update adv data as tx power is known now */
1756 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1757 hci_dev_unlock(hdev);
/* Command Complete handler for Read RSSI: looks up the connection by handle
 * and stores the reported RSSI on it.
 */
1760 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1762 struct hci_rp_read_rssi *rp = (void *) skb->data;
1763 struct hci_conn *conn;
1765 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1772 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1774 conn->rssi = rp->rssi;
1776 hci_dev_unlock(hdev);
/* Command Complete handler for Read Transmit Power Level: stores the reported
 * power on the connection, switching on the type requested in the sent
 * command (current vs. maximum).
 * NOTE(review): the case labels of the switch fall on lines missing from
 * this excerpt.
 */
1779 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1781 struct hci_cp_read_tx_power *sent;
1782 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1783 struct hci_conn *conn;
1785 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1790 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1796 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1800 switch (sent->type) {
1802 conn->tx_power = rp->tx_power;
1805 conn->max_tx_power = rp->tx_power;
1810 hci_dev_unlock(hdev);
/* Command Complete handler for Write SSP Debug Mode: caches the mode that was
 * sent to the controller in hdev->ssp_debug_mode.
 */
1813 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1815 u8 status = *((u8 *) skb->data);
1818 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1823 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1825 hdev->ssp_debug_mode = *mode;
/* Command Status handler for Inquiry: on (apparent) failure re-checks pending
 * connections; if the Inquiry command really went out, marks HCI_INQUIRY as
 * active in hdev->flags.
 */
1828 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1830 BT_DBG("%s status 0x%2.2x", hdev->name, status)
1833 hci_conn_check_pending(hdev);
1837 if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
1838 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status handler for Create Connection. On error: closes the pending
 * BT_CONNECT connection and notifies via hci_connect_cfm(), except that
 * status 0x0c (Command Disallowed) with few attempts moves the connection to
 * BT_CONNECT2 for a retry. On success with no existing conn, a new ACL
 * hci_conn is added for the address.
 */
1841 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1843 struct hci_cp_create_conn *cp;
1844 struct hci_conn *conn;
1846 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1848 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1854 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1856 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1859 if (conn && conn->state == BT_CONNECT) {
1860 if (status != 0x0c || conn->attempt > 2) {
1861 conn->state = BT_CLOSED;
1862 hci_connect_cfm(conn, status);
1865 conn->state = BT_CONNECT2;
1869 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1872 bt_dev_err(hdev, "no memory for new connection");
1876 hci_dev_unlock(hdev);
/* Command Status handler for Add SCO Connection: on failure, finds the ACL by
 * the handle in the sent command and closes its associated SCO link,
 * signalling the failure through hci_connect_cfm().
 * NOTE(review): the line assigning 'sco' (presumably acl->link) is missing
 * from this excerpt.
 */
1879 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1881 struct hci_cp_add_sco *cp;
1882 struct hci_conn *acl, *sco;
1885 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1890 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1894 handle = __le16_to_cpu(cp->handle);
1896 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1900 acl = hci_conn_hash_lookup_handle(hdev, handle);
1904 sco->state = BT_CLOSED;
1906 hci_connect_cfm(sco, status);
1911 hci_dev_unlock(hdev);
/* Command Status handler for Authentication Requested: on failure, if the
 * connection is still in BT_CONFIG, reports the failure via hci_connect_cfm()
 * and drops the reference taken for the request.
 */
1914 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1916 struct hci_cp_auth_requested *cp;
1917 struct hci_conn *conn;
1919 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1924 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1930 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1932 if (conn->state == BT_CONFIG) {
1933 hci_connect_cfm(conn, status);
1934 hci_conn_drop(conn);
1938 hci_dev_unlock(hdev);
/* Command Status handler for Set Connection Encryption: mirror of
 * hci_cs_auth_requested() — on failure during BT_CONFIG, confirm the failure
 * and drop the connection reference.
 */
1941 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1943 struct hci_cp_set_conn_encrypt *cp;
1944 struct hci_conn *conn;
1946 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1951 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1957 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1959 if (conn->state == BT_CONFIG) {
1960 hci_connect_cfm(conn, status);
1961 hci_conn_drop(conn);
1965 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication: only
 * outgoing connections in BT_CONFIG with a pending security level above SDP
 * qualify, and non-SSP links without MITM only when MEDIUM/HIGH/FIPS security
 * is pending.
 * NOTE(review): the actual return statements fall on lines missing from this
 * excerpt; the visible conditions imply a boolean-style 0/1 result.
 */
1968 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1969 struct hci_conn *conn)
1971 if (conn->state != BT_CONFIG || !conn->out)
1974 if (conn->pending_sec_level == BT_SECURITY_SDP)
1977 /* Only request authentication for SSP connections or non-SSP
1978 * devices with sec_level MEDIUM or HIGH or if MITM protection
1981 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1982 conn->pending_sec_level != BT_SECURITY_FIPS &&
1983 conn->pending_sec_level != BT_SECURITY_HIGH &&
1984 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for the given inquiry cache entry, copying its
 * page-scan parameters and clock offset into the command. Returns the result
 * of hci_send_cmd() (0 on success).
 */
1990 static int hci_resolve_name(struct hci_dev *hdev,
1991 struct inquiry_entry *e)
1993 struct hci_cp_remote_name_req cp;
1995 memset(&cp, 0, sizeof(cp));
1997 bacpy(&cp.bdaddr, &e->data.bdaddr);
1998 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1999 cp.pscan_mode = e->data.pscan_mode;
2000 cp.clock_offset = e->data.clock_offset;
2002 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Pick the next NAME_NEEDED entry from the discovery resolve list and start a
 * name request for it, marking it NAME_PENDING on success. Returns whether a
 * resolution was started (false when the list is empty or sending failed —
 * the failure return lines are outside this excerpt).
 */
2005 static bool hci_resolve_next_name(struct hci_dev *hdev)
2007 struct discovery_state *discov = &hdev->discovery;
2008 struct inquiry_entry *e;
2010 if (list_empty(&discov->resolve))
2013 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2017 if (hci_resolve_name(hdev, e) == 0) {
2018 e->name_state = NAME_PENDING;
/* Handle completion of a remote-name lookup for bdaddr. Reports the connected
 * device to mgmt if a real (BT_CONFIG/BT_CONNECTED) conn exists, then, while
 * name discovery is active, updates the matching NAME_PENDING inquiry entry
 * (NAME_KNOWN with mgmt_remote_name on success, NAME_NOT_KNOWN otherwise) and
 * either continues resolving the next name or marks discovery stopped.
 */
2025 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2026 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2028 struct discovery_state *discov = &hdev->discovery;
2029 struct inquiry_entry *e;
2031 /* Update the mgmt connected state if necessary. Be careful with
2032 * conn objects that exist but are not (yet) connected however.
2033 * Only those in BT_CONFIG or BT_CONNECTED states can be
2034 * considered connected.
2037 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2038 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2039 mgmt_device_connected(hdev, conn, 0, name, name_len);
2041 if (discov->state == DISCOVERY_STOPPED)
2044 if (discov->state == DISCOVERY_STOPPING)
2045 goto discov_complete;
2047 if (discov->state != DISCOVERY_RESOLVING)
2050 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2051 /* If the device was not found in a list of found devices names of which
2052 * are pending. there is no need to continue resolving a next name as it
2053 * will be done upon receiving another Remote Name Request Complete
2060 e->name_state = NAME_KNOWN;
2061 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2062 e->data.rssi, name, name_len);
2064 e->name_state = NAME_NOT_KNOWN;
2067 if (hci_resolve_next_name(hdev))
2071 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for Remote Name Request: on failure, resolves the
 * pending name as unknown (when mgmt is active) and, if outgoing
 * authentication is still needed, kicks off an Authentication Requested
 * command for the connection.
 */
2074 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2076 struct hci_cp_remote_name_req *cp;
2077 struct hci_conn *conn;
2079 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2081 /* If successful wait for the name req complete event before
2082 * checking for the need to do authentication */
2086 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2092 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2094 if (hci_dev_test_flag(hdev, HCI_MGMT))
2095 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2100 if (!hci_outgoing_auth_needed(hdev, conn))
2103 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2104 struct hci_cp_auth_requested auth_cp;
2106 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2108 auth_cp.handle = __cpu_to_le16(conn->handle);
2109 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2110 sizeof(auth_cp), &auth_cp);
2114 hci_dev_unlock(hdev);
/* Command Status handler for Read Remote Features: on failure during
 * BT_CONFIG, confirm the failed connect and drop the reference.
 */
2117 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2119 struct hci_cp_read_remote_features *cp;
2120 struct hci_conn *conn;
2122 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2127 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2133 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2135 if (conn->state == BT_CONFIG) {
2136 hci_connect_cfm(conn, status);
2137 hci_conn_drop(conn);
2141 hci_dev_unlock(hdev);
/* Command Status handler for Read Remote Extended Features: same failure
 * handling as hci_cs_read_remote_features().
 */
2144 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2146 struct hci_cp_read_remote_ext_features *cp;
2147 struct hci_conn *conn;
2149 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2154 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2160 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2162 if (conn->state == BT_CONFIG) {
2163 hci_connect_cfm(conn, status);
2164 hci_conn_drop(conn);
2168 hci_dev_unlock(hdev);
/* Command Status handler for Setup Synchronous Connection: on failure, closes
 * the SCO link attached to the ACL identified by the sent handle and reports
 * the failure.
 * NOTE(review): the line assigning 'sco' (presumably acl->link) is missing
 * from this excerpt.
 */
2171 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2173 struct hci_cp_setup_sync_conn *cp;
2174 struct hci_conn *acl, *sco;
2177 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2182 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2186 handle = __le16_to_cpu(cp->handle);
2188 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2192 acl = hci_conn_hash_lookup_handle(hdev, handle);
2196 sco->state = BT_CLOSED;
2198 hci_connect_cfm(sco, status);
2203 hci_dev_unlock(hdev);
/* Command Status handler for Sniff Mode: on failure, clears the mode-change
 * pending flag and, if a SCO setup was waiting on the mode change, runs it
 * now with the error status.
 */
2206 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2208 struct hci_cp_sniff_mode *cp;
2209 struct hci_conn *conn;
2211 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2216 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2222 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2224 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2226 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2227 hci_sco_setup(conn, status);
2230 hci_dev_unlock(hdev);
/* Command Status handler for Exit Sniff Mode: mirror of hci_cs_sniff_mode()
 * failure handling.
 */
2233 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2235 struct hci_cp_exit_sniff_mode *cp;
2236 struct hci_conn *conn;
2238 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2243 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2249 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2251 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2253 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2254 hci_sco_setup(conn, status);
2257 hci_dev_unlock(hdev);
/* Command Status handler for Disconnect: on failure, informs mgmt that the
 * disconnect failed and — since the upper layer will not retry — performs
 * basic cleanup, re-enabling advertising for LE links.
 */
2260 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2262 struct hci_cp_disconnect *cp;
2263 struct hci_conn *conn;
2268 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2274 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2276 u8 type = conn->type;
2278 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2279 conn->dst_type, status);
2281 /* If the disconnection failed for any reason, the upper layer
2282 * does not retry to disconnect in current implementation.
2283 * Hence, we need to do some basic cleanup here and re-enable
2284 * advertising if necessary.
2287 if (type == LE_LINK)
2288 hci_req_reenable_advertising(hdev);
2291 hci_dev_unlock(hdev);
/* Common Command Status bookkeeping for LE (Extended) Create Connection:
 * finds the pending LE conn, normalizes resolved own-address types (0x02/0x03)
 * back to public/random when controller-based address resolution is in use,
 * records the initiator/responder addresses needed later by SMP, and arms the
 * LE connection timeout for non-accept-list connections (LE has no page
 * timeout).
 */
2294 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2295 u8 peer_addr_type, u8 own_address_type,
2298 struct hci_conn *conn;
2300 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2305 /* When using controller based address resolution, then the new
2306 * address types 0x02 and 0x03 are used. These types need to be
2307 * converted back into either public address or random address type
2309 if (use_ll_privacy(hdev) &&
2310 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2311 switch (own_address_type) {
2312 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2313 own_address_type = ADDR_LE_DEV_PUBLIC;
2315 case ADDR_LE_DEV_RANDOM_RESOLVED:
2316 own_address_type = ADDR_LE_DEV_RANDOM;
2321 /* Store the initiator and responder address information which
2322 * is needed for SMP. These values will not change during the
2323 * lifetime of the connection.
2325 conn->init_addr_type = own_address_type;
2326 if (own_address_type == ADDR_LE_DEV_RANDOM)
2327 bacpy(&conn->init_addr, &hdev->random_addr);
2329 bacpy(&conn->init_addr, &hdev->bdaddr);
2331 conn->resp_addr_type = peer_addr_type;
2332 bacpy(&conn->resp_addr, peer_addr);
2334 /* We don't want the connection attempt to stick around
2335 * indefinitely since LE doesn't have a page timeout concept
2336 * like BR/EDR. Set a timer for any connection that doesn't use
2337 * the accept list for connecting.
2339 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2340 queue_delayed_work(conn->hdev->workqueue,
2341 &conn->le_conn_timeout,
2342 conn->conn_timeout);
/* Command Status handler for LE Create Connection: delegates bookkeeping to
 * cs_le_create_conn(); failures themselves are handled elsewhere by
 * hci_le_conn_failed() via the request completion callbacks.
 */
2345 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2347 struct hci_cp_le_create_conn *cp;
2349 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2351 /* All connection failure handling is taken care of by the
2352 * hci_le_conn_failed function which is triggered by the HCI
2353 * request completion callbacks used for connecting.
2358 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2364 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2365 cp->own_address_type, cp->filter_policy);
2367 hci_dev_unlock(hdev);
/* Command Status handler for LE Extended Create Connection: same flow as
 * hci_cs_le_create_conn() but for the extended command layout
 * (own_addr_type field name differs).
 */
2370 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2372 struct hci_cp_le_ext_create_conn *cp;
2374 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2376 /* All connection failure handling is taken care of by the
2377 * hci_le_conn_failed function which is triggered by the HCI
2378 * request completion callbacks used for connecting.
2383 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2389 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2390 cp->own_addr_type, cp->filter_policy);
2392 hci_dev_unlock(hdev);
/* Command Status handler for LE Read Remote Features: on failure during
 * BT_CONFIG, confirm the failed connect and drop the reference.
 */
2395 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2397 struct hci_cp_le_read_remote_features *cp;
2398 struct hci_conn *conn;
2400 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2405 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2411 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2413 if (conn->state == BT_CONFIG) {
2414 hci_connect_cfm(conn, status);
2415 hci_conn_drop(conn);
2419 hci_dev_unlock(hdev);
/* Command Status handler for LE Start Encryption: on failure, if the link is
 * still BT_CONNECTED, tears it down with HCI_ERROR_AUTH_FAILURE since
 * encryption could not be started.
 */
2422 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2424 struct hci_cp_le_start_enc *cp;
2425 struct hci_conn *conn;
2427 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2434 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2438 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2442 if (conn->state != BT_CONNECTED)
2445 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2446 hci_conn_drop(conn);
2449 hci_dev_unlock(hdev);
/* Command Status handler for Switch Role: on failure, clears the
 * role-switch-pending flag on the affected ACL connection.
 */
2452 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2454 struct hci_cp_switch_role *cp;
2455 struct hci_conn *conn;
2457 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2462 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2468 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2470 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2472 hci_dev_unlock(hdev);
/* Inquiry Complete event handler: clears HCI_INQUIRY (waking any waiters),
 * then — when mgmt is driving discovery — either starts name resolution for
 * outstanding entries or transitions discovery to STOPPED, taking care not to
 * end discovery prematurely while a simultaneous LE scan is still running.
 */
2475 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2477 __u8 status = *((__u8 *) skb->data);
2478 struct discovery_state *discov = &hdev->discovery;
2479 struct inquiry_entry *e;
2481 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2483 hci_conn_check_pending(hdev);
2485 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2488 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2489 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2491 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2496 if (discov->state != DISCOVERY_FINDING)
2499 if (list_empty(&discov->resolve)) {
2500 /* When BR/EDR inquiry is active and no LE scanning is in
2501 * progress, then change discovery state to indicate completion.
2503 * When running LE scanning and BR/EDR inquiry simultaneously
2504 * and the LE scan already finished, then change the discovery
2505 * state to indicate completion.
2507 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2508 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2509 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2513 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2514 if (e && hci_resolve_name(hdev, e) == 0) {
2515 e->name_state = NAME_PENDING;
2516 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2518 /* When BR/EDR inquiry is active and no LE scanning is in
2519 * progress, then change discovery state to indicate completion.
2521 * When running LE scanning and BR/EDR inquiry simultaneously
2522 * and the LE scan already finished, then change the discovery
2523 * state to indicate completion.
2525 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2526 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2527 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2531 hci_dev_unlock(hdev);
/* Inquiry Result event handler: validates that the skb actually contains
 * num_rsp inquiry_info records, skips processing during periodic inquiry,
 * then updates the inquiry cache and reports each found device to mgmt with
 * an invalid RSSI (this event variant carries none).
 */
2534 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2536 struct inquiry_data data;
2537 struct inquiry_info *info = (void *) (skb->data + 1);
2538 int num_rsp = *((__u8 *) skb->data);
2540 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2542 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2545 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2550 for (; num_rsp; num_rsp--, info++) {
2553 bacpy(&data.bdaddr, &info->bdaddr);
2554 data.pscan_rep_mode = info->pscan_rep_mode;
2555 data.pscan_period_mode = info->pscan_period_mode;
2556 data.pscan_mode = info->pscan_mode;
2557 memcpy(data.dev_class, info->dev_class, 3);
2558 data.clock_offset = info->clock_offset;
2559 data.rssi = HCI_RSSI_INVALID;
2560 data.ssp_mode = 0x00;
2562 flags = hci_inquiry_cache_update(hdev, &data, false);
2564 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2565 info->dev_class, HCI_RSSI_INVALID,
2566 flags, NULL, 0, NULL, 0);
2569 hci_dev_unlock(hdev);
/* Connection Complete event handler. Locates (or, for accept-list-driven
 * auto-connects and stray SCO completions, creates/repurposes) the hci_conn,
 * then on success: stores the handle, moves ACL links to BT_CONFIG (others to
 * BT_CONNECTED), registers debugfs/sysfs, propagates the adapter AUTH/ENCRYPT
 * flags, handles the race where "Link key request" completed before this
 * event by reading the encryption key size (or assuming HCI_LINK_KEY_SIZE
 * when the Read Encrypted Key Size command is unsupported), requests remote
 * features for ACL links, and fixes the packet type for pre-2.0 incoming
 * connections. On failure it marks the conn closed and tells mgmt. Finally
 * it runs SCO setup / connect confirmation and re-checks pending connects.
 */
2572 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2574 struct hci_ev_conn_complete *ev = (void *) skb->data;
2575 struct hci_conn *conn;
2577 BT_DBG("%s", hdev->name);
2581 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2583 /* Connection may not exist if auto-connected. Check the bredr
2584 * allowlist to see if this device is allowed to auto connect.
2585 * If link is an ACL type, create a connection class
2588 * Auto-connect will only occur if the event filter is
2589 * programmed with a given address. Right now, event filter is
2590 * only used during suspend.
2592 if (ev->link_type == ACL_LINK &&
2593 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2596 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2599 bt_dev_err(hdev, "no memory for new conn");
2603 if (ev->link_type != SCO_LINK)
2606 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2611 conn->type = SCO_LINK;
2616 conn->handle = __le16_to_cpu(ev->handle);
2618 if (conn->type == ACL_LINK) {
2619 conn->state = BT_CONFIG;
2620 hci_conn_hold(conn);
2622 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2623 !hci_find_link_key(hdev, &ev->bdaddr))
2624 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2626 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2628 conn->state = BT_CONNECTED;
2630 hci_debugfs_create_conn(conn);
2631 hci_conn_add_sysfs(conn);
2633 if (test_bit(HCI_AUTH, &hdev->flags))
2634 set_bit(HCI_CONN_AUTH, &conn->flags);
2636 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2637 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2639 /* "Link key request" completed ahead of "connect request" completes */
2640 if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
2641 ev->link_type == ACL_LINK) {
2642 struct link_key *key;
2643 struct hci_cp_read_enc_key_size cp;
2645 key = hci_find_link_key(hdev, &ev->bdaddr);
2647 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2649 if (!(hdev->commands[20] & 0x10)) {
2650 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2652 cp.handle = cpu_to_le16(conn->handle);
2653 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
2655 bt_dev_err(hdev, "sending read key size failed");
2656 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2660 hci_encrypt_cfm(conn, ev->status);
2664 /* Get remote features */
2665 if (conn->type == ACL_LINK) {
2666 struct hci_cp_read_remote_features cp;
2667 cp.handle = ev->handle;
2668 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2671 hci_req_update_scan(hdev);
2674 /* Set packet type for incoming connection */
2675 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2676 struct hci_cp_change_conn_ptype cp;
2677 cp.handle = ev->handle;
2678 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2679 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2683 conn->state = BT_CLOSED;
2684 if (conn->type == ACL_LINK)
2685 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2686 conn->dst_type, ev->status);
2689 if (conn->type == ACL_LINK)
2690 hci_sco_setup(conn, ev->status);
2693 hci_connect_cfm(conn, ev->status);
2695 } else if (ev->link_type == SCO_LINK) {
2696 switch (conn->setting & SCO_AIRMODE_MASK) {
2697 case SCO_AIRMODE_CVSD:
2699 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2703 hci_connect_cfm(conn, ev->status);
2707 hci_dev_unlock(hdev);
2709 hci_conn_check_pending(hdev);
/* Send a Reject Connection Request for bdaddr with reason
 * HCI_ERROR_REJ_BAD_ADDR.
 */
2712 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2714 struct hci_cp_reject_conn_req cp;
2716 bacpy(&cp.bdaddr, bdaddr);
2717 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2718 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event handler. Rejects the request when: it claims our
 * own BD_ADDR, the protocol layers don't accept it, the peer is on the reject
 * list, or (under mgmt) the adapter is not connectable and the peer isn't on
 * the accept list. Otherwise updates the inquiry cache class-of-device,
 * finds or creates the hci_conn, and either accepts immediately (ACL, or SCO
 * when not deferring and eSCO is unsupported — with fixed 8 kHz bandwidth
 * parameters) or defers via BT_CONNECT2 + hci_connect_cfm().
 */
2721 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2723 struct hci_ev_conn_request *ev = (void *) skb->data;
2724 int mask = hdev->link_mode;
2725 struct inquiry_entry *ie;
2726 struct hci_conn *conn;
2729 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2732 /* Reject incoming connection from device with same BD ADDR against
2735 if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
2736 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
2738 hci_reject_conn(hdev, &ev->bdaddr);
2742 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2745 if (!(mask & HCI_LM_ACCEPT)) {
2746 hci_reject_conn(hdev, &ev->bdaddr);
2752 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2754 hci_reject_conn(hdev, &ev->bdaddr);
2758 /* Require HCI_CONNECTABLE or an accept list entry to accept the
2759 * connection. These features are only touched through mgmt so
2760 * only do the checks if HCI_MGMT is set.
2762 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2763 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2764 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2766 hci_reject_conn(hdev, &ev->bdaddr);
2770 /* Connection accepted */
2772 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2774 memcpy(ie->data.dev_class, ev->dev_class, 3);
2776 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2779 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2782 bt_dev_err(hdev, "no memory for new connection");
2787 memcpy(conn->dev_class, ev->dev_class, 3);
2789 hci_dev_unlock(hdev);
2791 if (ev->link_type == ACL_LINK ||
2792 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2793 struct hci_cp_accept_conn_req cp;
2794 conn->state = BT_CONNECT;
2796 bacpy(&cp.bdaddr, &ev->bdaddr);
2798 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2799 cp.role = 0x00; /* Become central */
2801 cp.role = 0x01; /* Remain peripheral */
2803 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2804 } else if (!(flags & HCI_PROTO_DEFER)) {
2805 struct hci_cp_accept_sync_conn_req cp;
2806 conn->state = BT_CONNECT;
2808 bacpy(&cp.bdaddr, &ev->bdaddr);
2809 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2811 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2812 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2813 cp.max_latency = cpu_to_le16(0xffff);
2814 cp.content_format = cpu_to_le16(hdev->voice_setting);
2815 cp.retrans_effort = 0xff;
2817 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2820 conn->state = BT_CONNECT2;
2821 hci_connect_cfm(conn, 0);
2826 hci_dev_unlock(hdev);
/* Map an HCI disconnect error code to the corresponding MGMT_DEV_DISCONN_*
 * reason, defaulting to MGMT_DEV_DISCONN_UNKNOWN.
 */
2829 static u8 hci_to_mgmt_reason(u8 err)
2832 case HCI_ERROR_CONNECTION_TIMEOUT:
2833 return MGMT_DEV_DISCONN_TIMEOUT;
2834 case HCI_ERROR_REMOTE_USER_TERM:
2835 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2836 case HCI_ERROR_REMOTE_POWER_OFF:
2837 return MGMT_DEV_DISCONN_REMOTE;
2838 case HCI_ERROR_LOCAL_HOST_TERM:
2839 return MGMT_DEV_DISCONN_LOCAL_HOST;
2841 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event handler. On error, reports a failed
 * disconnect to mgmt. Otherwise closes the conn, translates the HCI reason
 * for mgmt (auth failures take precedence), and for ACL links removes a
 * flush-marked link key, updates scan mode, and re-queues DIRECT/ALWAYS
 * auto-connect params (LINK_LOSS only on a real connection timeout). Also
 * wakes the suspend wait queue when this was the last connection, and
 * re-enables advertising for LE links since a connection disables it per the
 * core spec (v4.0 LE_Set_Advertise_Enable description).
 */
2845 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2847 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2849 struct hci_conn_params *params;
2850 struct hci_conn *conn;
2851 bool mgmt_connected;
2854 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2858 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2863 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2864 conn->dst_type, ev->status);
2868 conn->state = BT_CLOSED;
2870 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2872 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2873 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2875 reason = hci_to_mgmt_reason(ev->reason);
2877 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2878 reason, mgmt_connected);
2880 if (conn->type == ACL_LINK) {
2881 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2882 hci_remove_link_key(hdev, &conn->dst);
2884 hci_req_update_scan(hdev);
2887 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2889 switch (params->auto_connect) {
2890 case HCI_AUTO_CONN_LINK_LOSS:
2891 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2895 case HCI_AUTO_CONN_DIRECT:
2896 case HCI_AUTO_CONN_ALWAYS:
2897 list_del_init(&params->action);
2898 list_add(&params->action, &hdev->pend_le_conns);
2899 hci_update_background_scan(hdev);
2909 hci_disconn_cfm(conn, ev->reason);
2912 /* The suspend notifier is waiting for all devices to disconnect so
2913 * clear the bit from pending tasks and inform the wait queue.
2915 if (list_empty(&hdev->conn_hash.list) &&
2916 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
2917 wake_up(&hdev->suspend_wait_q);
2920 /* Re-enable advertising if necessary, since it might
2921 * have been disabled by the connection. From the
2922 * HCI_LE_Set_Advertise_Enable command description in
2923 * the core specification (v4.0):
2924 * "The Controller shall continue advertising until the Host
2925 * issues an LE_Set_Advertise_Enable command with
2926 * Advertising_Enable set to 0x00 (Advertising is disabled)
2927 * or until a connection is created or until the Advertising
2928 * is timed out due to Directed Advertising."
2930 if (type == LE_LINK)
2931 hci_req_reenable_advertising(hdev);
2934 hci_dev_unlock(hdev);
/* Authentication Complete event handler. On success sets HCI_CONN_AUTH and
 * promotes the pending security level; on PIN-or-key-missing failure records
 * HCI_CONN_AUTH_FAILURE and tells mgmt. Then: during BT_CONFIG, either
 * continues to Set Connection Encryption (SSP) or completes the connect;
 * otherwise delivers the result via hci_auth_cfm() with a hold/drop to reset
 * the disconnect timeout. If encryption was already pending, it is started
 * (or the pending flag cleared and failure confirmed).
 */
2937 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2939 struct hci_ev_auth_complete *ev = (void *) skb->data;
2940 struct hci_conn *conn;
2942 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2946 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2951 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2952 set_bit(HCI_CONN_AUTH, &conn->flags);
2953 conn->sec_level = conn->pending_sec_level;
2955 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2956 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2958 mgmt_auth_failed(conn, ev->status);
2961 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2963 if (conn->state == BT_CONFIG) {
2964 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2965 struct hci_cp_set_conn_encrypt cp;
2966 cp.handle = ev->handle;
2968 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2971 conn->state = BT_CONNECTED;
2972 hci_connect_cfm(conn, ev->status);
2973 hci_conn_drop(conn);
2976 hci_auth_cfm(conn, ev->status);
2978 hci_conn_hold(conn);
2979 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2980 hci_conn_drop(conn);
2983 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2985 struct hci_cp_set_conn_encrypt cp;
2986 cp.handle = ev->handle;
2988 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2991 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2992 hci_encrypt_cfm(conn, ev->status);
2997 hci_dev_unlock(hdev);
3000 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3002 struct hci_ev_remote_name *ev = (void *) skb->data;
3003 struct hci_conn *conn;
3005 BT_DBG("%s", hdev->name);
3009 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3011 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3014 if (ev->status == 0)
3015 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3016 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3018 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3024 if (!hci_outgoing_auth_needed(hdev, conn))
3027 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3028 struct hci_cp_auth_requested cp;
3030 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3032 cp.handle = __cpu_to_le16(conn->handle);
3033 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3037 hci_dev_unlock(hdev);
3040 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3041 u16 opcode, struct sk_buff *skb)
3043 const struct hci_rp_read_enc_key_size *rp;
3044 struct hci_conn *conn;
3047 BT_DBG("%s status 0x%02x", hdev->name, status);
3049 if (!skb || skb->len < sizeof(*rp)) {
3050 bt_dev_err(hdev, "invalid read key size response");
3054 rp = (void *)skb->data;
3055 handle = le16_to_cpu(rp->handle);
3059 conn = hci_conn_hash_lookup_handle(hdev, handle);
3063 /* While unexpected, the read_enc_key_size command may fail. The most
3064 * secure approach is to then assume the key size is 0 to force a
3068 bt_dev_err(hdev, "failed to read key size for handle %u",
3070 conn->enc_key_size = 0;
3072 conn->enc_key_size = rp->key_size;
3075 hci_encrypt_cfm(conn, 0);
3078 hci_dev_unlock(hdev);
3081 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3083 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3084 struct hci_conn *conn;
3086 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3090 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3096 /* Encryption implies authentication */
3097 set_bit(HCI_CONN_AUTH, &conn->flags);
3098 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3099 conn->sec_level = conn->pending_sec_level;
3101 /* P-256 authentication key implies FIPS */
3102 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3103 set_bit(HCI_CONN_FIPS, &conn->flags);
3105 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3106 conn->type == LE_LINK)
3107 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3109 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3110 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3114 /* We should disregard the current RPA and generate a new one
3115 * whenever the encryption procedure fails.
3117 if (ev->status && conn->type == LE_LINK) {
3118 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3119 hci_adv_instances_set_rpa_expired(hdev, true);
3122 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3124 /* Check link security requirements are met */
3125 if (!hci_conn_check_link_mode(conn))
3126 ev->status = HCI_ERROR_AUTH_FAILURE;
3128 if (ev->status && conn->state == BT_CONNECTED) {
3129 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3130 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3132 /* Notify upper layers so they can cleanup before
3135 hci_encrypt_cfm(conn, ev->status);
3136 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3137 hci_conn_drop(conn);
3141 /* Try reading the encryption key size for encrypted ACL links */
3142 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3143 struct hci_cp_read_enc_key_size cp;
3144 struct hci_request req;
3146 /* Only send HCI_Read_Encryption_Key_Size if the
3147 * controller really supports it. If it doesn't, assume
3148 * the default size (16).
3150 if (!(hdev->commands[20] & 0x10)) {
3151 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3155 hci_req_init(&req, hdev);
3157 cp.handle = cpu_to_le16(conn->handle);
3158 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3160 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3161 bt_dev_err(hdev, "sending read key size failed");
3162 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3169 /* Set the default Authenticated Payload Timeout after
3170 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3171 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3172 * sent when the link is active and Encryption is enabled, the conn
3173 * type can be either LE or ACL and controller must support LMP Ping.
3174 * Ensure for AES-CCM encryption as well.
3176 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3177 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3178 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3179 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3180 struct hci_cp_write_auth_payload_to cp;
3182 cp.handle = cpu_to_le16(conn->handle);
3183 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3184 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3189 hci_encrypt_cfm(conn, ev->status);
3192 hci_dev_unlock(hdev);
3195 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3196 struct sk_buff *skb)
3198 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3199 struct hci_conn *conn;
3201 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3205 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3208 set_bit(HCI_CONN_SECURE, &conn->flags);
3210 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3212 hci_key_change_cfm(conn, ev->status);
3215 hci_dev_unlock(hdev);
3218 static void hci_remote_features_evt(struct hci_dev *hdev,
3219 struct sk_buff *skb)
3221 struct hci_ev_remote_features *ev = (void *) skb->data;
3222 struct hci_conn *conn;
3224 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3228 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3233 memcpy(conn->features[0], ev->features, 8);
3235 if (conn->state != BT_CONFIG)
3238 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3239 lmp_ext_feat_capable(conn)) {
3240 struct hci_cp_read_remote_ext_features cp;
3241 cp.handle = ev->handle;
3243 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3248 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3249 struct hci_cp_remote_name_req cp;
3250 memset(&cp, 0, sizeof(cp));
3251 bacpy(&cp.bdaddr, &conn->dst);
3252 cp.pscan_rep_mode = 0x02;
3253 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3254 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3255 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3257 if (!hci_outgoing_auth_needed(hdev, conn)) {
3258 conn->state = BT_CONNECTED;
3259 hci_connect_cfm(conn, ev->status);
3260 hci_conn_drop(conn);
3264 hci_dev_unlock(hdev);
3267 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3268 u16 *opcode, u8 *status,
3269 hci_req_complete_t *req_complete,
3270 hci_req_complete_skb_t *req_complete_skb)
3272 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3274 *opcode = __le16_to_cpu(ev->opcode);
3275 *status = skb->data[sizeof(*ev)];
3277 skb_pull(skb, sizeof(*ev));
3280 case HCI_OP_INQUIRY_CANCEL:
3281 hci_cc_inquiry_cancel(hdev, skb, status);
3284 case HCI_OP_PERIODIC_INQ:
3285 hci_cc_periodic_inq(hdev, skb);
3288 case HCI_OP_EXIT_PERIODIC_INQ:
3289 hci_cc_exit_periodic_inq(hdev, skb);
3292 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3293 hci_cc_remote_name_req_cancel(hdev, skb);
3296 case HCI_OP_ROLE_DISCOVERY:
3297 hci_cc_role_discovery(hdev, skb);
3300 case HCI_OP_READ_LINK_POLICY:
3301 hci_cc_read_link_policy(hdev, skb);
3304 case HCI_OP_WRITE_LINK_POLICY:
3305 hci_cc_write_link_policy(hdev, skb);
3308 case HCI_OP_READ_DEF_LINK_POLICY:
3309 hci_cc_read_def_link_policy(hdev, skb);
3312 case HCI_OP_WRITE_DEF_LINK_POLICY:
3313 hci_cc_write_def_link_policy(hdev, skb);
3317 hci_cc_reset(hdev, skb);
3320 case HCI_OP_READ_STORED_LINK_KEY:
3321 hci_cc_read_stored_link_key(hdev, skb);
3324 case HCI_OP_DELETE_STORED_LINK_KEY:
3325 hci_cc_delete_stored_link_key(hdev, skb);
3328 case HCI_OP_WRITE_LOCAL_NAME:
3329 hci_cc_write_local_name(hdev, skb);
3332 case HCI_OP_READ_LOCAL_NAME:
3333 hci_cc_read_local_name(hdev, skb);
3336 case HCI_OP_WRITE_AUTH_ENABLE:
3337 hci_cc_write_auth_enable(hdev, skb);
3340 case HCI_OP_WRITE_ENCRYPT_MODE:
3341 hci_cc_write_encrypt_mode(hdev, skb);
3344 case HCI_OP_WRITE_SCAN_ENABLE:
3345 hci_cc_write_scan_enable(hdev, skb);
3348 case HCI_OP_READ_CLASS_OF_DEV:
3349 hci_cc_read_class_of_dev(hdev, skb);
3352 case HCI_OP_WRITE_CLASS_OF_DEV:
3353 hci_cc_write_class_of_dev(hdev, skb);
3356 case HCI_OP_READ_VOICE_SETTING:
3357 hci_cc_read_voice_setting(hdev, skb);
3360 case HCI_OP_WRITE_VOICE_SETTING:
3361 hci_cc_write_voice_setting(hdev, skb);
3364 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3365 hci_cc_read_num_supported_iac(hdev, skb);
3368 case HCI_OP_WRITE_SSP_MODE:
3369 hci_cc_write_ssp_mode(hdev, skb);
3372 case HCI_OP_WRITE_SC_SUPPORT:
3373 hci_cc_write_sc_support(hdev, skb);
3376 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3377 hci_cc_read_auth_payload_timeout(hdev, skb);
3380 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3381 hci_cc_write_auth_payload_timeout(hdev, skb);
3384 case HCI_OP_READ_LOCAL_VERSION:
3385 hci_cc_read_local_version(hdev, skb);
3388 case HCI_OP_READ_LOCAL_COMMANDS:
3389 hci_cc_read_local_commands(hdev, skb);
3392 case HCI_OP_READ_LOCAL_FEATURES:
3393 hci_cc_read_local_features(hdev, skb);
3396 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3397 hci_cc_read_local_ext_features(hdev, skb);
3400 case HCI_OP_READ_BUFFER_SIZE:
3401 hci_cc_read_buffer_size(hdev, skb);
3404 case HCI_OP_READ_BD_ADDR:
3405 hci_cc_read_bd_addr(hdev, skb);
3408 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3409 hci_cc_read_local_pairing_opts(hdev, skb);
3412 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3413 hci_cc_read_page_scan_activity(hdev, skb);
3416 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3417 hci_cc_write_page_scan_activity(hdev, skb);
3420 case HCI_OP_READ_PAGE_SCAN_TYPE:
3421 hci_cc_read_page_scan_type(hdev, skb);
3424 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3425 hci_cc_write_page_scan_type(hdev, skb);
3428 case HCI_OP_READ_DATA_BLOCK_SIZE:
3429 hci_cc_read_data_block_size(hdev, skb);
3432 case HCI_OP_READ_FLOW_CONTROL_MODE:
3433 hci_cc_read_flow_control_mode(hdev, skb);
3436 case HCI_OP_READ_LOCAL_AMP_INFO:
3437 hci_cc_read_local_amp_info(hdev, skb);
3440 case HCI_OP_READ_CLOCK:
3441 hci_cc_read_clock(hdev, skb);
3444 case HCI_OP_READ_INQ_RSP_TX_POWER:
3445 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3448 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3449 hci_cc_read_def_err_data_reporting(hdev, skb);
3452 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3453 hci_cc_write_def_err_data_reporting(hdev, skb);
3456 case HCI_OP_PIN_CODE_REPLY:
3457 hci_cc_pin_code_reply(hdev, skb);
3460 case HCI_OP_PIN_CODE_NEG_REPLY:
3461 hci_cc_pin_code_neg_reply(hdev, skb);
3464 case HCI_OP_READ_LOCAL_OOB_DATA:
3465 hci_cc_read_local_oob_data(hdev, skb);
3468 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3469 hci_cc_read_local_oob_ext_data(hdev, skb);
3472 case HCI_OP_LE_READ_BUFFER_SIZE:
3473 hci_cc_le_read_buffer_size(hdev, skb);
3476 case HCI_OP_LE_READ_LOCAL_FEATURES:
3477 hci_cc_le_read_local_features(hdev, skb);
3480 case HCI_OP_LE_READ_ADV_TX_POWER:
3481 hci_cc_le_read_adv_tx_power(hdev, skb);
3484 case HCI_OP_USER_CONFIRM_REPLY:
3485 hci_cc_user_confirm_reply(hdev, skb);
3488 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3489 hci_cc_user_confirm_neg_reply(hdev, skb);
3492 case HCI_OP_USER_PASSKEY_REPLY:
3493 hci_cc_user_passkey_reply(hdev, skb);
3496 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3497 hci_cc_user_passkey_neg_reply(hdev, skb);
3500 case HCI_OP_LE_SET_RANDOM_ADDR:
3501 hci_cc_le_set_random_addr(hdev, skb);
3504 case HCI_OP_LE_SET_ADV_ENABLE:
3505 hci_cc_le_set_adv_enable(hdev, skb);
3508 case HCI_OP_LE_SET_SCAN_PARAM:
3509 hci_cc_le_set_scan_param(hdev, skb);
3512 case HCI_OP_LE_SET_SCAN_ENABLE:
3513 hci_cc_le_set_scan_enable(hdev, skb);
3516 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3517 hci_cc_le_read_accept_list_size(hdev, skb);
3520 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3521 hci_cc_le_clear_accept_list(hdev, skb);
3524 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3525 hci_cc_le_add_to_accept_list(hdev, skb);
3528 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3529 hci_cc_le_del_from_accept_list(hdev, skb);
3532 case HCI_OP_LE_READ_SUPPORTED_STATES:
3533 hci_cc_le_read_supported_states(hdev, skb);
3536 case HCI_OP_LE_READ_DEF_DATA_LEN:
3537 hci_cc_le_read_def_data_len(hdev, skb);
3540 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3541 hci_cc_le_write_def_data_len(hdev, skb);
3544 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3545 hci_cc_le_add_to_resolv_list(hdev, skb);
3548 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3549 hci_cc_le_del_from_resolv_list(hdev, skb);
3552 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3553 hci_cc_le_clear_resolv_list(hdev, skb);
3556 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3557 hci_cc_le_read_resolv_list_size(hdev, skb);
3560 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3561 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3564 case HCI_OP_LE_READ_MAX_DATA_LEN:
3565 hci_cc_le_read_max_data_len(hdev, skb);
3568 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3569 hci_cc_write_le_host_supported(hdev, skb);
3572 case HCI_OP_LE_SET_ADV_PARAM:
3573 hci_cc_set_adv_param(hdev, skb);
3576 case HCI_OP_READ_RSSI:
3577 hci_cc_read_rssi(hdev, skb);
3580 case HCI_OP_READ_TX_POWER:
3581 hci_cc_read_tx_power(hdev, skb);
3584 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3585 hci_cc_write_ssp_debug_mode(hdev, skb);
3588 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3589 hci_cc_le_set_ext_scan_param(hdev, skb);
3592 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3593 hci_cc_le_set_ext_scan_enable(hdev, skb);
3596 case HCI_OP_LE_SET_DEFAULT_PHY:
3597 hci_cc_le_set_default_phy(hdev, skb);
3600 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3601 hci_cc_le_read_num_adv_sets(hdev, skb);
3604 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3605 hci_cc_set_ext_adv_param(hdev, skb);
3608 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3609 hci_cc_le_set_ext_adv_enable(hdev, skb);
3612 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3613 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3617 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3621 if (*opcode != HCI_OP_NOP)
3622 cancel_delayed_work(&hdev->cmd_timer);
3624 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3625 atomic_set(&hdev->cmd_cnt, 1);
3627 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3630 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3632 "unexpected event for opcode 0x%4.4x", *opcode);
3636 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3637 queue_work(hdev->workqueue, &hdev->cmd_work);
3640 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3641 u16 *opcode, u8 *status,
3642 hci_req_complete_t *req_complete,
3643 hci_req_complete_skb_t *req_complete_skb)
3645 struct hci_ev_cmd_status *ev = (void *) skb->data;
3647 skb_pull(skb, sizeof(*ev));
3649 *opcode = __le16_to_cpu(ev->opcode);
3650 *status = ev->status;
3653 case HCI_OP_INQUIRY:
3654 hci_cs_inquiry(hdev, ev->status);
3657 case HCI_OP_CREATE_CONN:
3658 hci_cs_create_conn(hdev, ev->status);
3661 case HCI_OP_DISCONNECT:
3662 hci_cs_disconnect(hdev, ev->status);
3665 case HCI_OP_ADD_SCO:
3666 hci_cs_add_sco(hdev, ev->status);
3669 case HCI_OP_AUTH_REQUESTED:
3670 hci_cs_auth_requested(hdev, ev->status);
3673 case HCI_OP_SET_CONN_ENCRYPT:
3674 hci_cs_set_conn_encrypt(hdev, ev->status);
3677 case HCI_OP_REMOTE_NAME_REQ:
3678 hci_cs_remote_name_req(hdev, ev->status);
3681 case HCI_OP_READ_REMOTE_FEATURES:
3682 hci_cs_read_remote_features(hdev, ev->status);
3685 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3686 hci_cs_read_remote_ext_features(hdev, ev->status);
3689 case HCI_OP_SETUP_SYNC_CONN:
3690 hci_cs_setup_sync_conn(hdev, ev->status);
3693 case HCI_OP_SNIFF_MODE:
3694 hci_cs_sniff_mode(hdev, ev->status);
3697 case HCI_OP_EXIT_SNIFF_MODE:
3698 hci_cs_exit_sniff_mode(hdev, ev->status);
3701 case HCI_OP_SWITCH_ROLE:
3702 hci_cs_switch_role(hdev, ev->status);
3705 case HCI_OP_LE_CREATE_CONN:
3706 hci_cs_le_create_conn(hdev, ev->status);
3709 case HCI_OP_LE_READ_REMOTE_FEATURES:
3710 hci_cs_le_read_remote_features(hdev, ev->status);
3713 case HCI_OP_LE_START_ENC:
3714 hci_cs_le_start_enc(hdev, ev->status);
3717 case HCI_OP_LE_EXT_CREATE_CONN:
3718 hci_cs_le_ext_create_conn(hdev, ev->status);
3722 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3726 if (*opcode != HCI_OP_NOP)
3727 cancel_delayed_work(&hdev->cmd_timer);
3729 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3730 atomic_set(&hdev->cmd_cnt, 1);
3732 /* Indicate request completion if the command failed. Also, if
3733 * we're not waiting for a special event and we get a success
3734 * command status we should try to flag the request as completed
3735 * (since for this kind of commands there will not be a command
3739 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3740 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3743 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3745 "unexpected event for opcode 0x%4.4x", *opcode);
3749 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3750 queue_work(hdev->workqueue, &hdev->cmd_work);
3753 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3755 struct hci_ev_hardware_error *ev = (void *) skb->data;
3757 hdev->hw_error_code = ev->code;
3759 queue_work(hdev->req_workqueue, &hdev->error_reset);
3762 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3764 struct hci_ev_role_change *ev = (void *) skb->data;
3765 struct hci_conn *conn;
3767 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3771 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3774 conn->role = ev->role;
3776 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3778 hci_role_switch_cfm(conn, ev->status, ev->role);
3781 hci_dev_unlock(hdev);
3784 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3786 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3789 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3790 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3794 if (skb->len < sizeof(*ev) ||
3795 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3796 BT_DBG("%s bad parameters", hdev->name);
3800 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3802 for (i = 0; i < ev->num_hndl; i++) {
3803 struct hci_comp_pkts_info *info = &ev->handles[i];
3804 struct hci_conn *conn;
3805 __u16 handle, count;
3807 handle = __le16_to_cpu(info->handle);
3808 count = __le16_to_cpu(info->count);
3810 conn = hci_conn_hash_lookup_handle(hdev, handle);
3814 conn->sent -= count;
3816 switch (conn->type) {
3818 hdev->acl_cnt += count;
3819 if (hdev->acl_cnt > hdev->acl_pkts)
3820 hdev->acl_cnt = hdev->acl_pkts;
3824 if (hdev->le_pkts) {
3825 hdev->le_cnt += count;
3826 if (hdev->le_cnt > hdev->le_pkts)
3827 hdev->le_cnt = hdev->le_pkts;
3829 hdev->acl_cnt += count;
3830 if (hdev->acl_cnt > hdev->acl_pkts)
3831 hdev->acl_cnt = hdev->acl_pkts;
3836 hdev->sco_cnt += count;
3837 if (hdev->sco_cnt > hdev->sco_pkts)
3838 hdev->sco_cnt = hdev->sco_pkts;
3842 bt_dev_err(hdev, "unknown type %d conn %p",
3848 queue_work(hdev->workqueue, &hdev->tx_work);
3851 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3854 struct hci_chan *chan;
3856 switch (hdev->dev_type) {
3858 return hci_conn_hash_lookup_handle(hdev, handle);
3860 chan = hci_chan_lookup_handle(hdev, handle);
3865 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3872 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3874 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3877 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3878 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3882 if (skb->len < sizeof(*ev) ||
3883 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3884 BT_DBG("%s bad parameters", hdev->name);
3888 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3891 for (i = 0; i < ev->num_hndl; i++) {
3892 struct hci_comp_blocks_info *info = &ev->handles[i];
3893 struct hci_conn *conn = NULL;
3894 __u16 handle, block_count;
3896 handle = __le16_to_cpu(info->handle);
3897 block_count = __le16_to_cpu(info->blocks);
3899 conn = __hci_conn_lookup_handle(hdev, handle);
3903 conn->sent -= block_count;
3905 switch (conn->type) {
3908 hdev->block_cnt += block_count;
3909 if (hdev->block_cnt > hdev->num_blocks)
3910 hdev->block_cnt = hdev->num_blocks;
3914 bt_dev_err(hdev, "unknown type %d conn %p",
3920 queue_work(hdev->workqueue, &hdev->tx_work);
3923 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3925 struct hci_ev_mode_change *ev = (void *) skb->data;
3926 struct hci_conn *conn;
3928 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3932 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3934 conn->mode = ev->mode;
3936 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3938 if (conn->mode == HCI_CM_ACTIVE)
3939 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3941 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3944 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3945 hci_sco_setup(conn, ev->status);
3948 hci_dev_unlock(hdev);
3951 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3953 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3954 struct hci_conn *conn;
3956 BT_DBG("%s", hdev->name);
3960 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3964 if (conn->state == BT_CONNECTED) {
3965 hci_conn_hold(conn);
3966 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3967 hci_conn_drop(conn);
3970 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3971 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3972 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3973 sizeof(ev->bdaddr), &ev->bdaddr);
3974 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3977 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3982 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3986 hci_dev_unlock(hdev);
3989 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3991 if (key_type == HCI_LK_CHANGED_COMBINATION)
3994 conn->pin_length = pin_len;
3995 conn->key_type = key_type;
3998 case HCI_LK_LOCAL_UNIT:
3999 case HCI_LK_REMOTE_UNIT:
4000 case HCI_LK_DEBUG_COMBINATION:
4002 case HCI_LK_COMBINATION:
4004 conn->pending_sec_level = BT_SECURITY_HIGH;
4006 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4008 case HCI_LK_UNAUTH_COMBINATION_P192:
4009 case HCI_LK_UNAUTH_COMBINATION_P256:
4010 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4012 case HCI_LK_AUTH_COMBINATION_P192:
4013 conn->pending_sec_level = BT_SECURITY_HIGH;
4015 case HCI_LK_AUTH_COMBINATION_P256:
4016 conn->pending_sec_level = BT_SECURITY_FIPS;
4021 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4023 struct hci_ev_link_key_req *ev = (void *) skb->data;
4024 struct hci_cp_link_key_reply cp;
4025 struct hci_conn *conn;
4026 struct link_key *key;
4028 BT_DBG("%s", hdev->name);
4030 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4035 key = hci_find_link_key(hdev, &ev->bdaddr);
4037 BT_DBG("%s link key not found for %pMR", hdev->name,
4042 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4045 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4047 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4049 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4050 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4051 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4052 BT_DBG("%s ignoring unauthenticated key", hdev->name);
4056 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4057 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4058 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4059 BT_DBG("%s ignoring key unauthenticated for high security",
4064 conn_set_key(conn, key->type, key->pin_len);
4067 bacpy(&cp.bdaddr, &ev->bdaddr);
4068 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4070 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4072 hci_dev_unlock(hdev);
4077 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4078 hci_dev_unlock(hdev);
4081 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4083 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4084 struct hci_conn *conn;
4085 struct link_key *key;
4089 BT_DBG("%s", hdev->name);
4093 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4097 /* Ignore NULL link key against CVE-2020-26555 */
4098 if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4099 bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4101 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4102 hci_conn_drop(conn);
4106 hci_conn_hold(conn);
4107 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4108 hci_conn_drop(conn);
4110 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4111 conn_set_key(conn, ev->key_type, conn->pin_length);
4113 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4116 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4117 ev->key_type, pin_len, &persistent);
4121 /* Update connection information since adding the key will have
4122 * fixed up the type in the case of changed combination keys.
4124 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4125 conn_set_key(conn, key->type, key->pin_len);
4127 mgmt_new_link_key(hdev, key, persistent);
4129 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4130 * is set. If it's not set simply remove the key from the kernel
4131 * list (we've still notified user space about it but with
4132 * store_hint being 0).
4134 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4135 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4136 list_del_rcu(&key->list);
4137 kfree_rcu(key, rcu);
4142 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4144 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4147 hci_dev_unlock(hdev);
4150 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4152 struct hci_ev_clock_offset *ev = (void *) skb->data;
4153 struct hci_conn *conn;
4155 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4159 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4160 if (conn && !ev->status) {
4161 struct inquiry_entry *ie;
4163 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4165 ie->data.clock_offset = ev->clock_offset;
4166 ie->timestamp = jiffies;
4170 hci_dev_unlock(hdev);
4173 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4175 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4176 struct hci_conn *conn;
4178 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4182 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4183 if (conn && !ev->status)
4184 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4186 hci_dev_unlock(hdev);
4189 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4191 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4192 struct inquiry_entry *ie;
4194 BT_DBG("%s", hdev->name);
4198 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4200 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4201 ie->timestamp = jiffies;
4204 hci_dev_unlock(hdev);
4207 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4208 struct sk_buff *skb)
4210 struct inquiry_data data;
4211 int num_rsp = *((__u8 *) skb->data);
4213 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4218 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4223 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4224 struct inquiry_info_with_rssi_and_pscan_mode *info;
4225 info = (void *) (skb->data + 1);
4227 if (skb->len < num_rsp * sizeof(*info) + 1)
4230 for (; num_rsp; num_rsp--, info++) {
4233 bacpy(&data.bdaddr, &info->bdaddr);
4234 data.pscan_rep_mode = info->pscan_rep_mode;
4235 data.pscan_period_mode = info->pscan_period_mode;
4236 data.pscan_mode = info->pscan_mode;
4237 memcpy(data.dev_class, info->dev_class, 3);
4238 data.clock_offset = info->clock_offset;
4239 data.rssi = info->rssi;
4240 data.ssp_mode = 0x00;
4242 flags = hci_inquiry_cache_update(hdev, &data, false);
4244 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4245 info->dev_class, info->rssi,
4246 flags, NULL, 0, NULL, 0);
4249 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4251 if (skb->len < num_rsp * sizeof(*info) + 1)
4254 for (; num_rsp; num_rsp--, info++) {
4257 bacpy(&data.bdaddr, &info->bdaddr);
4258 data.pscan_rep_mode = info->pscan_rep_mode;
4259 data.pscan_period_mode = info->pscan_period_mode;
4260 data.pscan_mode = 0x00;
4261 memcpy(data.dev_class, info->dev_class, 3);
4262 data.clock_offset = info->clock_offset;
4263 data.rssi = info->rssi;
4264 data.ssp_mode = 0x00;
4266 flags = hci_inquiry_cache_update(hdev, &data, false);
4268 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4269 info->dev_class, info->rssi,
4270 flags, NULL, 0, NULL, 0);
4275 hci_dev_unlock(hdev);
4278 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4279 struct sk_buff *skb)
4281 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4282 struct hci_conn *conn;
4284 BT_DBG("%s", hdev->name);
4288 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4292 if (ev->page < HCI_MAX_PAGES)
4293 memcpy(conn->features[ev->page], ev->features, 8);
4295 if (!ev->status && ev->page == 0x01) {
4296 struct inquiry_entry *ie;
4298 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4300 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4302 if (ev->features[0] & LMP_HOST_SSP) {
4303 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4305 /* It is mandatory by the Bluetooth specification that
4306 * Extended Inquiry Results are only used when Secure
4307 * Simple Pairing is enabled, but some devices violate
4310 * To make these devices work, the internal SSP
4311 * enabled flag needs to be cleared if the remote host
4312 * features do not indicate SSP support */
4313 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4316 if (ev->features[0] & LMP_HOST_SC)
4317 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4320 if (conn->state != BT_CONFIG)
4323 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4324 struct hci_cp_remote_name_req cp;
4325 memset(&cp, 0, sizeof(cp));
4326 bacpy(&cp.bdaddr, &conn->dst);
4327 cp.pscan_rep_mode = 0x02;
4328 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4329 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4330 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4332 if (!hci_outgoing_auth_needed(hdev, conn)) {
4333 conn->state = BT_CONNECTED;
4334 hci_connect_cfm(conn, ev->status);
4335 hci_conn_drop(conn);
4339 hci_dev_unlock(hdev);
/* Handle the HCI Synchronous Connection Complete event (SCO/eSCO).
 * Resolves the connection object, registers it on success, retries
 * setup with a fallback packet type on known negotiation failures,
 * and notifies the driver of the negotiated air mode.
 * NOTE(review): this excerpt is elided; some original lines are not
 * shown here.
 */
4342 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4343 struct sk_buff *skb)
4345 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4346 struct hci_conn *conn;
/* Only SCO and eSCO link types are valid for this event. */
4348 switch (ev->link_type) {
4353 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4354 * for HCI_Synchronous_Connection_Complete is limited to
4355 * either SCO or eSCO
4357 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4361 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
/* Look up the connection using the link type reported by the event. */
4365 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4367 if (ev->link_type == ESCO_LINK)
4370 /* When the link type in the event indicates SCO connection
4371 * and lookup of the connection object fails, then check
4372 * if an eSCO connection object exists.
4374 * The core limits the synchronous connections to either
4375 * SCO or eSCO. The eSCO connection is preferred and tried
4376 * to be setup first and until successfully established,
4377 * the link type will be hinted as eSCO.
4379 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
/* Dispatch on the event status code. */
4384 switch (ev->status) {
4386 /* The synchronous connection complete event should only be
4387 * sent once per new connection. Receiving a successful
4388 * complete event when the connection status is already
4389 * BT_CONNECTED means that the device is misbehaving and sent
4390 * multiple complete event packets for the same new connection.
4392 * Registering the device more than once can corrupt kernel
4393 * memory, hence upon detecting this invalid event, we report
4394 * an error and ignore the packet.
4396 if (conn->state == BT_CONNECTED) {
4397 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
/* Success: record the handle, mark connected and register the
 * connection with debugfs and sysfs.
 */
4401 conn->handle = __le16_to_cpu(ev->handle);
4402 conn->state = BT_CONNECTED;
4403 conn->type = ev->link_type;
4405 hci_debugfs_create_conn(conn);
4406 hci_conn_add_sysfs(conn);
/* Known negotiation failures: retry the synchronous setup with a
 * reduced packet type mask before giving up on the connection.
 */
4409 case 0x10: /* Connection Accept Timeout */
4410 case 0x0d: /* Connection Rejected due to Limited Resources */
4411 case 0x11: /* Unsupported Feature or Parameter Value */
4412 case 0x1c: /* SCO interval rejected */
4413 case 0x1a: /* Unsupported Remote Feature */
4414 case 0x1e: /* Invalid LMP Parameters */
4415 case 0x1f: /* Unspecified error */
4416 case 0x20: /* Unsupported LMP Parameter value */
4418 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4419 (hdev->esco_type & EDR_ESCO_MASK);
4420 if (hci_setup_sync(conn, conn->link->handle))
/* Any other failure closes the connection object. */
4426 conn->state = BT_CLOSED;
4430 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
/* Tell the driver which SCO codec path to enable for this link. */
4432 switch (ev->air_mode) {
4435 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4439 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
/* Inform the upper layers of the (possibly failed) connection. */
4443 hci_connect_cfm(conn, ev->status);
4448 hci_dev_unlock(hdev);
/* Walk a length-prefixed EIR (Extended Inquiry Response) buffer and
 * return the number of bytes actually used by valid fields, so that
 * trailing padding is not reported as data.
 * NOTE(review): excerpt is elided; the zero-length terminator check
 * and return statements are not shown here.
 */
4451 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4455 while (parsed < eir_len) {
4456 u8 field_len = eir[0];
/* Each EIR field occupies field_len bytes plus its length octet. */
4461 parsed += field_len + 1;
4462 eir += field_len + 1;
/* Handle the HCI Extended Inquiry Result event: validate the packet
 * length, update the inquiry cache for each response and forward a
 * device-found event (including the EIR payload) to the management
 * interface.
 */
4468 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4469 struct sk_buff *skb)
4471 struct inquiry_data data;
4472 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4473 int num_rsp = *((__u8 *) skb->data);
4476 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Reject events whose payload is shorter than the claimed number of
 * responses (malformed or truncated packet).
 */
4478 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
/* Results from periodic inquiry are not reported here. */
4481 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4486 for (; num_rsp; num_rsp--, info++) {
4490 bacpy(&data.bdaddr, &info->bdaddr);
4491 data.pscan_rep_mode = info->pscan_rep_mode;
4492 data.pscan_period_mode = info->pscan_period_mode;
4493 data.pscan_mode = 0x00;
4494 memcpy(data.dev_class, info->dev_class, 3);
4495 data.clock_offset = info->clock_offset;
4496 data.rssi = info->rssi;
/* Extended inquiry results imply SSP support on the remote. */
4497 data.ssp_mode = 0x01;
/* A complete name in the EIR data means no separate name request
 * is needed later.
 */
4499 if (hci_dev_test_flag(hdev, HCI_MGMT))
4500 name_known = eir_get_data(info->data,
4502 EIR_NAME_COMPLETE, NULL);
4506 flags = hci_inquiry_cache_update(hdev, &data, name_known);
/* Trim padded zero bytes from the EIR payload before reporting. */
4508 eir_len = eir_get_length(info->data, sizeof(info->data));
4510 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4511 info->dev_class, info->rssi,
4512 flags, info->data, eir_len, NULL, 0);
4515 hci_dev_unlock(hdev);
/* Handle the HCI Encryption Key Refresh Complete event. For LE links
 * this promotes the pending security level, disconnects on failure,
 * and confirms the connection/authentication to upper layers.
 */
4518 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4519 struct sk_buff *skb)
4521 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4522 struct hci_conn *conn;
4524 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4525 __le16_to_cpu(ev->handle));
4529 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4533 /* For BR/EDR the necessary steps are taken through the
4534 * auth_complete event.
4536 if (conn->type != LE_LINK)
/* Key refresh succeeded or failed; either way the pending security
 * level is now the effective one and no encryption is pending.
 */
4540 conn->sec_level = conn->pending_sec_level;
4542 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* On failure, tear the link down with an authentication error. */
4544 if (ev->status && conn->state == BT_CONNECTED) {
4545 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4546 hci_conn_drop(conn);
/* BT_CONFIG means this event completes connection setup; otherwise
 * it answers an authentication request on an established link.
 */
4550 if (conn->state == BT_CONFIG) {
4552 conn->state = BT_CONNECTED;
4554 hci_connect_cfm(conn, ev->status);
4555 hci_conn_drop(conn);
4557 hci_auth_cfm(conn, ev->status);
4559 hci_conn_hold(conn);
4560 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4561 hci_conn_drop(conn);
4565 hci_dev_unlock(hdev);
/* Compute the authentication requirement to send in an IO Capability
 * Reply, combining the remote's requirement with our own. Bit 0 of
 * the requirement value is the MITM protection flag.
 */
4568 static u8 hci_get_auth_req(struct hci_conn *conn)
4570 /* If remote requests no-bonding follow that lead */
4571 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4572 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4573 return conn->remote_auth | (conn->auth_type & 0x01);
4575 /* If both remote and local have enough IO capabilities, require
/* ... MITM protection (set bit 0 of the remote requirement). */
4578 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4579 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4580 return conn->remote_auth | 0x01;
4582 /* No MITM protection possible so ignore remote requirement */
4583 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Determine the OOB data present value for an IO Capability Reply on
 * a BR/EDR link, based on the OOB data stored for the peer and the
 * Secure Connections configuration of the controller.
 */
4586 static u8 bredr_oob_data_present(struct hci_conn *conn)
4588 struct hci_dev *hdev = conn->hdev;
4589 struct oob_data *data;
4591 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4595 if (bredr_sc_enabled(hdev)) {
4596 /* When Secure Connections is enabled, then just
4597 * return the present value stored with the OOB
4598 * data. The stored value contains the right present
4599 * information. However it can only be trusted when
4600 * not in Secure Connection Only mode.
4602 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4603 return data->present;
4605 /* When Secure Connections Only mode is enabled, then
4606 * the P-256 values are required. If they are not
4607 * available, then do not declare that OOB data is
/* ... present. crypto_memneq() is used for constant-time
 * comparison against the all-zero key.
 */
4610 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
4611 !crypto_memneq(data->hash256, ZERO_KEY, 16))
4617 /* When Secure Connections is not enabled or actually
4618 * not supported by the hardware, then check that if
4619 * P-192 data values are present.
4621 if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
4622 !crypto_memneq(data->hash192, ZERO_KEY, 16))
/* Handle the HCI IO Capability Request event: if pairing is allowed,
 * answer with our IO capability, authentication requirement and OOB
 * data presence; otherwise send a negative reply.
 */
4628 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4630 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4631 struct hci_conn *conn;
4633 BT_DBG("%s", hdev->name);
4637 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4638 if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4641 /* Assume remote supports SSP since it has triggered this event */
4642 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4644 hci_conn_hold(conn);
4646 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4649 /* Allow pairing if we're pairable, the initiators of the
4650 * pairing or if the remote is not requesting bonding.
4652 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4653 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4654 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4655 struct hci_cp_io_capability_reply cp;
4657 bacpy(&cp.bdaddr, &ev->bdaddr);
4658 /* Change the IO capability from KeyboardDisplay
4659 * to DisplayYesNo as it is not supported by BT spec. */
4660 cp.capability = (conn->io_capability == 0x04) ?
4661 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4663 /* If we are initiators, there is no remote information yet */
4664 if (conn->remote_auth == 0xff) {
4665 /* Request MITM protection if our IO caps allow it
4666 * except for the no-bonding case.
4668 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4669 conn->auth_type != HCI_AT_NO_BONDING)
4670 conn->auth_type |= 0x01;
/* As responder, derive the requirement from both sides. */
4672 conn->auth_type = hci_get_auth_req(conn);
4675 /* If we're not bondable, force one of the non-bondable
4676 * authentication requirement values.
4678 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4679 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4681 cp.authentication = conn->auth_type;
4682 cp.oob_data = bredr_oob_data_present(conn);
4684 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
/* Pairing not allowed: reject the request. */
4687 struct hci_cp_io_capability_neg_reply cp;
4689 bacpy(&cp.bdaddr, &ev->bdaddr);
4690 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4692 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4697 hci_dev_unlock(hdev);
/* Handle the HCI IO Capability Response event: record the remote
 * device's IO capability and authentication requirement for use in
 * the subsequent pairing steps.
 */
4700 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4702 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4703 struct hci_conn *conn;
4705 BT_DBG("%s", hdev->name);
4709 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4713 conn->remote_cap = ev->capability;
4714 conn->remote_auth = ev->authentication;
4717 hci_dev_unlock(hdev);
/* Handle the HCI User Confirmation Request event: decide whether to
 * auto-accept the numeric comparison, reject it, or forward it to
 * user space via the management interface.
 */
4720 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4721 struct sk_buff *skb)
4723 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4724 int loc_mitm, rem_mitm, confirm_hint = 0;
4725 struct hci_conn *conn;
4727 BT_DBG("%s", hdev->name);
4731 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4734 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of an authentication requirement is the MITM flag. */
4738 loc_mitm = (conn->auth_type & 0x01);
4739 rem_mitm = (conn->remote_auth & 0x01);
4741 /* If we require MITM but the remote device can't provide that
4742 * (it has NoInputNoOutput) then reject the confirmation
4743 * request. We check the security level here since it doesn't
4744 * necessarily match conn->auth_type.
4746 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4747 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4748 BT_DBG("Rejecting request: remote device can't provide MITM");
4749 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4750 sizeof(ev->bdaddr), &ev->bdaddr);
4754 /* If no side requires MITM protection; auto-accept */
4755 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4756 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4758 /* If we're not the initiators request authorization to
4759 * proceed from user space (mgmt_user_confirm with
4760 * confirm_hint set to 1). The exception is if neither
4761 * side had MITM or if the local IO capability is
4762 * NoInputNoOutput, in which case we do auto-accept
4764 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4765 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4766 (loc_mitm || rem_mitm)) {
4767 BT_DBG("Confirming auto-accept as acceptor");
4772 /* If there already exists link key in local host, leave the
4773 * decision to user space since the remote device could be
4774 * legitimate or malicious.
4776 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4777 bt_dev_dbg(hdev, "Local host already has link key");
4782 BT_DBG("Auto-accept of user confirmation with %ums delay",
4783 hdev->auto_accept_delay);
/* A configured delay defers the accept to a delayed work item so a
 * remote-initiated cancel can still arrive in time.
 */
4785 if (hdev->auto_accept_delay > 0) {
4786 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4787 queue_delayed_work(conn->hdev->workqueue,
4788 &conn->auto_accept_work, delay);
/* No delay configured: accept immediately. */
4792 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4793 sizeof(ev->bdaddr), &ev->bdaddr);
/* Otherwise let user space confirm, with confirm_hint indicating
 * whether a simple authorization (1) or a value comparison (0) is
 * expected.
 */
4798 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4799 le32_to_cpu(ev->passkey), confirm_hint);
4802 hci_dev_unlock(hdev);
/* Handle the HCI User Passkey Request event by forwarding it to user
 * space through the management interface (only when mgmt is active).
 */
4805 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4806 struct sk_buff *skb)
4808 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4810 BT_DBG("%s", hdev->name);
4812 if (hci_dev_test_flag(hdev, HCI_MGMT))
4813 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle the HCI User Passkey Notification event: store the passkey
 * to display, reset the entered-digit counter and notify user space.
 */
4816 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4817 struct sk_buff *skb)
4819 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4820 struct hci_conn *conn;
4822 BT_DBG("%s", hdev->name);
4824 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4828 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4829 conn->passkey_entered = 0;
4831 if (hci_dev_test_flag(hdev, HCI_MGMT))
4832 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4833 conn->dst_type, conn->passkey_notify,
4834 conn->passkey_entered);
/* Handle the HCI Keypress Notification event: track how many passkey
 * digits the remote has entered so far and relay the progress to
 * user space for display.
 */
4837 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4839 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4840 struct hci_conn *conn;
4842 BT_DBG("%s", hdev->name);
4844 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Update the digit counter according to the keypress type. */
4849 case HCI_KEYPRESS_STARTED:
4850 conn->passkey_entered = 0;
4853 case HCI_KEYPRESS_ENTERED:
4854 conn->passkey_entered++;
4857 case HCI_KEYPRESS_ERASED:
4858 conn->passkey_entered--;
4861 case HCI_KEYPRESS_CLEARED:
4862 conn->passkey_entered = 0;
4865 case HCI_KEYPRESS_COMPLETED:
4869 if (hci_dev_test_flag(hdev, HCI_MGMT))
4870 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4871 conn->dst_type, conn->passkey_notify,
4872 conn->passkey_entered);
/* Handle the HCI Simple Pairing Complete event: reset the cached
 * remote authentication requirement and report pairing failures to
 * user space when we were not the authentication initiator.
 */
4875 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4876 struct sk_buff *skb)
4878 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4879 struct hci_conn *conn;
4881 BT_DBG("%s", hdev->name);
4885 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4886 if (!conn || !hci_conn_ssp_enabled(conn))
4889 /* Reset the authentication requirement to unknown */
4890 conn->remote_auth = 0xff;
4892 /* To avoid duplicate auth_failed events to user space we check
4893 * the HCI_CONN_AUTH_PEND flag which will be set if we
4894 * initiated the authentication. A traditional auth_complete
4895 * event gets always produced as initiator and is also mapped to
4896 * the mgmt_auth_failed event */
4897 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4898 mgmt_auth_failed(conn, ev->status);
/* Drop the reference taken when pairing started. */
4900 hci_conn_drop(conn);
4903 hci_dev_unlock(hdev);
/* Handle the HCI Remote Host Supported Features Notification event:
 * store the remote host feature page and record the peer's SSP host
 * support in the inquiry cache entry if one exists.
 */
4906 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4907 struct sk_buff *skb)
4909 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4910 struct inquiry_entry *ie;
4911 struct hci_conn *conn;
4913 BT_DBG("%s", hdev->name);
4917 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Page 1 of the remote features holds the host-supported bits. */
4919 memcpy(conn->features[1], ev->features, 8);
4921 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4923 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4925 hci_dev_unlock(hdev);
/* Handle the HCI Remote OOB Data Request event: reply with the OOB
 * hash/randomizer values stored for the peer (extended form when
 * Secure Connections is enabled), or send a negative reply when no
 * OOB data is available.
 */
4928 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4929 struct sk_buff *skb)
4931 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4932 struct oob_data *data;
4934 BT_DBG("%s", hdev->name);
4938 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4941 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored OOB data for this peer: negative reply. */
4943 struct hci_cp_remote_oob_data_neg_reply cp;
4945 bacpy(&cp.bdaddr, &ev->bdaddr);
4946 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
/* With Secure Connections the extended reply carries both the P-192
 * and P-256 values; in SC-only mode the P-192 values are zeroed out.
 */
4951 if (bredr_sc_enabled(hdev)) {
4952 struct hci_cp_remote_oob_ext_data_reply cp;
4954 bacpy(&cp.bdaddr, &ev->bdaddr);
4955 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4956 memset(cp.hash192, 0, sizeof(cp.hash192));
4957 memset(cp.rand192, 0, sizeof(cp.rand192));
4959 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4960 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4962 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4963 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4965 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
/* Legacy reply with only the P-192 values. */
4968 struct hci_cp_remote_oob_data_reply cp;
4970 bacpy(&cp.bdaddr, &ev->bdaddr);
4971 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4972 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4974 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4979 hci_dev_unlock(hdev);
4982 #if IS_ENABLED(CONFIG_BT_HS)
/* Handle the AMP Channel Selected event (CONFIG_BT_HS): look up the
 * physical link and kick off reading the final local AMP assoc data.
 */
4983 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4985 struct hci_ev_channel_selected *ev = (void *)skb->data;
4986 struct hci_conn *hcon;
4988 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4990 skb_pull(skb, sizeof(*ev));
4992 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4996 amp_read_loc_assoc_final_data(hdev, hcon);
/* Handle the AMP Physical Link Complete event: mark the AMP physical
 * link connected, inherit the peer address from the associated BR/EDR
 * link, register the connection and confirm it to the AMP manager.
 */
4999 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5000 struct sk_buff *skb)
5002 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5003 struct hci_conn *hcon, *bredr_hcon;
5005 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5010 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5012 hci_dev_unlock(hdev);
/* A physical link without an AMP manager cannot be completed. */
5016 if (!hcon->amp_mgr) {
5017 hci_dev_unlock(hdev);
5023 hci_dev_unlock(hdev);
/* The BR/EDR connection that carries the L2CAP signalling for this
 * AMP link provides the peer address.
 */
5027 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5029 hcon->state = BT_CONNECTED;
5030 bacpy(&hcon->dst, &bredr_hcon->dst);
5032 hci_conn_hold(hcon);
5033 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5034 hci_conn_drop(hcon);
5036 hci_debugfs_create_conn(hcon);
5037 hci_conn_add_sysfs(hcon);
5039 amp_physical_cfm(bredr_hcon, hcon);
5041 hci_dev_unlock(hdev);
/* Handle the AMP Logical Link Complete event: create an hci_chan for
 * the new logical link and, when an L2CAP channel is waiting on the
 * AMP manager, confirm the logical link to L2CAP.
 */
5044 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5046 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5047 struct hci_conn *hcon;
5048 struct hci_chan *hchan;
5049 struct amp_mgr *mgr;
5051 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5052 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5055 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5059 /* Create AMP hchan */
5060 hchan = hci_chan_create(hcon);
5064 hchan->handle = le16_to_cpu(ev->handle);
5067 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5069 mgr = hcon->amp_mgr;
5070 if (mgr && mgr->bredr_chan) {
5071 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5073 l2cap_chan_lock(bredr_chan);
/* Switch the L2CAP connection MTU to the AMP block MTU and confirm
 * the logical link so traffic can move to the AMP controller.
 */
5075 bredr_chan->conn->mtu = hdev->block_mtu;
5076 l2cap_logical_cfm(bredr_chan, hchan, 0);
5077 hci_conn_hold(hcon);
5079 l2cap_chan_unlock(bredr_chan);
/* Handle the AMP Disconnection Logical Link Complete event: tear down
 * the corresponding AMP hci_chan with the reported reason.
 */
5083 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5084 struct sk_buff *skb)
5086 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5087 struct hci_chan *hchan;
5089 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5090 le16_to_cpu(ev->handle), ev->status);
5097 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
/* Only AMP channels are handled here. */
5098 if (!hchan || !hchan->amp)
5101 amp_destroy_logical_link(hchan, ev->reason);
5104 hci_dev_unlock(hdev);
/* Handle the AMP Disconnection Physical Link Complete event: close
 * the AMP connection object and notify upper layers of the reason.
 */
5107 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5108 struct sk_buff *skb)
5110 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5111 struct hci_conn *hcon;
5113 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5120 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* Guard against a stale handle that no longer refers to an AMP link. */
5121 if (hcon && hcon->type == AMP_LINK) {
5122 hcon->state = BT_CLOSED;
5123 hci_disconn_cfm(hcon, ev->reason);
5127 hci_dev_unlock(hdev);
/* Fill in the initiator/responder address fields of an LE connection
 * based on our role, preferring the controller-provided Local RPA
 * over the locally cached one when available.
 * NOTE(review): excerpt is elided; the role branches' opening lines
 * are not all shown here.
 */
5131 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5132 u8 bdaddr_type, bdaddr_t *local_rpa)
/* Central role: the peer is the responder. */
5135 conn->dst_type = bdaddr_type;
5136 conn->resp_addr_type = bdaddr_type;
5137 bacpy(&conn->resp_addr, bdaddr);
5139 /* Check if the controller has set a Local RPA then it must be
5140 * used instead or hdev->rpa.
5142 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5143 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5144 bacpy(&conn->init_addr, local_rpa);
5145 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5146 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5147 bacpy(&conn->init_addr, &conn->hdev->rpa);
/* Without privacy, fall back to the identity address. */
5149 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5150 &conn->init_addr_type);
/* Peripheral role: we are the responder. */
5153 conn->resp_addr_type = conn->hdev->adv_addr_type;
5154 /* Check if the controller has set a Local RPA then it must be
5155 * used instead or hdev->rpa.
5157 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5158 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5159 bacpy(&conn->resp_addr, local_rpa);
5160 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5161 /* In case of ext adv, resp_addr will be updated in
5162 * Adv Terminated event.
5164 if (!ext_adv_capable(conn->hdev))
5165 bacpy(&conn->resp_addr,
5166 &conn->hdev->random_addr);
5168 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5171 conn->init_addr_type = bdaddr_type;
5172 bacpy(&conn->init_addr, bdaddr);
5174 /* For incoming connections, set the default minimum
5175 * and maximum connection interval. They will be used
5176 * to check if the parameters are in range and if not
5177 * trigger the connection update procedure.
5179 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5180 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common handler for LE Connection Complete and LE Enhanced
 * Connection Complete events: resolve or create the connection
 * object, fix up the initiator/responder addresses, translate RPAs
 * to identity addresses, register the connection and clean up any
 * pending connection parameters.
 * NOTE(review): excerpt is elided; error paths and some branch
 * headers are not shown here.
 */
5184 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5185 bdaddr_t *bdaddr, u8 bdaddr_type,
5186 bdaddr_t *local_rpa, u8 role, u16 handle,
5187 u16 interval, u16 latency,
5188 u16 supervision_timeout)
5190 struct hci_conn_params *params;
5191 struct hci_conn *conn;
5192 struct smp_irk *irk;
5197 /* All controllers implicitly stop advertising in the event of a
5198 * connection, so ensure that the state bit is cleared.
5200 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5202 conn = hci_lookup_le_connect(hdev);
/* No pending connect: this is a remotely-initiated (or accept-list)
 * connection, so allocate a fresh object.
 */
5204 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5206 bt_dev_err(hdev, "no memory for new connection");
5210 conn->dst_type = bdaddr_type;
5212 /* If we didn't have a hci_conn object previously
5213 * but we're in central role this must be something
5214 * initiated using an accept list. Since accept list based
5215 * connections are not "first class citizens" we don't
5216 * have full tracking of them. Therefore, we go ahead
5217 * with a "best effort" approach of determining the
5218 * initiator address based on the HCI_PRIVACY flag.
5221 conn->resp_addr_type = bdaddr_type;
5222 bacpy(&conn->resp_addr, bdaddr);
5223 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5224 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5225 bacpy(&conn->init_addr, &hdev->rpa);
5227 hci_copy_identity_address(hdev,
5229 &conn->init_addr_type);
/* The connection attempt finished; stop its timeout timer. */
5233 cancel_delayed_work(&conn->le_conn_timeout);
5236 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5238 /* Lookup the identity address from the stored connection
5239 * address and address type.
5241 * When establishing connections to an identity address, the
5242 * connection procedure will store the resolvable random
5243 * address first. Now if it can be converted back into the
5244 * identity address, start using the identity address from
5247 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5249 bacpy(&conn->dst, &irk->bdaddr);
5250 conn->dst_type = irk->addr_type;
/* Non-zero status means the connection attempt failed. */
5254 hci_le_conn_failed(conn, status);
5258 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5259 addr_type = BDADDR_LE_PUBLIC;
5261 addr_type = BDADDR_LE_RANDOM;
5263 /* Drop the connection if the device is blocked */
5264 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5265 hci_conn_drop(conn);
/* Notify mgmt exactly once per connection. */
5269 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5270 mgmt_device_connected(hdev, conn, 0, NULL, 0);
5272 conn->sec_level = BT_SECURITY_LOW;
5273 conn->handle = handle;
5274 conn->state = BT_CONFIG;
5276 conn->le_conn_interval = interval;
5277 conn->le_conn_latency = latency;
5278 conn->le_supv_timeout = supervision_timeout;
5280 hci_debugfs_create_conn(conn);
5281 hci_conn_add_sysfs(conn);
5283 /* The remote features procedure is defined for master
5284 * role only. So only in case of an initiated connection
5285 * request the remote features.
5287 * If the local controller supports slave-initiated features
5288 * exchange, then requesting the remote features in slave
5289 * role is possible. Otherwise just transition into the
5290 * connected state without requesting the remote features.
5293 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
5294 struct hci_cp_le_read_remote_features cp;
5296 cp.handle = __cpu_to_le16(conn->handle);
5298 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5301 hci_conn_hold(conn);
/* No feature exchange: the connection is fully up now. */
5303 conn->state = BT_CONNECTED;
5304 hci_connect_cfm(conn, status);
/* The pending connection action for this peer is now consumed. */
5307 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5310 list_del_init(&params->action);
5312 hci_conn_drop(params->conn);
5313 hci_conn_put(params->conn);
5314 params->conn = NULL;
/* Re-evaluate background scanning now that the pend list changed. */
5319 hci_update_background_scan(hdev);
5320 hci_dev_unlock(hdev);
/* Handle the legacy LE Connection Complete event by delegating to the
 * common handler (no Local RPA is provided by this event, hence NULL).
 */
5323 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5325 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5327 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5329 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5330 NULL, ev->role, le16_to_cpu(ev->handle),
5331 le16_to_cpu(ev->interval),
5332 le16_to_cpu(ev->latency),
5333 le16_to_cpu(ev->supervision_timeout));
/* Handle the LE Enhanced Connection Complete event: delegate to the
 * common handler with the controller-provided Local RPA, then disable
 * controller-based address resolution if LL privacy is in use (it
 * cannot be changed while resolution is active).
 */
5336 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5337 struct sk_buff *skb)
5339 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5341 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5343 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5344 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5345 le16_to_cpu(ev->interval),
5346 le16_to_cpu(ev->latency),
5347 le16_to_cpu(ev->supervision_timeout));
5349 if (use_ll_privacy(hdev) &&
5350 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5351 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5352 hci_req_disable_address_resolution(hdev);
/* Handle the LE Advertising Set Terminated event: remove the
 * advertising instance on error status, and on a connection-caused
 * termination fix up the connection's responder address with the
 * random address actually used by the advertising set.
 */
5355 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5357 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5358 struct hci_conn *conn;
5360 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
/* Error status: drop the terminated advertising instance. */
5363 struct adv_info *adv;
5365 adv = hci_find_adv_instance(hdev, ev->handle);
5369 /* Remove advertising as it has been terminated */
5370 hci_remove_adv_instance(hdev, ev->handle);
5371 mgmt_advertising_removed(NULL, hdev, ev->handle);
5376 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5378 struct adv_info *adv_instance;
/* Only patch resp_addr when it is still unset and the advertising
 * address type is random.
 */
5380 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5381 bacmp(&conn->resp_addr, BDADDR_ANY))
5384 if (!hdev->cur_adv_instance) {
5385 bacpy(&conn->resp_addr, &hdev->random_addr);
5389 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5391 bacpy(&conn->resp_addr, &adv_instance->random_addr);
/* Handle the LE Connection Update Complete event: record the newly
 * negotiated connection interval, latency and supervision timeout on
 * the connection object.
 */
5395 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5396 struct sk_buff *skb)
5398 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5399 struct hci_conn *conn;
5401 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5408 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5410 conn->le_conn_interval = le16_to_cpu(ev->interval);
5411 conn->le_conn_latency = le16_to_cpu(ev->latency);
5412 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5415 hci_dev_unlock(hdev);
/* This function requires the caller holds hdev->lock */
/* Decide, based on an incoming advertising report, whether we should
 * initiate an LE connection to the advertiser, and do so if a
 * matching pending-connection entry allows it. Returns the new
 * connection (so the caller can attach the advertising data) or NULL.
 * NOTE(review): excerpt is elided; several return statements and
 * branch closers are not shown here.
 */
5419 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5421 u8 addr_type, u8 adv_type,
5422 bdaddr_t *direct_rpa)
5424 struct hci_conn *conn;
5425 struct hci_conn_params *params;
5427 /* If the event is not connectable don't proceed further */
5428 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5431 /* Ignore if the device is blocked */
5432 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5435 /* Most controller will fail if we try to create new connections
5436 * while we have an existing one in slave role.
5438 if (hdev->conn_hash.le_num_slave > 0 &&
5439 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5440 !(hdev->le_states[3] & 0x10)))
5443 /* If we're not connectable only connect devices that we have in
5444 * our pend_le_conns list.
5446 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
/* Explicit connects skip the auto-connect policy check below. */
5451 if (!params->explicit_connect) {
5452 switch (params->auto_connect) {
5453 case HCI_AUTO_CONN_DIRECT:
5454 /* Only devices advertising with ADV_DIRECT_IND are
5455 * triggering a connection attempt. This is allowing
5456 * incoming connections from slave devices.
5458 if (adv_type != LE_ADV_DIRECT_IND)
5461 case HCI_AUTO_CONN_ALWAYS:
5462 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5463 * are triggering a connection attempt. This means
5464 * that incoming connections from slave device are
5465 * accepted and also outgoing connections to slave
5466 * devices are established when found.
/* Policy allows it: attempt the connection as central. */
5474 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5475 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5477 if (!IS_ERR(conn)) {
5478 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5479 * by higher layer that tried to connect, if no then
5480 * store the pointer since we don't really have any
5481 * other owner of the object besides the params that
5482 * triggered it. This way we can abort the connection if
5483 * the parameters get removed and keep the reference
5484 * count consistent once the connection is established.
5487 if (!params->explicit_connect)
5488 params->conn = hci_conn_get(conn);
5493 switch (PTR_ERR(conn)) {
5495 /* If hci_connect() returns -EBUSY it means there is already
5496 * an LE connection attempt going on. Since controllers don't
5497 * support more than one connection attempt at the time, we
5498 * don't consider this an error case.
5502 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5509 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5510 u8 bdaddr_type, bdaddr_t *direct_addr,
5511 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5514 struct discovery_state *d = &hdev->discovery;
5515 struct smp_irk *irk;
5516 struct hci_conn *conn;
5523 case LE_ADV_DIRECT_IND:
5524 case LE_ADV_SCAN_IND:
5525 case LE_ADV_NONCONN_IND:
5526 case LE_ADV_SCAN_RSP:
5529 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5530 "type: 0x%02x", type);
5534 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5535 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5539 /* Find the end of the data in case the report contains padded zero
5540 * bytes at the end causing an invalid length value.
5542 * When data is NULL, len is 0 so there is no need for extra ptr
5543 * check as 'ptr < data + 0' is already false in such case.
5545 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5546 if (ptr + 1 + *ptr > data + len)
5550 /* Adjust for actual length. This handles the case when remote
5551 * device is advertising with incorrect data length.
5555 /* If the direct address is present, then this report is from
5556 * a LE Direct Advertising Report event. In that case it is
5557 * important to see if the address is matching the local
5558 * controller address.
5561 /* Only resolvable random addresses are valid for these
5562 * kind of reports and others can be ignored.
5564 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5567 /* If the controller is not using resolvable random
5568 * addresses, then this report can be ignored.
5570 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5573 /* If the local IRK of the controller does not match
5574 * with the resolvable random address provided, then
5575 * this report can be ignored.
5577 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5581 /* Check if we need to convert to identity address */
5582 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5584 bdaddr = &irk->bdaddr;
5585 bdaddr_type = irk->addr_type;
5588 /* Check if we have been requested to connect to this device.
5590 * direct_addr is set only for directed advertising reports (it is NULL
5591 * for advertising reports) and is already verified to be RPA above.
5593 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5595 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5596 /* Store report for later inclusion by
5597 * mgmt_device_connected
5599 memcpy(conn->le_adv_data, data, len);
5600 conn->le_adv_data_len = len;
5603 /* Passive scanning shouldn't trigger any device found events,
5604 * except for devices marked as CONN_REPORT for which we do send
5605 * device found events, or advertisement monitoring requested.
5607 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5608 if (type == LE_ADV_DIRECT_IND)
5611 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5612 bdaddr, bdaddr_type) &&
5613 idr_is_empty(&hdev->adv_monitors_idr))
5616 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5617 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5620 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5621 rssi, flags, data, len, NULL, 0);
5625 /* When receiving non-connectable or scannable undirected
5626 * advertising reports, this means that the remote device is
5627 * not connectable and then clearly indicate this in the
5628 * device found event.
5630 * When receiving a scan response, then there is no way to
5631 * know if the remote device is connectable or not. However
5632 * since scan responses are merged with a previously seen
5633 * advertising report, the flags field from that report
5636 * In the really unlikely case that a controller get confused
5637 * and just sends a scan response event, then it is marked as
5638 * not connectable as well.
5640 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5641 type == LE_ADV_SCAN_RSP)
5642 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5646 /* If there's nothing pending either store the data from this
5647 * event or send an immediate device found event if the data
5648 * should not be stored for later.
5650 if (!ext_adv && !has_pending_adv_report(hdev)) {
5651 /* If the report will trigger a SCAN_REQ store it for
5654 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5655 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5656 rssi, flags, data, len);
5660 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5661 rssi, flags, data, len, NULL, 0);
5665 /* Check if the pending report is for the same device as the new one */
5666 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5667 bdaddr_type == d->last_adv_addr_type);
5669 /* If the pending data doesn't match this report or this isn't a
5670 * scan response (e.g. we got a duplicate ADV_IND) then force
5671 * sending of the pending data.
5673 if (type != LE_ADV_SCAN_RSP || !match) {
5674 /* Send out whatever is in the cache, but skip duplicates */
5676 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5677 d->last_adv_addr_type, NULL,
5678 d->last_adv_rssi, d->last_adv_flags,
5680 d->last_adv_data_len, NULL, 0);
5682 /* If the new report will trigger a SCAN_REQ store it for
5685 if (!ext_adv && (type == LE_ADV_IND ||
5686 type == LE_ADV_SCAN_IND)) {
5687 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5688 rssi, flags, data, len);
5692 /* The advertising reports cannot be merged, so clear
5693 * the pending report and send out a device found event.
5695 clear_pending_adv_report(hdev);
5696 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5697 rssi, flags, data, len, NULL, 0);
5701 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5702 * the new event is a SCAN_RSP. We can therefore proceed with
5703 * sending a merged device found event.
5705 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5706 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5707 d->last_adv_data, d->last_adv_data_len, data, len);
5708 clear_pending_adv_report(hdev);
/* Handle the HCI LE Advertising Report event (legacy, non-extended).
 *
 * Payload layout: a one-byte report count followed by num_reports
 * variable-length struct hci_ev_le_advertising_info records, each
 * trailed by a one-byte RSSI value (read as ev->data[ev->length]).
 * Each well-formed report is forwarded to process_adv_report(); records
 * that would read past the skb tail are rejected with an error.
 */
5711 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5713 u8 num_reports = skb->data[0];
5714 void *ptr = &skb->data[1];
5718 while (num_reports--) {
5719 struct hci_ev_le_advertising_info *ev = ptr;
/* Bounds check: the fixed-size header of the next record must fit
 * entirely before the skb tail, otherwise the controller sent a
 * truncated/forged report.
 */
5722 if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
5723 bt_dev_err(hdev, "Malicious advertising data.");
/* Accept the record only if the AD payload length is within spec
 * and the payload (plus trailing RSSI byte read below) stays
 * inside the skb.
 */
5727 if (ev->length <= HCI_MAX_AD_LENGTH &&
5728 ev->data + ev->length <= skb_tail_pointer(skb)) {
/* RSSI is the single byte immediately after the AD data. */
5729 rssi = ev->data[ev->length];
5730 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5731 ev->bdaddr_type, NULL, 0, rssi,
5732 ev->data, ev->length, false);
5734 bt_dev_err(hdev, "Dropping invalid advertising data");
/* Advance past header + AD data + RSSI byte to the next record. */
5737 ptr += sizeof(*ev) + ev->length + 1;
5740 hci_dev_unlock(hdev);
/* Translate an extended advertising report event type (a 16-bit bit
 * field) into the corresponding legacy advertising PDU type so that the
 * common process_adv_report() path can be reused.
 *
 * Returns one of the LE_ADV_* legacy constants, or LE_ADV_INVALID (with
 * a rate-limited error log) when the bit combination is not recognized.
 */
5743 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
/* Legacy-PDU reports carry an exact legacy type encoding; map it 1:1. */
5745 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5747 case LE_LEGACY_ADV_IND:
5749 case LE_LEGACY_ADV_DIRECT_IND:
5750 return LE_ADV_DIRECT_IND;
5751 case LE_LEGACY_ADV_SCAN_IND:
5752 return LE_ADV_SCAN_IND;
5753 case LE_LEGACY_NONCONN_IND:
5754 return LE_ADV_NONCONN_IND;
5755 case LE_LEGACY_SCAN_RSP_ADV:
5756 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5757 return LE_ADV_SCAN_RSP;
/* True extended PDUs: derive the closest legacy type from the
 * connectable / scannable / directed / scan-response property bits.
 */
5763 if (evt_type & LE_EXT_ADV_CONN_IND) {
5764 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5765 return LE_ADV_DIRECT_IND;
5770 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5771 return LE_ADV_SCAN_RSP;
5773 if (evt_type & LE_EXT_ADV_SCAN_IND)
5774 return LE_ADV_SCAN_IND;
/* Non-connectable, non-scannable (optionally directed) maps to
 * NONCONN_IND.
 */
5776 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5777 evt_type & LE_EXT_ADV_DIRECT_IND)
5778 return LE_ADV_NONCONN_IND;
5781 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5784 return LE_ADV_INVALID;
/* Handle the HCI LE Extended Advertising Report event.
 *
 * Payload layout: a one-byte report count followed by num_reports
 * variable-length struct hci_ev_le_ext_adv_report records. Each report's
 * 16-bit event type is converted to a legacy advertising type via
 * ext_evt_type_to_legacy(); unrecognized types are silently skipped.
 * The "ext_adv" flag passed to process_adv_report() is true only for
 * true extended (non-legacy) PDUs.
 *
 * NOTE(review): unlike hci_le_adv_report_evt(), no bounds check against
 * the skb tail is visible here before dereferencing ev — confirm against
 * the full file whether validation happens elsewhere.
 */
5787 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5789 u8 num_reports = skb->data[0];
5790 void *ptr = &skb->data[1];
5794 while (num_reports--) {
5795 struct hci_ev_le_ext_adv_report *ev = ptr;
5799 evt_type = __le16_to_cpu(ev->evt_type);
5800 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5801 if (legacy_evt_type != LE_ADV_INVALID) {
5802 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5803 ev->bdaddr_type, NULL, 0, ev->rssi,
5804 ev->data, ev->length,
5805 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
/* Extended reports carry RSSI in the header, so there is no extra
 * trailing RSSI byte to skip (contrast with the legacy handler).
 */
5808 ptr += sizeof(*ev) + ev->length;
5811 hci_dev_unlock(hdev);
/* Handle the HCI LE Read Remote Features Complete event.
 *
 * Copies the remote LE feature mask into the connection object and, if
 * the connection is still in BT_CONFIG, completes connection setup by
 * moving it to BT_CONNECTED and notifying upper layers via
 * hci_connect_cfm().
 */
5814 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5815 struct sk_buff *skb)
5817 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5818 struct hci_conn *conn;
5820 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5824 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Cache the remote device's 8-byte LE feature page. */
5827 memcpy(conn->features[0], ev->features, 8);
5829 if (conn->state == BT_CONFIG) {
5832 /* If the local controller supports slave-initiated
5833 * features exchange, but the remote controller does
5834 * not, then it is possible that the error code 0x1a
5835 * for unsupported remote feature gets returned.
5837 * In this specific case, allow the connection to
5838 * transition into connected state and mark it as
/* 0x1a == HCI_ERROR "Unsupported Remote Feature"; treat it as
 * success for incoming (!conn->out) connections in this case.
 */
5841 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5842 !conn->out && ev->status == 0x1a)
5845 status = ev->status;
5847 conn->state = BT_CONNECTED;
5848 hci_connect_cfm(conn, status);
5849 hci_conn_drop(conn);
5853 hci_dev_unlock(hdev);
/* Handle the HCI LE Long Term Key Request event.
 *
 * The controller asks the host for the LTK matching the given
 * connection. Look up a stored key for the peer; if one is found and
 * its EDiv/Rand values are consistent with the request, reply with
 * HCI_OP_LE_LTK_REPLY carrying the (zero-padded) key. Otherwise a
 * negative reply (HCI_OP_LE_LTK_NEG_REPLY) is sent (the neg-reply path
 * lines are outside the visible portion except the final send).
 */
5856 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5858 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5859 struct hci_cp_le_ltk_reply cp;
5860 struct hci_cp_le_ltk_neg_reply neg;
5861 struct hci_conn *conn;
5862 struct smp_ltk *ltk;
5864 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5868 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5872 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5876 if (smp_ltk_is_sc(ltk)) {
5877 /* With SC both EDiv and Rand are set to zero */
5878 if (ev->ediv || ev->rand)
5881 /* For non-SC keys check that EDiv and Rand match */
5882 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Copy the key and zero-pad up to the full 16-byte LTK field, so
 * shorter negotiated key sizes never leak stale stack bytes.
 */
5886 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5887 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5888 cp.handle = cpu_to_le16(conn->handle);
5890 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5892 conn->enc_key_size = ltk->enc_size;
5894 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5896 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5897 * temporary key used to encrypt a connection following
5898 * pairing. It is used during the Encrypted Session Setup to
5899 * distribute the keys. Later, security can be re-established
5900 * using a distributed LTK.
/* STKs are single-use: delete from the key list (RCU-safe) after
 * use and flag the connection as STK-encrypted.
 */
5902 if (ltk->type == SMP_STK) {
5903 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5904 list_del_rcu(&ltk->list);
5905 kfree_rcu(ltk, rcu);
5907 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5910 hci_dev_unlock(hdev);
/* Negative-reply path: no usable key for this request. */
5915 neg.handle = ev->handle;
5916 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5917 hci_dev_unlock(hdev);
/* Send a negative reply to an LE Remote Connection Parameter Request,
 * rejecting the peer's proposed parameters for the given handle.
 *
 * NOTE(review): callers pass an HCI error code as a second value (see
 * HCI_ERROR_* uses below); the parameter declaration and the cp.reason
 * assignment are not visible in this extract — confirm against the full
 * file.
 */
5920 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5923 struct hci_cp_le_conn_param_req_neg_reply cp;
5925 cp.handle = cpu_to_le16(handle);
5928 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle the HCI LE Remote Connection Parameter Request event.
 *
 * The peer proposes new connection parameters. Reject with a negative
 * reply when the connection is unknown/not connected, when the proposed
 * maximum interval exceeds our configured limit, or when the parameters
 * fail hci_check_conn_params() validation. Otherwise, for the master
 * role, remember the accepted values in the stored connection-parameter
 * entry, notify userspace via mgmt_new_conn_param(), and accept the
 * request by echoing the parameters back in a positive reply.
 */
5932 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5933 struct sk_buff *skb)
5935 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5936 struct hci_cp_le_conn_param_req_reply cp;
5937 struct hci_conn *hcon;
5938 u16 handle, min, max, latency, timeout;
5940 handle = le16_to_cpu(ev->handle);
5941 min = le16_to_cpu(ev->interval_min);
5942 max = le16_to_cpu(ev->interval_max);
5943 latency = le16_to_cpu(ev->latency);
5944 timeout = le16_to_cpu(ev->timeout);
5946 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5947 if (!hcon || hcon->state != BT_CONNECTED)
5948 return send_conn_param_neg_reply(hdev, handle,
5949 HCI_ERROR_UNKNOWN_CONN_ID);
/* Refuse intervals above the locally configured ceiling. */
5951 if (max > hcon->le_conn_max_interval)
5952 return send_conn_param_neg_reply(hdev, handle,
5953 HCI_ERROR_INVALID_LL_PARAMS);
/* Spec-level sanity checks (min<=max, latency/timeout ranges). */
5955 if (hci_check_conn_params(min, max, latency, timeout))
5956 return send_conn_param_neg_reply(hdev, handle,
5957 HCI_ERROR_INVALID_LL_PARAMS);
/* Only the master stores the new values and informs mgmt. */
5959 if (hcon->role == HCI_ROLE_MASTER) {
5960 struct hci_conn_params *params;
5965 params = hci_conn_params_lookup(hdev, &hcon->dst,
5968 params->conn_min_interval = min;
5969 params->conn_max_interval = max;
5970 params->conn_latency = latency;
5971 params->supervision_timeout = timeout;
5977 hci_dev_unlock(hdev);
5979 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5980 store_hint, min, max, latency, timeout);
/* Accept: echo the peer's little-endian values back unchanged. */
5983 cp.handle = ev->handle;
5984 cp.interval_min = ev->interval_min;
5985 cp.interval_max = ev->interval_max;
5986 cp.latency = ev->latency;
5987 cp.timeout = ev->timeout;
5991 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* Handle the HCI LE Direct Advertising Report event.
 *
 * Payload layout: a one-byte report count followed by num_reports
 * fixed-size struct hci_ev_le_direct_adv_info records (no variable AD
 * data). The length check rejects a zero count or an skb too short to
 * hold all declared records; each report is then passed to
 * process_adv_report() with its direct (target) address.
 */
5994 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5995 struct sk_buff *skb)
5997 u8 num_reports = skb->data[0];
5998 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
6000 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
6005 for (; num_reports; num_reports--, ev++)
6006 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
6007 ev->bdaddr_type, &ev->direct_addr,
6008 ev->direct_addr_type, ev->rssi, NULL, 0,
6011 hci_dev_unlock(hdev);
/* Handle the HCI LE PHY Update Complete event.
 *
 * On success, record the newly negotiated TX and RX PHYs on the
 * connection object identified by the event's handle.
 */
6014 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
6016 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
6017 struct hci_conn *conn;
6019 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6026 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6030 conn->le_tx_phy = ev->tx_phy;
6031 conn->le_rx_phy = ev->rx_phy;
6034 hci_dev_unlock(hdev);
/* Dispatch an HCI LE Meta event to the handler for its subevent code.
 *
 * Strips the one-byte LE Meta header from the skb (so each subevent
 * handler sees its own payload at skb->data) and switches on the
 * subevent code.
 */
6037 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
6039 struct hci_ev_le_meta *le_ev = (void *) skb->data;
6041 skb_pull(skb, sizeof(*le_ev));
6043 switch (le_ev->subevent) {
6044 case HCI_EV_LE_CONN_COMPLETE:
6045 hci_le_conn_complete_evt(hdev, skb);
6048 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
6049 hci_le_conn_update_complete_evt(hdev, skb);
6052 case HCI_EV_LE_ADVERTISING_REPORT:
6053 hci_le_adv_report_evt(hdev, skb);
6056 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
6057 hci_le_remote_feat_complete_evt(hdev, skb);
6060 case HCI_EV_LE_LTK_REQ:
6061 hci_le_ltk_request_evt(hdev, skb);
6064 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
6065 hci_le_remote_conn_param_req_evt(hdev, skb);
6068 case HCI_EV_LE_DIRECT_ADV_REPORT:
6069 hci_le_direct_adv_report_evt(hdev, skb);
6072 case HCI_EV_LE_PHY_UPDATE_COMPLETE:
6073 hci_le_phy_update_evt(hdev, skb);
6076 case HCI_EV_LE_EXT_ADV_REPORT:
6077 hci_le_ext_adv_report_evt(hdev, skb);
6080 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
6081 hci_le_enh_conn_complete_evt(hdev, skb);
6084 case HCI_EV_LE_EXT_ADV_SET_TERM:
6085 hci_le_ext_adv_term_evt(hdev, skb);
/* Check whether skb is the Command Complete event matching the given
 * opcode (and expected event code), consuming the event and
 * cmd-complete headers from skb as it validates.
 *
 * Used on the pristine clone of an event before handing it to a
 * req_complete_skb callback, so the callback receives only the command
 * return parameters. Returns true when the skb matches; false on a
 * too-short skb, wrong event code, non-cmd-complete event, or opcode
 * mismatch. A Command Status event matching the request is accepted as
 * terminal (no extra parameters to extract).
 */
6093 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
6094 u8 event, struct sk_buff *skb)
6096 struct hci_ev_cmd_complete *ev;
6097 struct hci_event_hdr *hdr;
6102 if (skb->len < sizeof(*hdr)) {
6103 bt_dev_err(hdev, "too short HCI event");
6107 hdr = (void *) skb->data;
6108 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* If the request completed via a specific expected event, only
 * that event code qualifies.
 */
6111 if (hdr->evt != event)
6116 /* Check if request ended in Command Status - no way to retreive
6117 * any extra parameters in this case.
6119 if (hdr->evt == HCI_EV_CMD_STATUS)
6122 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
6123 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
6128 if (skb->len < sizeof(*ev)) {
6129 bt_dev_err(hdev, "too short cmd_complete event");
6133 ev = (void *) skb->data;
6134 skb_pull(skb, sizeof(*ev));
6136 if (opcode != __le16_to_cpu(ev->opcode)) {
6137 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
6138 __le16_to_cpu(ev->opcode));
/* Record why the controller woke the (suspended) host.
 *
 * Only the first event seen while hdev->suspended is set is recorded
 * (subsequent calls return early once wake_reason is non-zero). For
 * connection events and LE advertising/direct-adv/ext-adv reports the
 * peer's address and address type are saved so userspace can learn
 * which device caused the wake; any other event is classified as
 * MGMT_WAKE_REASON_UNEXPECTED.
 */
6145 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6146 struct sk_buff *skb)
6148 struct hci_ev_le_advertising_info *adv;
6149 struct hci_ev_le_direct_adv_info *direct_adv;
6150 struct hci_ev_le_ext_adv_report *ext_adv;
6151 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6152 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6156 /* If we are currently suspended and this is the first BT event seen,
6157 * save the wake reason associated with the event.
6159 if (!hdev->suspended || hdev->wake_reason)
6162 /* Default to remote wake. Values for wake_reason are documented in the
6163 * Bluez mgmt api docs.
6165 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6167 /* Once configured for remote wakeup, we should only wake up for
6168 * reconnections. It's useful to see which device is waking us up so
6169 * keep track of the bdaddr of the connection event that woke us up.
6171 if (event == HCI_EV_CONN_REQUEST) {
6172 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6173 hdev->wake_addr_type = BDADDR_BREDR;
6174 } else if (event == HCI_EV_CONN_COMPLETE) {
6175 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6176 hdev->wake_addr_type = BDADDR_BREDR;
6177 } else if (event == HCI_EV_LE_META) {
/* LE Meta: peek past the meta header at the report count, then
 * read the first report's address for the supported subevents.
 */
6178 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6179 u8 subevent = le_ev->subevent;
6180 u8 *ptr = &skb->data[sizeof(*le_ev)];
6181 u8 num_reports = *ptr;
6183 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6184 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6185 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
/* All three aliases point at the first report, immediately
 * after the one-byte report count.
 */
6187 adv = (void *)(ptr + 1);
6188 direct_adv = (void *)(ptr + 1);
6189 ext_adv = (void *)(ptr + 1);
6192 case HCI_EV_LE_ADVERTISING_REPORT:
6193 bacpy(&hdev->wake_addr, &adv->bdaddr);
6194 hdev->wake_addr_type = adv->bdaddr_type;
6196 case HCI_EV_LE_DIRECT_ADV_REPORT:
6197 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6198 hdev->wake_addr_type = direct_adv->bdaddr_type;
6200 case HCI_EV_LE_EXT_ADV_REPORT:
6201 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6202 hdev->wake_addr_type = ext_adv->bdaddr_type;
/* Anything else is not a wake source we expect while suspended. */
6207 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6211 hci_dev_unlock(hdev);
6214 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6216 struct hci_event_hdr *hdr = (void *) skb->data;
6217 hci_req_complete_t req_complete = NULL;
6218 hci_req_complete_skb_t req_complete_skb = NULL;
6219 struct sk_buff *orig_skb = NULL;
6220 u8 status = 0, event = hdr->evt, req_evt = 0;
6221 u16 opcode = HCI_OP_NOP;
6224 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6228 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6229 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6230 opcode = __le16_to_cpu(cmd_hdr->opcode);
6231 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6236 /* If it looks like we might end up having to call
6237 * req_complete_skb, store a pristine copy of the skb since the
6238 * various handlers may modify the original one through
6239 * skb_pull() calls, etc.
6241 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6242 event == HCI_EV_CMD_COMPLETE)
6243 orig_skb = skb_clone(skb, GFP_KERNEL);
6245 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6247 /* Store wake reason if we're suspended */
6248 hci_store_wake_reason(hdev, event, skb);
6251 case HCI_EV_INQUIRY_COMPLETE:
6252 hci_inquiry_complete_evt(hdev, skb);
6255 case HCI_EV_INQUIRY_RESULT:
6256 hci_inquiry_result_evt(hdev, skb);
6259 case HCI_EV_CONN_COMPLETE:
6260 hci_conn_complete_evt(hdev, skb);
6263 case HCI_EV_CONN_REQUEST:
6264 hci_conn_request_evt(hdev, skb);
6267 case HCI_EV_DISCONN_COMPLETE:
6268 hci_disconn_complete_evt(hdev, skb);
6271 case HCI_EV_AUTH_COMPLETE:
6272 hci_auth_complete_evt(hdev, skb);
6275 case HCI_EV_REMOTE_NAME:
6276 hci_remote_name_evt(hdev, skb);
6279 case HCI_EV_ENCRYPT_CHANGE:
6280 hci_encrypt_change_evt(hdev, skb);
6283 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6284 hci_change_link_key_complete_evt(hdev, skb);
6287 case HCI_EV_REMOTE_FEATURES:
6288 hci_remote_features_evt(hdev, skb);
6291 case HCI_EV_CMD_COMPLETE:
6292 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6293 &req_complete, &req_complete_skb);
6296 case HCI_EV_CMD_STATUS:
6297 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6301 case HCI_EV_HARDWARE_ERROR:
6302 hci_hardware_error_evt(hdev, skb);
6305 case HCI_EV_ROLE_CHANGE:
6306 hci_role_change_evt(hdev, skb);
6309 case HCI_EV_NUM_COMP_PKTS:
6310 hci_num_comp_pkts_evt(hdev, skb);
6313 case HCI_EV_MODE_CHANGE:
6314 hci_mode_change_evt(hdev, skb);
6317 case HCI_EV_PIN_CODE_REQ:
6318 hci_pin_code_request_evt(hdev, skb);
6321 case HCI_EV_LINK_KEY_REQ:
6322 hci_link_key_request_evt(hdev, skb);
6325 case HCI_EV_LINK_KEY_NOTIFY:
6326 hci_link_key_notify_evt(hdev, skb);
6329 case HCI_EV_CLOCK_OFFSET:
6330 hci_clock_offset_evt(hdev, skb);
6333 case HCI_EV_PKT_TYPE_CHANGE:
6334 hci_pkt_type_change_evt(hdev, skb);
6337 case HCI_EV_PSCAN_REP_MODE:
6338 hci_pscan_rep_mode_evt(hdev, skb);
6341 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6342 hci_inquiry_result_with_rssi_evt(hdev, skb);
6345 case HCI_EV_REMOTE_EXT_FEATURES:
6346 hci_remote_ext_features_evt(hdev, skb);
6349 case HCI_EV_SYNC_CONN_COMPLETE:
6350 hci_sync_conn_complete_evt(hdev, skb);
6353 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6354 hci_extended_inquiry_result_evt(hdev, skb);
6357 case HCI_EV_KEY_REFRESH_COMPLETE:
6358 hci_key_refresh_complete_evt(hdev, skb);
6361 case HCI_EV_IO_CAPA_REQUEST:
6362 hci_io_capa_request_evt(hdev, skb);
6365 case HCI_EV_IO_CAPA_REPLY:
6366 hci_io_capa_reply_evt(hdev, skb);
6369 case HCI_EV_USER_CONFIRM_REQUEST:
6370 hci_user_confirm_request_evt(hdev, skb);
6373 case HCI_EV_USER_PASSKEY_REQUEST:
6374 hci_user_passkey_request_evt(hdev, skb);
6377 case HCI_EV_USER_PASSKEY_NOTIFY:
6378 hci_user_passkey_notify_evt(hdev, skb);
6381 case HCI_EV_KEYPRESS_NOTIFY:
6382 hci_keypress_notify_evt(hdev, skb);
6385 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6386 hci_simple_pair_complete_evt(hdev, skb);
6389 case HCI_EV_REMOTE_HOST_FEATURES:
6390 hci_remote_host_features_evt(hdev, skb);
6393 case HCI_EV_LE_META:
6394 hci_le_meta_evt(hdev, skb);
6397 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6398 hci_remote_oob_data_request_evt(hdev, skb);
6401 #if IS_ENABLED(CONFIG_BT_HS)
6402 case HCI_EV_CHANNEL_SELECTED:
6403 hci_chan_selected_evt(hdev, skb);
6406 case HCI_EV_PHY_LINK_COMPLETE:
6407 hci_phy_link_complete_evt(hdev, skb);
6410 case HCI_EV_LOGICAL_LINK_COMPLETE:
6411 hci_loglink_complete_evt(hdev, skb);
6414 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6415 hci_disconn_loglink_complete_evt(hdev, skb);
6418 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6419 hci_disconn_phylink_complete_evt(hdev, skb);
6423 case HCI_EV_NUM_COMP_BLOCKS:
6424 hci_num_comp_blocks_evt(hdev, skb);
6428 msft_vendor_evt(hdev, skb);
6432 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6437 req_complete(hdev, status, opcode);
6438 } else if (req_complete_skb) {
6439 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6440 kfree_skb(orig_skb);
6443 req_complete_skb(hdev, status, opcode, orig_skb);
6447 kfree_skb(orig_skb);
6449 hdev->stat.evt_rx++;