/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI event handling. */
#include <asm/unaligned.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "hci_codec.h"
/* 16-byte all-zero link key used to detect blank/invalid keys. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Convert seconds to jiffies via the msec helper. */
#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
/* Handle HCI Event packets */
51 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
56 data = skb_pull_data(skb, len);
58 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
63 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
68 data = skb_pull_data(skb, len);
70 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
75 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
80 data = skb_pull_data(skb, len);
82 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
87 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
90 struct hci_ev_status *rp = data;
92 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
94 /* It is possible that we receive Inquiry Complete event right
95 * before we receive Inquiry Cancel Command Complete event, in
96 * which case the latter event should have status of Command
97 * Disallowed (0x0c). This should not be treated as error, since
98 * we actually achieve what Inquiry Cancel wants to achieve,
99 * which is to end the last Inquiry session.
101 if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
102 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
109 clear_bit(HCI_INQUIRY, &hdev->flags);
110 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
111 wake_up_bit(&hdev->flags, HCI_INQUIRY);
114 /* Set discovery state to stopped if we're not doing LE active
117 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
118 hdev->le_scan_type != LE_SCAN_ACTIVE)
119 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
120 hci_dev_unlock(hdev);
122 hci_conn_check_pending(hdev);
127 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
130 struct hci_ev_status *rp = data;
132 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
137 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
142 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
145 struct hci_ev_status *rp = data;
147 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
152 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
154 hci_conn_check_pending(hdev);
159 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
162 struct hci_ev_status *rp = data;
164 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
169 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
172 struct hci_rp_role_discovery *rp = data;
173 struct hci_conn *conn;
175 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
182 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
184 conn->role = rp->role;
186 hci_dev_unlock(hdev);
191 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
194 struct hci_rp_read_link_policy *rp = data;
195 struct hci_conn *conn;
197 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
204 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
206 conn->link_policy = __le16_to_cpu(rp->policy);
208 hci_dev_unlock(hdev);
213 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
216 struct hci_rp_write_link_policy *rp = data;
217 struct hci_conn *conn;
220 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
225 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
231 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
233 conn->link_policy = get_unaligned_le16(sent + 2);
235 hci_dev_unlock(hdev);
240 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
243 struct hci_rp_read_def_link_policy *rp = data;
245 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
250 hdev->link_policy = __le16_to_cpu(rp->policy);
255 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
258 struct hci_ev_status *rp = data;
261 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
266 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
270 hdev->link_policy = get_unaligned_le16(sent);
275 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
277 struct hci_ev_status *rp = data;
279 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
281 clear_bit(HCI_RESET, &hdev->flags);
286 /* Reset all non-persistent flags */
287 hci_dev_clear_volatile_flags(hdev);
289 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
291 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
292 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
294 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
295 hdev->adv_data_len = 0;
297 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
298 hdev->scan_rsp_data_len = 0;
300 hdev->le_scan_type = LE_SCAN_PASSIVE;
302 hdev->ssp_debug_mode = 0;
304 hci_bdaddr_list_clear(&hdev->le_accept_list);
305 hci_bdaddr_list_clear(&hdev->le_resolv_list);
310 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
313 struct hci_rp_read_stored_link_key *rp = data;
314 struct hci_cp_read_stored_link_key *sent;
316 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
318 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
322 if (!rp->status && sent->read_all == 0x01) {
323 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
324 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
330 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
333 struct hci_rp_delete_stored_link_key *rp = data;
336 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
341 num_keys = le16_to_cpu(rp->num_keys);
343 if (num_keys <= hdev->stored_num_keys)
344 hdev->stored_num_keys -= num_keys;
346 hdev->stored_num_keys = 0;
351 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
354 struct hci_ev_status *rp = data;
357 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
359 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
365 if (hci_dev_test_flag(hdev, HCI_MGMT))
366 mgmt_set_local_name_complete(hdev, sent, rp->status);
367 else if (!rp->status)
368 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
370 hci_dev_unlock(hdev);
375 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
378 struct hci_rp_read_local_name *rp = data;
380 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
385 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
386 hci_dev_test_flag(hdev, HCI_CONFIG))
387 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
392 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
395 struct hci_ev_status *rp = data;
398 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
400 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
407 __u8 param = *((__u8 *) sent);
409 if (param == AUTH_ENABLED)
410 set_bit(HCI_AUTH, &hdev->flags);
412 clear_bit(HCI_AUTH, &hdev->flags);
415 if (hci_dev_test_flag(hdev, HCI_MGMT))
416 mgmt_auth_enable_complete(hdev, rp->status);
418 hci_dev_unlock(hdev);
423 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
426 struct hci_ev_status *rp = data;
430 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
435 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
439 param = *((__u8 *) sent);
442 set_bit(HCI_ENCRYPT, &hdev->flags);
444 clear_bit(HCI_ENCRYPT, &hdev->flags);
449 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
452 struct hci_ev_status *rp = data;
456 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
458 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
462 param = *((__u8 *) sent);
467 hdev->discov_timeout = 0;
471 if (param & SCAN_INQUIRY)
472 set_bit(HCI_ISCAN, &hdev->flags);
474 clear_bit(HCI_ISCAN, &hdev->flags);
476 if (param & SCAN_PAGE)
477 set_bit(HCI_PSCAN, &hdev->flags);
479 clear_bit(HCI_PSCAN, &hdev->flags);
482 hci_dev_unlock(hdev);
487 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
490 struct hci_ev_status *rp = data;
491 struct hci_cp_set_event_filter *cp;
494 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
499 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
503 cp = (struct hci_cp_set_event_filter *)sent;
505 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
506 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
508 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
513 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
516 struct hci_rp_read_class_of_dev *rp = data;
519 return HCI_ERROR_UNSPECIFIED;
521 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
526 memcpy(hdev->dev_class, rp->dev_class, 3);
528 bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
529 hdev->dev_class[1], hdev->dev_class[0]);
534 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
537 struct hci_ev_status *rp = data;
540 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
542 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
549 memcpy(hdev->dev_class, sent, 3);
551 if (hci_dev_test_flag(hdev, HCI_MGMT))
552 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
554 hci_dev_unlock(hdev);
559 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
562 struct hci_rp_read_voice_setting *rp = data;
565 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
570 setting = __le16_to_cpu(rp->voice_setting);
572 if (hdev->voice_setting == setting)
575 hdev->voice_setting = setting;
577 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
580 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
585 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
588 struct hci_ev_status *rp = data;
592 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
597 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
601 setting = get_unaligned_le16(sent);
603 if (hdev->voice_setting == setting)
606 hdev->voice_setting = setting;
608 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
611 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
616 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
619 struct hci_rp_read_num_supported_iac *rp = data;
621 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
626 hdev->num_iac = rp->num_iac;
628 bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
633 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
636 struct hci_ev_status *rp = data;
637 struct hci_cp_write_ssp_mode *sent;
639 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
641 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
649 hdev->features[1][0] |= LMP_HOST_SSP;
651 hdev->features[1][0] &= ~LMP_HOST_SSP;
656 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
658 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
661 hci_dev_unlock(hdev);
666 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
669 struct hci_ev_status *rp = data;
670 struct hci_cp_write_sc_support *sent;
672 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
674 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
682 hdev->features[1][0] |= LMP_HOST_SC;
684 hdev->features[1][0] &= ~LMP_HOST_SC;
687 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
689 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
691 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
694 hci_dev_unlock(hdev);
699 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
702 struct hci_rp_read_local_version *rp = data;
704 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
709 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
710 hci_dev_test_flag(hdev, HCI_CONFIG)) {
711 hdev->hci_ver = rp->hci_ver;
712 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
713 hdev->lmp_ver = rp->lmp_ver;
714 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
715 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
721 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
724 struct hci_rp_read_enc_key_size *rp = data;
725 struct hci_conn *conn;
727 u8 status = rp->status;
729 bt_dev_dbg(hdev, "status 0x%2.2x", status);
731 handle = le16_to_cpu(rp->handle);
735 conn = hci_conn_hash_lookup_handle(hdev, handle);
741 /* While unexpected, the read_enc_key_size command may fail. The most
742 * secure approach is to then assume the key size is 0 to force a
746 bt_dev_err(hdev, "failed to read key size for handle %u",
748 conn->enc_key_size = 0;
750 conn->enc_key_size = rp->key_size;
753 if (conn->enc_key_size < hdev->min_enc_key_size) {
754 /* As slave role, the conn->state has been set to
755 * BT_CONNECTED and l2cap conn req might not be received
756 * yet, at this moment the l2cap layer almost does
757 * nothing with the non-zero status.
758 * So we also clear encrypt related bits, and then the
759 * handler of l2cap conn req will get the right secure
760 * state at a later time.
762 status = HCI_ERROR_AUTH_FAILURE;
763 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
764 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
768 hci_encrypt_cfm(conn, status);
771 hci_dev_unlock(hdev);
776 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
779 struct hci_rp_read_local_commands *rp = data;
781 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
786 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
787 hci_dev_test_flag(hdev, HCI_CONFIG))
788 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
793 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
796 struct hci_rp_read_auth_payload_to *rp = data;
797 struct hci_conn *conn;
799 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
806 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
808 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
810 hci_dev_unlock(hdev);
815 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
818 struct hci_rp_write_auth_payload_to *rp = data;
819 struct hci_conn *conn;
822 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
827 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
833 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
835 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
837 hci_dev_unlock(hdev);
842 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
845 struct hci_rp_read_local_features *rp = data;
847 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
852 memcpy(hdev->features, rp->features, 8);
854 /* Adjust default settings according to features
855 * supported by device. */
857 if (hdev->features[0][0] & LMP_3SLOT)
858 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
860 if (hdev->features[0][0] & LMP_5SLOT)
861 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
863 if (hdev->features[0][1] & LMP_HV2) {
864 hdev->pkt_type |= (HCI_HV2);
865 hdev->esco_type |= (ESCO_HV2);
868 if (hdev->features[0][1] & LMP_HV3) {
869 hdev->pkt_type |= (HCI_HV3);
870 hdev->esco_type |= (ESCO_HV3);
873 if (lmp_esco_capable(hdev))
874 hdev->esco_type |= (ESCO_EV3);
876 if (hdev->features[0][4] & LMP_EV4)
877 hdev->esco_type |= (ESCO_EV4);
879 if (hdev->features[0][4] & LMP_EV5)
880 hdev->esco_type |= (ESCO_EV5);
882 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
883 hdev->esco_type |= (ESCO_2EV3);
885 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
886 hdev->esco_type |= (ESCO_3EV3);
888 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
889 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
894 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
897 struct hci_rp_read_local_ext_features *rp = data;
899 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
904 if (hdev->max_page < rp->max_page) {
905 if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
907 bt_dev_warn(hdev, "broken local ext features page 2");
909 hdev->max_page = rp->max_page;
912 if (rp->page < HCI_MAX_PAGES)
913 memcpy(hdev->features[rp->page], rp->features, 8);
918 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
921 struct hci_rp_read_flow_control_mode *rp = data;
923 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
928 hdev->flow_ctl_mode = rp->mode;
933 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
936 struct hci_rp_read_buffer_size *rp = data;
938 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
943 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
944 hdev->sco_mtu = rp->sco_mtu;
945 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
946 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
948 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
953 hdev->acl_cnt = hdev->acl_pkts;
954 hdev->sco_cnt = hdev->sco_pkts;
956 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
957 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
962 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
965 struct hci_rp_read_bd_addr *rp = data;
967 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
972 if (test_bit(HCI_INIT, &hdev->flags))
973 bacpy(&hdev->bdaddr, &rp->bdaddr);
975 if (hci_dev_test_flag(hdev, HCI_SETUP))
976 bacpy(&hdev->setup_addr, &rp->bdaddr);
981 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
984 struct hci_rp_read_local_pairing_opts *rp = data;
986 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
991 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
992 hci_dev_test_flag(hdev, HCI_CONFIG)) {
993 hdev->pairing_opts = rp->pairing_opts;
994 hdev->max_enc_key_size = rp->max_key_size;
1000 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
1001 struct sk_buff *skb)
1003 struct hci_rp_read_page_scan_activity *rp = data;
1005 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1010 if (test_bit(HCI_INIT, &hdev->flags)) {
1011 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1012 hdev->page_scan_window = __le16_to_cpu(rp->window);
1018 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1019 struct sk_buff *skb)
1021 struct hci_ev_status *rp = data;
1022 struct hci_cp_write_page_scan_activity *sent;
1024 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1029 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1033 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1034 hdev->page_scan_window = __le16_to_cpu(sent->window);
1039 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1040 struct sk_buff *skb)
1042 struct hci_rp_read_page_scan_type *rp = data;
1044 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1049 if (test_bit(HCI_INIT, &hdev->flags))
1050 hdev->page_scan_type = rp->type;
1055 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1056 struct sk_buff *skb)
1058 struct hci_ev_status *rp = data;
1061 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1066 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1068 hdev->page_scan_type = *type;
1073 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1074 struct sk_buff *skb)
1076 struct hci_rp_read_data_block_size *rp = data;
1078 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1083 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1084 hdev->block_len = __le16_to_cpu(rp->block_len);
1085 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1087 hdev->block_cnt = hdev->num_blocks;
1089 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1090 hdev->block_cnt, hdev->block_len);
1095 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1096 struct sk_buff *skb)
1098 struct hci_rp_read_clock *rp = data;
1099 struct hci_cp_read_clock *cp;
1100 struct hci_conn *conn;
1102 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1109 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1113 if (cp->which == 0x00) {
1114 hdev->clock = le32_to_cpu(rp->clock);
1118 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1120 conn->clock = le32_to_cpu(rp->clock);
1121 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1125 hci_dev_unlock(hdev);
1129 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1130 struct sk_buff *skb)
1132 struct hci_rp_read_local_amp_info *rp = data;
1134 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1139 hdev->amp_status = rp->amp_status;
1140 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1141 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1142 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1143 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1144 hdev->amp_type = rp->amp_type;
1145 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1146 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1147 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1148 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
1153 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1154 struct sk_buff *skb)
1156 struct hci_rp_read_inq_rsp_tx_power *rp = data;
1158 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1163 hdev->inq_tx_power = rp->tx_power;
1168 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1169 struct sk_buff *skb)
1171 struct hci_rp_read_def_err_data_reporting *rp = data;
1173 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1178 hdev->err_data_reporting = rp->err_data_reporting;
1183 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1184 struct sk_buff *skb)
1186 struct hci_ev_status *rp = data;
1187 struct hci_cp_write_def_err_data_reporting *cp;
1189 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1194 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1198 hdev->err_data_reporting = cp->err_data_reporting;
1203 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1204 struct sk_buff *skb)
1206 struct hci_rp_pin_code_reply *rp = data;
1207 struct hci_cp_pin_code_reply *cp;
1208 struct hci_conn *conn;
1210 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1214 if (hci_dev_test_flag(hdev, HCI_MGMT))
1215 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1220 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1224 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1226 conn->pin_length = cp->pin_len;
1229 hci_dev_unlock(hdev);
1233 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1234 struct sk_buff *skb)
1236 struct hci_rp_pin_code_neg_reply *rp = data;
1238 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1242 if (hci_dev_test_flag(hdev, HCI_MGMT))
1243 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1246 hci_dev_unlock(hdev);
1251 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1252 struct sk_buff *skb)
1254 struct hci_rp_le_read_buffer_size *rp = data;
1256 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1261 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1262 hdev->le_pkts = rp->le_max_pkt;
1264 hdev->le_cnt = hdev->le_pkts;
1266 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1271 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1272 struct sk_buff *skb)
1274 struct hci_rp_le_read_local_features *rp = data;
1276 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1281 memcpy(hdev->le_features, rp->features, 8);
1286 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1287 struct sk_buff *skb)
1289 struct hci_rp_le_read_adv_tx_power *rp = data;
1291 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1296 hdev->adv_tx_power = rp->tx_power;
1301 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1302 struct sk_buff *skb)
1304 struct hci_rp_user_confirm_reply *rp = data;
1306 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1310 if (hci_dev_test_flag(hdev, HCI_MGMT))
1311 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1314 hci_dev_unlock(hdev);
1319 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1320 struct sk_buff *skb)
1322 struct hci_rp_user_confirm_reply *rp = data;
1324 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1328 if (hci_dev_test_flag(hdev, HCI_MGMT))
1329 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1330 ACL_LINK, 0, rp->status);
1332 hci_dev_unlock(hdev);
1337 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1338 struct sk_buff *skb)
1340 struct hci_rp_user_confirm_reply *rp = data;
1342 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1346 if (hci_dev_test_flag(hdev, HCI_MGMT))
1347 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1350 hci_dev_unlock(hdev);
1355 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1356 struct sk_buff *skb)
1358 struct hci_rp_user_confirm_reply *rp = data;
1360 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1364 if (hci_dev_test_flag(hdev, HCI_MGMT))
1365 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1366 ACL_LINK, 0, rp->status);
1368 hci_dev_unlock(hdev);
1373 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1374 struct sk_buff *skb)
1376 struct hci_rp_read_local_oob_data *rp = data;
1378 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1383 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1384 struct sk_buff *skb)
1386 struct hci_rp_read_local_oob_ext_data *rp = data;
1388 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1393 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1394 struct sk_buff *skb)
1396 struct hci_ev_status *rp = data;
1399 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1404 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1410 bacpy(&hdev->random_addr, sent);
1412 if (!bacmp(&hdev->rpa, sent)) {
1413 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1414 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1415 secs_to_jiffies(hdev->rpa_timeout));
1418 hci_dev_unlock(hdev);
1423 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1424 struct sk_buff *skb)
1426 struct hci_ev_status *rp = data;
1427 struct hci_cp_le_set_default_phy *cp;
1429 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1434 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1440 hdev->le_tx_def_phys = cp->tx_phys;
1441 hdev->le_rx_def_phys = cp->rx_phys;
1443 hci_dev_unlock(hdev);
1448 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1449 struct sk_buff *skb)
1451 struct hci_ev_status *rp = data;
1452 struct hci_cp_le_set_adv_set_rand_addr *cp;
1453 struct adv_info *adv;
1455 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1460 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1461 /* Update only in case the adv instance since handle 0x00 shall be using
1462 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1463 * non-extended adverting.
1465 if (!cp || !cp->handle)
1470 adv = hci_find_adv_instance(hdev, cp->handle);
1472 bacpy(&adv->random_addr, &cp->bdaddr);
1473 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1474 adv->rpa_expired = false;
1475 queue_delayed_work(hdev->workqueue,
1476 &adv->rpa_expired_cb,
1477 secs_to_jiffies(hdev->rpa_timeout));
1481 hci_dev_unlock(hdev);
1486 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1487 struct sk_buff *skb)
1489 struct hci_ev_status *rp = data;
1493 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1498 instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1504 err = hci_remove_adv_instance(hdev, *instance);
1506 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1509 hci_dev_unlock(hdev);
1514 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1515 struct sk_buff *skb)
1517 struct hci_ev_status *rp = data;
1518 struct adv_info *adv, *n;
1521 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1526 if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1531 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1532 u8 instance = adv->instance;
1534 err = hci_remove_adv_instance(hdev, instance);
1536 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1540 hci_dev_unlock(hdev);
1545 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1546 struct sk_buff *skb)
1548 struct hci_rp_le_read_transmit_power *rp = data;
1550 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1555 hdev->min_le_tx_power = rp->min_le_tx_power;
1556 hdev->max_le_tx_power = rp->max_le_tx_power;
1561 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1562 struct sk_buff *skb)
1564 struct hci_ev_status *rp = data;
1565 struct hci_cp_le_set_privacy_mode *cp;
1566 struct hci_conn_params *params;
1568 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1573 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1579 params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1581 WRITE_ONCE(params->privacy_mode, cp->mode);
1583 hci_dev_unlock(hdev);
1588 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1589 struct sk_buff *skb)
1591 struct hci_ev_status *rp = data;
1594 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1599 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1605 /* If we're doing connection initiation as peripheral. Set a
1606 * timeout in case something goes wrong.
1609 struct hci_conn *conn;
1611 hci_dev_set_flag(hdev, HCI_LE_ADV);
1613 conn = hci_lookup_le_connect(hdev);
1615 queue_delayed_work(hdev->workqueue,
1616 &conn->le_conn_timeout,
1617 conn->conn_timeout);
1619 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1622 hci_dev_unlock(hdev);
1627 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1628 struct sk_buff *skb)
1630 struct hci_cp_le_set_ext_adv_enable *cp;
1631 struct hci_cp_ext_adv_set *set;
1632 struct adv_info *adv = NULL, *n;
1633 struct hci_ev_status *rp = data;
1635 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1640 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1644 set = (void *)cp->data;
1648 if (cp->num_of_sets)
1649 adv = hci_find_adv_instance(hdev, set->handle);
1652 struct hci_conn *conn;
1654 hci_dev_set_flag(hdev, HCI_LE_ADV);
1657 adv->enabled = true;
1659 conn = hci_lookup_le_connect(hdev);
1661 queue_delayed_work(hdev->workqueue,
1662 &conn->le_conn_timeout,
1663 conn->conn_timeout);
1665 if (cp->num_of_sets) {
1667 adv->enabled = false;
1669 /* If just one instance was disabled check if there are
1670 * any other instance enabled before clearing HCI_LE_ADV
1672 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1678 /* All instances shall be considered disabled */
1679 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1681 adv->enabled = false;
1684 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1688 hci_dev_unlock(hdev);
1692 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1693 struct sk_buff *skb)
1695 struct hci_cp_le_set_scan_param *cp;
1696 struct hci_ev_status *rp = data;
1698 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1703 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1709 hdev->le_scan_type = cp->type;
1711 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_PARAMS.
 * Caches the scan type from the first PHY's parameter block in
 * hdev->le_scan_type. (Status guard elided from this listing.)
 */
1716 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1717 struct sk_buff *skb)
1719 struct hci_cp_le_set_ext_scan_params *cp;
1720 struct hci_ev_status *rp = data;
1721 struct hci_cp_le_scan_phy_params *phy_param;
1723 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1728 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
/* Per-PHY parameters follow the fixed header; first entry only */
1732 phy_param = (void *)cp->data;
1736 hdev->le_scan_type = phy_param->type;
1738 hci_dev_unlock(hdev);
/* Return true if a deferred advertising report is buffered in the
 * discovery state (last_adv_addr differs from BDADDR_ANY).
 */
1743 static bool has_pending_adv_report(struct hci_dev *hdev)
1745 struct discovery_state *d = &hdev->discovery;
1747 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop any buffered advertising report by resetting the stored address
 * to BDADDR_ANY and zeroing the cached data length.
 */
1750 static void clear_pending_adv_report(struct hci_dev *hdev)
1752 struct discovery_state *d = &hdev->discovery;
1754 bacpy(&d->last_adv_addr, BDADDR_ANY);
1755 d->last_adv_data_len = 0;
/* Buffer an advertising report in the discovery state so it can be
 * reported later (e.g. merged with a following scan response).
 * Oversized payloads are rejected via the max_adv_len() bound check
 * (early return elided from this listing).
 */
1758 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1759 u8 bdaddr_type, s8 rssi, u32 flags,
1762 struct discovery_state *d = &hdev->discovery;
1764 if (len > max_adv_len(hdev))
1767 bacpy(&d->last_adv_addr, bdaddr);
1768 d->last_adv_addr_type = bdaddr_type;
1769 d->last_adv_rssi = rssi;
1770 d->last_adv_flags = flags;
1771 memcpy(d->last_adv_data, data, len);
1772 d->last_adv_data_len = len;
/* Common completion logic for LE scan enable/disable (legacy and
 * extended variants). Updates HCI_LE_SCAN, flushes a pending adv
 * report on disable, and advances the discovery state machine.
 * NOTE(review): switch/case braces and the surrounding lock are
 * elided from this listing.
 */
1775 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1780 case LE_SCAN_ENABLE:
1781 hci_dev_set_flag(hdev, HCI_LE_SCAN);
/* Active scans merge adv + scan-rsp, so start with a clean buffer */
1782 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1783 clear_pending_adv_report(hdev);
1784 if (hci_dev_test_flag(hdev, HCI_MESH))
1785 hci_discovery_set_state(hdev, DISCOVERY_FINDING)
1788 case LE_SCAN_DISABLE:
1789 /* We do this here instead of when setting DISCOVERY_STOPPED
1790 * since the latter would potentially require waiting for
1791 * inquiry to stop too.
1793 if (has_pending_adv_report(hdev)) {
1794 struct discovery_state *d = &hdev->discovery;
1796 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1797 d->last_adv_addr_type, NULL,
1798 d->last_adv_rssi, d->last_adv_flags,
1800 d->last_adv_data_len, NULL, 0, 0);
1803 /* Cancel this timer so that we don't try to disable scanning
1804 * when it's already disabled.
1806 cancel_delayed_work(&hdev->le_scan_disable);
1808 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1810 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1811 * interrupted scanning due to a connect request. Mark
1812 * therefore discovery as stopped.
1814 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1815 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1816 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1817 hdev->discovery.state == DISCOVERY_FINDING)
1818 queue_work(hdev->workqueue, &hdev->reenable_adv_work);
/* default: reserved enable values are logged, not acted upon */
1823 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1828 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 * Delegates the state update to le_set_scan_enable_complete() using the
 * enable value from the sent command. (Status guard elided.)
 */
1831 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1832 struct sk_buff *skb)
1834 struct hci_cp_le_set_scan_enable *cp;
1835 struct hci_ev_status *rp = data;
1837 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1842 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1846 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_ENABLE.
 * Same completion path as the legacy variant: hand the sent enable value
 * to le_set_scan_enable_complete(). (Status guard elided.)
 */
1851 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1852 struct sk_buff *skb)
1854 struct hci_cp_le_set_ext_scan_enable *cp;
1855 struct hci_ev_status *rp = data;
1857 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1862 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1866 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete handler for LE Read Number of Supported Advertising
 * Sets: caches the controller's advertising-set count in hdev.
 */
1871 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1872 struct sk_buff *skb)
1874 struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1876 bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1882 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete handler for LE Read Accept List Size: caches the
 * controller's filter accept list capacity in hdev.
 */
1887 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1888 struct sk_buff *skb)
1890 struct hci_rp_le_read_accept_list_size *rp = data;
1892 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1897 hdev->le_accept_list_size = rp->size;
/* Command Complete handler for LE Clear Accept List: drops the host's
 * shadow copy of the accept list to match the (now empty) controller list.
 */
1902 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1903 struct sk_buff *skb)
1905 struct hci_ev_status *rp = data;
1907 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1913 hci_bdaddr_list_clear(&hdev->le_accept_list);
1914 hci_dev_unlock(hdev);
/* Command Complete handler for LE Add Device To Accept List: mirrors the
 * added address into the host's shadow accept list.
 */
1919 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1920 struct sk_buff *skb)
1922 struct hci_cp_le_add_to_accept_list *sent;
1923 struct hci_ev_status *rp = data;
1925 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1930 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1935 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1937 hci_dev_unlock(hdev);
/* Command Complete handler for LE Remove Device From Accept List:
 * removes the address from the host's shadow accept list.
 */
1942 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1943 struct sk_buff *skb)
1945 struct hci_cp_le_del_from_accept_list *sent;
1946 struct hci_ev_status *rp = data;
1948 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1953 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1958 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1960 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Supported States: caches the
 * 8-byte LE state combination bitmap in hdev->le_states.
 */
1965 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1966 struct sk_buff *skb)
1968 struct hci_rp_le_read_supported_states *rp = data;
1970 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1975 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete handler for LE Read Suggested Default Data Length:
 * caches default TX octets/time (little-endian on the wire) in hdev.
 */
1980 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1981 struct sk_buff *skb)
1983 struct hci_rp_le_read_def_data_len *rp = data;
1985 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1990 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1991 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Command Complete handler for LE Write Suggested Default Data Length:
 * on success, mirrors the values we sent back into hdev's cache.
 */
1996 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1997 struct sk_buff *skb)
1999 struct hci_cp_le_write_def_data_len *sent;
2000 struct hci_ev_status *rp = data;
2002 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2007 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
2011 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
2012 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete handler for LE Add Device To Resolving List: mirrors
 * the address and IRK(s) into the host's shadow resolving list.
 */
2017 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2018 struct sk_buff *skb)
2020 struct hci_cp_le_add_to_resolv_list *sent;
2021 struct hci_ev_status *rp = data;
2023 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2028 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2033 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2034 sent->bdaddr_type, sent->peer_irk,
2036 hci_dev_unlock(hdev);
/* Command Complete handler for LE Remove Device From Resolving List:
 * removes the entry from the host's shadow resolving list.
 */
2041 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2042 struct sk_buff *skb)
2044 struct hci_cp_le_del_from_resolv_list *sent;
2045 struct hci_ev_status *rp = data;
2047 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2052 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2057 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2059 hci_dev_unlock(hdev);
/* Command Complete handler for LE Clear Resolving List: drops the host's
 * shadow resolving list to match the (now empty) controller list.
 */
2064 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2065 struct sk_buff *skb)
2067 struct hci_ev_status *rp = data;
2069 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2075 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2076 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Resolving List Size: caches the
 * controller's resolving-list capacity in hdev.
 */
2081 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2082 struct sk_buff *skb)
2084 struct hci_rp_le_read_resolv_list_size *rp = data;
2086 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2091 hdev->le_resolv_list_size = rp->size;
/* Command Complete handler for LE Set Address Resolution Enable: tracks
 * whether link-layer RPA resolution is active via HCI_LL_RPA_RESOLUTION.
 * NOTE(review): the declaration of 'sent' and the enable test between
 * the visible lines are elided from this listing.
 */
2096 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2097 struct sk_buff *skb)
2099 struct hci_ev_status *rp = data;
2102 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2107 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2114 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2116 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2118 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Maximum Data Length: caches the
 * controller's max TX/RX octets and times in hdev.
 */
2123 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2124 struct sk_buff *skb)
2126 struct hci_rp_le_read_max_data_len *rp = data;
2128 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2133 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2134 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2135 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2136 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete handler for Write LE Host Supported: updates the host
 * feature bits (LMP_HOST_LE / LMP_HOST_LE_BREDR) and the HCI_LE_ENABLED
 * flag to reflect what was written. Disabling LE also clears
 * HCI_ADVERTISING. (if/else structure partially elided from listing.)
 */
2141 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2142 struct sk_buff *skb)
2144 struct hci_cp_write_le_host_supported *sent;
2145 struct hci_ev_status *rp = data;
2147 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2152 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2159 hdev->features[1][0] |= LMP_HOST_LE;
2160 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2162 hdev->features[1][0] &= ~LMP_HOST_LE;
2163 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2164 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2168 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2170 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2172 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SET_ADV_PARAM: caches the own
 * address type used for advertising in hdev->adv_addr_type.
 */
2177 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2178 struct sk_buff *skb)
2180 struct hci_cp_le_set_adv_param *cp;
2181 struct hci_ev_status *rp = data;
2183 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2188 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2193 hdev->adv_addr_type = cp->own_address_type;
2194 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SET_EXT_ADV_PARAMS: records the
 * own address type and the controller-selected TX power, both globally
 * (instance 0) and on the matching adv instance, then refreshes the
 * advertising data now that TX power is known.
 */
2199 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2200 struct sk_buff *skb)
2202 struct hci_rp_le_set_ext_adv_params *rp = data;
2203 struct hci_cp_le_set_ext_adv_params *cp;
2204 struct adv_info *adv_instance;
2206 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2211 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2216 hdev->adv_addr_type = cp->own_addr_type;
2218 /* Store in hdev for instance 0 */
2219 hdev->adv_tx_power = rp->tx_power;
2221 adv_instance = hci_find_adv_instance(hdev, cp->handle);
2223 adv_instance->tx_power = rp->tx_power;
2225 /* Update adv data as tx power is known now */
2226 hci_update_adv_data(hdev, cp->handle);
2228 hci_dev_unlock(hdev);
/* Command Complete handler for Read RSSI: stores the reported RSSI on
 * the connection matching the returned handle.
 */
2233 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2234 struct sk_buff *skb)
2236 struct hci_rp_read_rssi *rp = data;
2237 struct hci_conn *conn;
2239 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2246 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2248 conn->rssi = rp->rssi;
2250 hci_dev_unlock(hdev);
/* Command Complete handler for Read Transmit Power Level: stores the
 * reported value as either current or maximum TX power on the matching
 * connection, depending on the 'type' field of the sent command.
 * (case labels for the switch are elided from this listing.)
 */
2255 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2256 struct sk_buff *skb)
2258 struct hci_cp_read_tx_power *sent;
2259 struct hci_rp_read_tx_power *rp = data;
2260 struct hci_conn *conn;
2262 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2267 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2273 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2277 switch (sent->type) {
2279 conn->tx_power = rp->tx_power;
2282 conn->max_tx_power = rp->tx_power;
2287 hci_dev_unlock(hdev);
/* Command Complete handler for Write SSP Debug Mode: caches the mode
 * byte that was sent in hdev->ssp_debug_mode.
 * NOTE(review): declaration of 'mode' is elided from this listing.
 */
2291 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2292 struct sk_buff *skb)
2294 struct hci_ev_status *rp = data;
2297 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2302 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2304 hdev->ssp_debug_mode = *mode;
/* Command Status handler for HCI_OP_INQUIRY: on success marks the
 * device as actively inquiring; on failure re-checks pending
 * connection attempts.
 */
2309 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2311 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2314 hci_conn_check_pending(hdev);
2318 if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2319 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status handler for HCI_OP_CREATE_CONN. On failure, closes the
 * pending ACL connection (unless status 0x0c "command disallowed" with
 * attempts left, where it falls back to BT_CONNECT2 for retry). On
 * success with no existing conn object, creates one for the target.
 * (success/failure branch structure partially elided from listing.)
 */
2322 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2324 struct hci_cp_create_conn *cp;
2325 struct hci_conn *conn;
2327 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2329 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2335 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2337 bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2340 if (conn && conn->state == BT_CONNECT) {
/* 0x0c = Command Disallowed: retry up to two attempts before giving up */
2341 if (status != 0x0c || conn->attempt > 2) {
2342 conn->state = BT_CLOSED;
2343 hci_connect_cfm(conn, status);
2346 conn->state = BT_CONNECT2;
2350 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
2353 bt_dev_err(hdev, "no memory for new connection");
2357 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_ADD_SCO: on failure, looks up the
 * ACL by the handle in the sent command and closes its attached SCO
 * link, notifying the connect callback with the error.
 * NOTE(review): the acl->link -> sco derivation is elided from this
 * listing; confirm against the full source.
 */
2360 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2362 struct hci_cp_add_sco *cp;
2363 struct hci_conn *acl, *sco;
2366 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2371 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2375 handle = __le16_to_cpu(cp->handle);
2377 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2381 acl = hci_conn_hash_lookup_handle(hdev, handle);
2385 sco->state = BT_CLOSED;
2387 hci_connect_cfm(sco, status);
2392 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_AUTH_REQUESTED: on failure during
 * BT_CONFIG, reports the connect failure and drops the reference.
 */
2395 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2397 struct hci_cp_auth_requested *cp;
2398 struct hci_conn *conn;
2400 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2405 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2411 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2413 if (conn->state == BT_CONFIG) {
2414 hci_connect_cfm(conn, status);
2415 hci_conn_drop(conn);
2419 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SET_CONN_ENCRYPT: on failure during
 * BT_CONFIG, reports the connect failure and drops the reference.
 */
2422 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2424 struct hci_cp_set_conn_encrypt *cp;
2425 struct hci_conn *conn;
2427 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2432 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2438 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2440 if (conn->state == BT_CONFIG) {
2441 hci_connect_cfm(conn, status);
2442 hci_conn_drop(conn);
2446 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection in BT_CONFIG still needs
 * authentication: skip non-outgoing/non-config links and SDP-only
 * security; for non-SSP links without MITM, require it only for
 * MEDIUM/HIGH/FIPS pending security. (return statements elided from
 * this listing.)
 */
2449 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2450 struct hci_conn *conn)
2452 if (conn->state != BT_CONFIG || !conn->out)
2455 if (conn->pending_sec_level == BT_SECURITY_SDP)
2458 /* Only request authentication for SSP connections or non-SSP
2459 * devices with sec_level MEDIUM or HIGH or if MITM protection
2462 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2463 conn->pending_sec_level != BT_SECURITY_FIPS &&
2464 conn->pending_sec_level != BT_SECURITY_HIGH &&
2465 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry cache entry, reusing the
 * page-scan parameters and clock offset captured during inquiry.
 * Returns the result of hci_send_cmd() (0 on queued success).
 */
2471 static int hci_resolve_name(struct hci_dev *hdev,
2472 struct inquiry_entry *e)
2474 struct hci_cp_remote_name_req cp;
2476 memset(&cp, 0, sizeof(cp));
2478 bacpy(&cp.bdaddr, &e->data.bdaddr);
2479 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2480 cp.pscan_mode = e->data.pscan_mode;
2481 cp.clock_offset = e->data.clock_offset;
2483 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off resolution of the next NAME_NEEDED inquiry entry. Returns
 * false when there is nothing left to resolve or the overall name
 * resolution budget has been exceeded. (return statements elided from
 * this listing.)
 */
2486 static bool hci_resolve_next_name(struct hci_dev *hdev)
2488 struct discovery_state *discov = &hdev->discovery;
2489 struct inquiry_entry *e;
2491 if (list_empty(&discov->resolve))
2494 /* We should stop if we already spent too much time resolving names. */
2495 if (time_after(jiffies, discov->name_resolve_timeout)) {
2496 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2500 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2504 if (hci_resolve_name(hdev, e) == 0) {
2505 e->name_state = NAME_PENDING;
/* Handle a resolved (or failed) remote name during discovery: update
 * mgmt connected state if applicable, record the name result on the
 * inquiry cache entry, and either continue with the next pending name
 * or mark discovery stopped. (some guards/labels elided from listing.)
 */
2512 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2513 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2515 struct discovery_state *discov = &hdev->discovery;
2516 struct inquiry_entry *e;
2518 /* Update the mgmt connected state if necessary. Be careful with
2519 * conn objects that exist but are not (yet) connected however.
2520 * Only those in BT_CONFIG or BT_CONNECTED states can be
2521 * considered connected.
2524 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2525 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2526 mgmt_device_connected(hdev, conn, name, name_len);
2528 if (discov->state == DISCOVERY_STOPPED)
2531 if (discov->state == DISCOVERY_STOPPING)
2532 goto discov_complete;
2534 if (discov->state != DISCOVERY_RESOLVING)
2537 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2538 /* If the device was not found in a list of found devices names of which
2539 * are pending. there is no need to continue resolving a next name as it
2540 * will be done upon receiving another Remote Name Request Complete
2547 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2548 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2551 if (hci_resolve_next_name(hdev))
2555 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ. On failure,
 * notifies mgmt via hci_check_pending_name(); then, if the connection
 * needs outgoing authentication, issues HCI_OP_AUTH_REQUESTED.
 * (status/early-return guards elided from this listing.)
 */
2558 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2560 struct hci_cp_remote_name_req *cp;
2561 struct hci_conn *conn;
2563 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2565 /* If successful wait for the name req complete event before
2566 * checking for the need to do authentication */
2570 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2576 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2578 if (hci_dev_test_flag(hdev, HCI_MGMT))
2579 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2584 if (!hci_outgoing_auth_needed(hdev, conn))
2587 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2588 struct hci_cp_auth_requested auth_cp;
2590 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2592 auth_cp.handle = __cpu_to_le16(conn->handle);
2593 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2594 sizeof(auth_cp), &auth_cp);
2598 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_FEATURES: on failure
 * during BT_CONFIG, reports the connect failure and drops the reference.
 */
2601 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2603 struct hci_cp_read_remote_features *cp;
2604 struct hci_conn *conn;
2606 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2611 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2617 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2619 if (conn->state == BT_CONFIG) {
2620 hci_connect_cfm(conn, status);
2621 hci_conn_drop(conn);
2625 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_READ_REMOTE_EXT_FEATURES: on failure
 * during BT_CONFIG, reports the connect failure and drops the reference.
 */
2628 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2630 struct hci_cp_read_remote_ext_features *cp;
2631 struct hci_conn *conn;
2633 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2638 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2644 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2646 if (conn->state == BT_CONFIG) {
2647 hci_connect_cfm(conn, status);
2648 hci_conn_drop(conn);
2652 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN: on failure, finds
 * the ACL by the handle in the sent command and closes its attached
 * SCO/eSCO link, notifying the connect callback with the error.
 * NOTE(review): the acl -> sco derivation is elided from this listing.
 */
2655 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2657 struct hci_cp_setup_sync_conn *cp;
2658 struct hci_conn *acl, *sco;
2661 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2666 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2670 handle = __le16_to_cpu(cp->handle);
2672 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2676 acl = hci_conn_hash_lookup_handle(hdev, handle);
2680 sco->state = BT_CLOSED;
2682 hci_connect_cfm(sco, status);
2687 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_ENHANCED_SETUP_SYNC_CONN: mirrors
 * hci_cs_setup_sync_conn() for the enhanced command — on failure, closes
 * the SCO link attached to the referenced ACL.
 * NOTE(review): the acl -> sco derivation is elided from this listing.
 */
2690 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2692 struct hci_cp_enhanced_setup_sync_conn *cp;
2693 struct hci_conn *acl, *sco;
2696 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2701 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2705 handle = __le16_to_cpu(cp->handle);
2707 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2711 acl = hci_conn_hash_lookup_handle(hdev, handle);
2715 sco->state = BT_CLOSED;
2717 hci_connect_cfm(sco, status);
2722 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SNIFF_MODE: on failure, clears the
 * pending mode-change flag and completes any deferred SCO setup with
 * the error status.
 */
2725 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2727 struct hci_cp_sniff_mode *cp;
2728 struct hci_conn *conn;
2730 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2735 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2741 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2743 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2745 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2746 hci_sco_setup(conn, status);
2749 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE: same failure
 * handling as hci_cs_sniff_mode() — clear the pending mode-change flag
 * and complete any deferred SCO setup with the error status.
 */
2752 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2754 struct hci_cp_exit_sniff_mode *cp;
2755 struct hci_conn *conn;
2757 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2762 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2768 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2770 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2772 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2773 hci_sco_setup(conn, status);
2776 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_DISCONNECT. When the disconnect
 * failed or the host is suspended, performs the cleanup that would
 * otherwise happen on HCI_EV_DISCONN_COMPLETE: mgmt notifications,
 * link-key removal, auto-connect parameter requeueing, re-enabling
 * advertising for LE peripheral links, and informing sockets.
 * NOTE(review): several labels/guards and the 'mgmt_conn' declaration
 * are elided from this listing.
 */
2779 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2781 struct hci_cp_disconnect *cp;
2782 struct hci_conn_params *params;
2783 struct hci_conn *conn;
2786 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2788 /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
2789 * otherwise cleanup the connection immediately.
2791 if (!status && !hdev->suspended)
2794 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2800 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2805 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2806 conn->dst_type, status);
2808 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2809 hdev->cur_adv_instance = conn->adv_instance;
2810 hci_enable_advertising(hdev);
2813 /* Inform sockets conn is gone before we delete it */
2814 hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2819 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2821 if (conn->type == ACL_LINK) {
2822 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2823 hci_remove_link_key(hdev, &conn->dst);
2826 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2828 switch (params->auto_connect) {
2829 case HCI_AUTO_CONN_LINK_LOSS:
2830 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2834 case HCI_AUTO_CONN_DIRECT:
2835 case HCI_AUTO_CONN_ALWAYS:
/* Requeue so the device is reconnected automatically */
2836 hci_pend_le_list_del_init(params);
2837 hci_pend_le_list_add(params, &hdev->pend_le_conns);
2845 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2846 cp->reason, mgmt_conn);
2848 hci_disconn_cfm(conn, cp->reason);
2851 /* If the disconnection failed for any reason, the upper layer
2852 * does not retry to disconnect in current implementation.
2853 * Hence, we need to do some basic cleanup here and re-enable
2854 * advertising if necessary.
2858 hci_dev_unlock(hdev);
/* Map controller-resolved LE address types (0x02/0x03) back to plain
 * public/random, optionally reporting via *resolved whether resolution
 * happened. (switch header, *resolved assignments and default return
 * elided from this listing.)
 */
2861 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2863 /* When using controller based address resolution, then the new
2864 * address types 0x02 and 0x03 are used. These types need to be
2865 * converted back into either public address or random address type
2868 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2871 return ADDR_LE_DEV_PUBLIC;
2872 case ADDR_LE_DEV_RANDOM_RESOLVED:
2875 return ADDR_LE_DEV_RANDOM;
/* Common Command Status follow-up for LE (ext) create connection:
 * locate the pending LE connection and record the initiator/responder
 * address information that SMP will need for the connection's lifetime.
 * (null-check on conn after lookup elided from this listing.)
 */
2883 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2884 u8 peer_addr_type, u8 own_address_type,
2887 struct hci_conn *conn;
2889 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
/* Normalize controller-resolved own address type back to public/random */
2894 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2896 /* Store the initiator and responder address information which
2897 * is needed for SMP. These values will not change during the
2898 * lifetime of the connection.
2900 conn->init_addr_type = own_address_type;
2901 if (own_address_type == ADDR_LE_DEV_RANDOM)
2902 bacpy(&conn->init_addr, &hdev->random_addr);
2904 bacpy(&conn->init_addr, &hdev->bdaddr);
2906 conn->resp_addr_type = peer_addr_type;
2907 bacpy(&conn->resp_addr, peer_addr);
/* Command Status handler for HCI_OP_LE_CREATE_CONN: on success, records
 * SMP address info via cs_le_create_conn(). Failures are handled by
 * hci_conn_failed() through the request completion callbacks.
 */
2910 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2912 struct hci_cp_le_create_conn *cp;
2914 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2916 /* All connection failure handling is taken care of by the
2917 * hci_conn_failed function which is triggered by the HCI
2918 * request completion callbacks used for connecting.
2923 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2929 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2930 cp->own_address_type, cp->filter_policy);
2932 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_LE_EXT_CREATE_CONN: extended-command
 * twin of hci_cs_le_create_conn(); records SMP address info on success.
 */
2935 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2937 struct hci_cp_le_ext_create_conn *cp;
2939 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2941 /* All connection failure handling is taken care of by the
2942 * hci_conn_failed function which is triggered by the HCI
2943 * request completion callbacks used for connecting.
2948 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2954 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2955 cp->own_addr_type, cp->filter_policy);
2957 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_LE_READ_REMOTE_FEATURES: on failure
 * during BT_CONFIG, reports the connect failure and drops the reference.
 */
2960 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2962 struct hci_cp_le_read_remote_features *cp;
2963 struct hci_conn *conn;
2965 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2970 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2976 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2978 if (conn->state == BT_CONFIG) {
2979 hci_connect_cfm(conn, status);
2980 hci_conn_drop(conn);
2984 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_LE_START_ENC: if starting encryption
 * failed while the link is connected, disconnect it with an
 * authentication-failure reason and drop the reference. (status and
 * null-check guards elided from this listing.)
 */
2987 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2989 struct hci_cp_le_start_enc *cp;
2990 struct hci_conn *conn;
2992 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2999 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
3003 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3007 if (conn->state != BT_CONNECTED)
3010 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3011 hci_conn_drop(conn);
3014 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_SWITCH_ROLE: on failure, clears the
 * pending role-switch flag on the target ACL connection.
 */
3017 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3019 struct hci_cp_switch_role *cp;
3020 struct hci_conn *conn;
3022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
3027 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3033 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3035 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3037 hci_dev_unlock(hdev);
/* Handle HCI_EV_INQUIRY_COMPLETE: clear HCI_INQUIRY (waking waiters),
 * then advance the mgmt discovery state machine — either start resolving
 * queued names or mark discovery stopped, taking simultaneous LE
 * scanning (HCI_QUIRK_SIMULTANEOUS_DISCOVERY) into account.
 */
3040 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3041 struct sk_buff *skb)
3043 struct hci_ev_status *ev = data;
3044 struct discovery_state *discov = &hdev->discovery;
3045 struct inquiry_entry *e;
3047 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3049 hci_conn_check_pending(hdev);
3051 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3054 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3055 wake_up_bit(&hdev->flags, HCI_INQUIRY);
3057 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3062 if (discov->state != DISCOVERY_FINDING)
3065 if (list_empty(&discov->resolve)) {
3066 /* When BR/EDR inquiry is active and no LE scanning is in
3067 * progress, then change discovery state to indicate completion.
3069 * When running LE scanning and BR/EDR inquiry simultaneously
3070 * and the LE scan already finished, then change the discovery
3071 * state to indicate completion.
3073 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3074 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3075 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3079 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3080 if (e && hci_resolve_name(hdev, e) == 0) {
3081 e->name_state = NAME_PENDING;
3082 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
/* Budget the total time spent on name resolution */
3083 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3085 /* When BR/EDR inquiry is active and no LE scanning is in
3086 * progress, then change discovery state to indicate completion.
3088 * When running LE scanning and BR/EDR inquiry simultaneously
3089 * and the LE scan already finished, then change the discovery
3090 * state to indicate completion.
3092 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3093 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3094 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3098 hci_dev_unlock(hdev);
/* Handle HCI_EV_INQUIRY_RESULT: validate the event length against the
 * advertised entry count, then for each result update the inquiry cache
 * and report the device to mgmt with RSSI marked invalid (this legacy
 * event carries no RSSI). Periodic-inquiry results are skipped.
 */
3101 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3102 struct sk_buff *skb)
3104 struct hci_ev_inquiry_result *ev = edata;
3105 struct inquiry_data data;
3108 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3109 flex_array_size(ev, info, ev->num)))
3112 bt_dev_dbg(hdev, "num %d", ev->num);
3117 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3122 for (i = 0; i < ev->num; i++) {
3123 struct inquiry_info *info = &ev->info[i];
3126 bacpy(&data.bdaddr, &info->bdaddr);
3127 data.pscan_rep_mode = info->pscan_rep_mode;
3128 data.pscan_period_mode = info->pscan_period_mode;
3129 data.pscan_mode = info->pscan_mode;
3130 memcpy(data.dev_class, info->dev_class, 3);
3131 data.clock_offset = info->clock_offset;
3132 data.rssi = HCI_RSSI_INVALID;
3133 data.ssp_mode = 0x00;
3135 flags = hci_inquiry_cache_update(hdev, &data, false);
3137 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3138 info->dev_class, HCI_RSSI_INVALID,
3139 flags, NULL, 0, NULL, 0, 0);
3142 hci_dev_unlock(hdev);
/* Handle HCI_EV_CONN_COMPLETE. Finds (or, for allow-listed auto-connect
 * ACLs, creates) the hci_conn, guards against duplicate events and
 * invalid handles, then on success transitions the link to
 * BT_CONFIG/BT_CONNECTED, sets auth/encrypt flags, reads the encryption
 * key size when a link key already encrypted the link, and kicks off
 * remote-feature reads and SCO setup. On failure the conn is torn down
 * via hci_conn_failed(). (several guards/labels elided from listing.)
 */
3145 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3146 struct sk_buff *skb)
3148 struct hci_ev_conn_complete *ev = data;
3149 struct hci_conn *conn;
3150 u8 status = ev->status;
3152 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3156 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3158 /* In case of error status and there is no connection pending
3159 * just unlock as there is nothing to cleanup.
3164 /* Connection may not exist if auto-connected. Check the bredr
3165 * allowlist to see if this device is allowed to auto connect.
3166 * If link is an ACL type, create a connection class
3169 * Auto-connect will only occur if the event filter is
3170 * programmed with a given address. Right now, event filter is
3171 * only used during suspend.
3173 if (ev->link_type == ACL_LINK &&
3174 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3177 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3180 bt_dev_err(hdev, "no memory for new conn");
3184 if (ev->link_type != SCO_LINK)
/* SCO falls back to the eSCO conn object created earlier */
3187 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3192 conn->type = SCO_LINK;
3196 /* The HCI_Connection_Complete event is only sent once per connection.
3197 * Processing it more than once per connection can corrupt kernel memory.
3199 * As the connection handle is set here for the first time, it indicates
3200 * whether the connection is already set up.
3202 if (conn->handle != HCI_CONN_HANDLE_UNSET) {
3203 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3208 conn->handle = __le16_to_cpu(ev->handle);
3209 if (conn->handle > HCI_CONN_HANDLE_MAX) {
3210 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
3211 conn->handle, HCI_CONN_HANDLE_MAX);
3212 status = HCI_ERROR_INVALID_PARAMETERS;
3216 if (conn->type == ACL_LINK) {
3217 conn->state = BT_CONFIG;
3218 hci_conn_hold(conn);
/* Incoming unauthenticated links get the longer pairing timeout */
3220 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3221 !hci_find_link_key(hdev, &ev->bdaddr))
3222 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3224 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3226 conn->state = BT_CONNECTED;
3228 hci_debugfs_create_conn(conn);
3229 hci_conn_add_sysfs(conn);
3231 if (test_bit(HCI_AUTH, &hdev->flags))
3232 set_bit(HCI_CONN_AUTH, &conn->flags);
3234 if (test_bit(HCI_ENCRYPT, &hdev->flags))
3235 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3237 /* "Link key request" completed ahead of "connect request" completes */
3238 if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3239 ev->link_type == ACL_LINK) {
3240 struct link_key *key;
3241 struct hci_cp_read_enc_key_size cp;
3243 key = hci_find_link_key(hdev, &ev->bdaddr);
3245 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
/* commands[20] bit 0x10 = Read Encryption Key Size supported */
3247 if (!(hdev->commands[20] & 0x10)) {
3248 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3250 cp.handle = cpu_to_le16(conn->handle);
3251 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3253 bt_dev_err(hdev, "sending read key size failed");
3254 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3258 hci_encrypt_cfm(conn, ev->status);
3262 /* Get remote features */
3263 if (conn->type == ACL_LINK) {
3264 struct hci_cp_read_remote_features cp;
3265 cp.handle = ev->handle;
3266 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3269 hci_update_scan(hdev);
3272 /* Set packet type for incoming connection */
3273 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3274 struct hci_cp_change_conn_ptype cp;
3275 cp.handle = ev->handle;
3276 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3277 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3282 if (conn->type == ACL_LINK)
3283 hci_sco_setup(conn, ev->status);
3287 hci_conn_failed(conn, status);
3288 } else if (ev->link_type == SCO_LINK) {
3289 switch (conn->setting & SCO_AIRMODE_MASK) {
3290 case SCO_AIRMODE_CVSD:
3292 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3296 hci_connect_cfm(conn, status);
3300 hci_dev_unlock(hdev);
3302 hci_conn_check_pending(hdev);
3305 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3307 struct hci_cp_reject_conn_req cp;
3309 bacpy(&cp.bdaddr, bdaddr);
3310 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3311 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* HCI Connection Request event: an incoming BR/EDR connection (ACL or
 * SCO/eSCO). Applies local policy (reject list, connectable state,
 * protocol-layer veto) and then rejects, accepts, or defers the request.
 */
3314 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3315 struct sk_buff *skb)
3317 struct hci_ev_conn_request *ev = data;
3318 int mask = hdev->link_mode;
3319 struct inquiry_entry *ie;
3320 struct hci_conn *conn;
3323 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3325 /* Reject incoming connection from device with same BD ADDR against
3328 if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
3329 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
3331 hci_reject_conn(hdev, &ev->bdaddr);
/* Let the protocol layers (L2CAP/SCO) veto or annotate the request. */
3335 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3338 if (!(mask & HCI_LM_ACCEPT)) {
3339 hci_reject_conn(hdev, &ev->bdaddr);
/* Addresses on the reject list are always refused. */
3345 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3347 hci_reject_conn(hdev, &ev->bdaddr);
3351 /* Require HCI_CONNECTABLE or an accept list entry to accept the
3352 * connection. These features are only touched through mgmt so
3353 * only do the checks if HCI_MGMT is set.
3355 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3356 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3357 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3359 hci_reject_conn(hdev, &ev->bdaddr);
3363 /* Connection accepted */
/* Cache the remote Class of Device in the inquiry cache entry. */
3365 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3367 memcpy(ie->data.dev_class, ev->dev_class, 3);
/* Reuse an existing hci_conn for this peer and link type, otherwise
 * allocate a fresh one.
 */
3369 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3372 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3375 bt_dev_err(hdev, "no memory for new connection");
3380 memcpy(conn->dev_class, ev->dev_class, 3);
3382 hci_dev_unlock(hdev);
/* ACL requests (or sync requests on controllers without eSCO support)
 * are accepted immediately; eSCO-capable controllers can defer.
 */
3384 if (ev->link_type == ACL_LINK ||
3385 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3386 struct hci_cp_accept_conn_req cp;
3387 conn->state = BT_CONNECT;
3389 bacpy(&cp.bdaddr, &ev->bdaddr);
/* Attempt a role switch to central when allowed by link policy. */
3391 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3392 cp.role = 0x00; /* Become central */
3394 cp.role = 0x01; /* Remain peripheral */
3396 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3397 } else if (!(flags & HCI_PROTO_DEFER)) {
/* Accept a synchronous (eSCO) connection with default parameters. */
3398 struct hci_cp_accept_sync_conn_req cp;
3399 conn->state = BT_CONNECT;
3401 bacpy(&cp.bdaddr, &ev->bdaddr);
3402 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* 0x1f40 = 8000 bytes/s, i.e. 64 kbit/s voice bandwidth. */
3404 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
3405 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
3406 cp.max_latency = cpu_to_le16(0xffff);
3407 cp.content_format = cpu_to_le16(hdev->voice_setting);
3408 cp.retrans_effort = 0xff;
3410 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
/* Deferred: notify the profile layer and let it decide later. */
3413 conn->state = BT_CONNECT2;
3414 hci_connect_cfm(conn, 0);
3419 hci_dev_unlock(hdev);
3422 static u8 hci_to_mgmt_reason(u8 err)
3425 case HCI_ERROR_CONNECTION_TIMEOUT:
3426 return MGMT_DEV_DISCONN_TIMEOUT;
3427 case HCI_ERROR_REMOTE_USER_TERM:
3428 case HCI_ERROR_REMOTE_LOW_RESOURCES:
3429 case HCI_ERROR_REMOTE_POWER_OFF:
3430 return MGMT_DEV_DISCONN_REMOTE;
3431 case HCI_ERROR_LOCAL_HOST_TERM:
3432 return MGMT_DEV_DISCONN_LOCAL_HOST;
3434 return MGMT_DEV_DISCONN_UNKNOWN;
/* HCI Disconnection Complete event: a connection identified by handle
 * went down (or a disconnect request failed). Updates conn state,
 * notifies mgmt, maintains link keys / auto-connect lists, and may
 * re-enable advertising for LE peripheral links.
 */
3438 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3439 struct sk_buff *skb)
3441 struct hci_ev_disconn_complete *ev = data;
3443 struct hci_conn_params *params;
3444 struct hci_conn *conn;
3445 bool mgmt_connected;
3447 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3451 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Non-zero status means the disconnect request itself failed. */
3456 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3457 conn->dst_type, ev->status);
3461 conn->state = BT_CLOSED;
/* Only report the disconnect to mgmt if it saw the connect. */
3463 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3465 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3466 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3468 reason = hci_to_mgmt_reason(ev->reason);
3470 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3471 reason, mgmt_connected);
3473 if (conn->type == ACL_LINK) {
/* Drop the stored link key when the flush-key flag was set. */
3474 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3475 hci_remove_link_key(hdev, &conn->dst);
3477 hci_update_scan(hdev);
/* Re-arm auto-connection for this peer according to its policy. */
3480 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3482 switch (params->auto_connect) {
3483 case HCI_AUTO_CONN_LINK_LOSS:
/* Only reconnect after a genuine link loss, not a clean close. */
3484 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3488 case HCI_AUTO_CONN_DIRECT:
3489 case HCI_AUTO_CONN_ALWAYS:
3490 hci_pend_le_list_del_init(params);
3491 hci_pend_le_list_add(params, &hdev->pend_le_conns);
3492 hci_update_passive_scan(hdev);
3500 hci_disconn_cfm(conn, ev->reason);
3502 /* Re-enable advertising if necessary, since it might
3503 * have been disabled by the connection. From the
3504 * HCI_LE_Set_Advertise_Enable command description in
3505 * the core specification (v4.0):
3506 * "The Controller shall continue advertising until the Host
3507 * issues an LE_Set_Advertise_Enable command with
3508 * Advertising_Enable set to 0x00 (Advertising is disabled)
3509 * or until a connection is created or until the Advertising
3510 * is timed out due to Directed Advertising."
3512 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3513 hdev->cur_adv_instance = conn->adv_instance;
3514 hci_enable_advertising(hdev);
3520 hci_dev_unlock(hdev);
/* HCI Authentication Complete event: authentication for a connection
 * finished. Updates auth/security flags and, when the link is still in
 * BT_CONFIG, may continue with encryption setup before confirming the
 * connection to upper layers.
 */
3523 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3524 struct sk_buff *skb)
3526 struct hci_ev_auth_complete *ev = data;
3527 struct hci_conn *conn;
3529 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3533 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Success path: clear any stale failure flag and promote sec level. */
3538 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3539 set_bit(HCI_CONN_AUTH, &conn->flags);
3540 conn->sec_level = conn->pending_sec_level;
/* A missing PIN/key is remembered so the disconnect reason can be
 * reported as an authentication failure.
 */
3542 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3543 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3545 mgmt_auth_failed(conn, ev->status);
3548 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3550 if (conn->state == BT_CONFIG) {
/* With SSP, follow successful authentication with encryption. */
3551 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3552 struct hci_cp_set_conn_encrypt cp;
3553 cp.handle = ev->handle;
3555 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3558 conn->state = BT_CONNECTED;
3559 hci_connect_cfm(conn, ev->status);
3560 hci_conn_drop(conn);
3563 hci_auth_cfm(conn, ev->status);
/* Keep the connection alive for the disconnect timeout window. */
3565 hci_conn_hold(conn);
3566 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3567 hci_conn_drop(conn);
/* An encryption change was queued behind authentication; issue or
 * resolve it now.
 */
3570 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3572 struct hci_cp_set_conn_encrypt cp;
3573 cp.handle = ev->handle;
3575 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3578 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3579 hci_encrypt_cfm(conn, ev->status);
3584 hci_dev_unlock(hdev);
/* HCI Remote Name Request Complete event: resolves a pending name
 * request (mgmt only) and, for outgoing connections that still need it,
 * kicks off authentication.
 */
3587 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3588 struct sk_buff *skb)
3590 struct hci_ev_remote_name *ev = data;
3591 struct hci_conn *conn;
3593 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3597 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Name resolution bookkeeping only matters for the mgmt interface. */
3599 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3602 if (ev->status == 0)
/* ev->name is not guaranteed to be NUL-terminated; bound it. */
3603 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3604 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3606 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3612 if (!hci_outgoing_auth_needed(hdev, conn))
/* Only one authentication request may be outstanding at a time. */
3615 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3616 struct hci_cp_auth_requested cp;
3618 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3620 cp.handle = __cpu_to_le16(conn->handle);
3621 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3625 hci_dev_unlock(hdev);
/* HCI Encryption Change event: encryption for a connection was turned
 * on, off, or upgraded (AES-CCM). Updates security flags, validates the
 * link security mode, reads the encryption key size for ACL links, and
 * may configure the authenticated payload timeout.
 */
3628 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3629 struct sk_buff *skb)
3631 struct hci_ev_encrypt_change *ev = data;
3632 struct hci_conn *conn;
3634 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3638 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3644 /* Encryption implies authentication */
3645 set_bit(HCI_CONN_AUTH, &conn->flags);
3646 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3647 conn->sec_level = conn->pending_sec_level;
3649 /* P-256 authentication key implies FIPS */
3650 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3651 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 means AES-CCM for BR/EDR; LE always uses AES-CCM. */
3653 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3654 conn->type == LE_LINK)
3655 set_bit(HCI_CONN_AES_CCM, &conn->flags);
/* Encryption was switched off. */
3657 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3658 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3662 /* We should disregard the current RPA and generate a new one
3663 * whenever the encryption procedure fails.
3665 if (ev->status && conn->type == LE_LINK) {
3666 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3667 hci_adv_instances_set_rpa_expired(hdev, true);
3670 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3672 /* Check link security requirements are met */
3673 if (!hci_conn_check_link_mode(conn))
3674 ev->status = HCI_ERROR_AUTH_FAILURE;
/* On failure for an established link, tear the connection down. */
3676 if (ev->status && conn->state == BT_CONNECTED) {
3677 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3678 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3680 /* Notify upper layers so they can cleanup before
3683 hci_encrypt_cfm(conn, ev->status);
3684 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3685 hci_conn_drop(conn);
3689 /* Try reading the encryption key size for encrypted ACL links */
3690 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3691 struct hci_cp_read_enc_key_size cp;
3693 /* Only send HCI_Read_Encryption_Key_Size if the
3694 * controller really supports it. If it doesn't, assume
3695 * the default size (16).
/* commands[20] bit 4 == Read Encryption Key Size support. */
3697 if (!(hdev->commands[20] & 0x10)) {
3698 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3702 cp.handle = cpu_to_le16(conn->handle);
3703 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
/* Fall back to the default key size if the command can't be sent. */
3705 bt_dev_err(hdev, "sending read key size failed");
3706 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3713 /* Set the default Authenticated Payload Timeout after
3714 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3715 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3716 * sent when the link is active and Encryption is enabled, the conn
3717 * type can be either LE or ACL and controller must support LMP Ping.
3718 * Ensure for AES-CCM encryption as well.
3720 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3721 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3722 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3723 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3724 struct hci_cp_write_auth_payload_to cp;
3726 cp.handle = cpu_to_le16(conn->handle);
3727 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3728 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3733 hci_encrypt_cfm(conn, ev->status);
3736 hci_dev_unlock(hdev);
3739 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3740 struct sk_buff *skb)
3742 struct hci_ev_change_link_key_complete *ev = data;
3743 struct hci_conn *conn;
3745 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3749 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3752 set_bit(HCI_CONN_SECURE, &conn->flags);
3754 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3756 hci_key_change_cfm(conn, ev->status);
3759 hci_dev_unlock(hdev);
/* HCI Read Remote Supported Features Complete event: records the remote
 * LMP feature page 0 and continues connection setup (extended features,
 * remote name request, mgmt notification, auth).
 */
3762 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3763 struct sk_buff *skb)
3765 struct hci_ev_remote_features *ev = data;
3766 struct hci_conn *conn;
3768 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3772 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Store feature page 0 of the remote device. */
3777 memcpy(conn->features[0], ev->features, 8);
3779 if (conn->state != BT_CONFIG)
/* If both sides support extended features, fetch page 1 next. */
3782 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3783 lmp_ext_feat_capable(conn)) {
3784 struct hci_cp_read_remote_ext_features cp;
3785 cp.handle = ev->handle;
3787 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* Otherwise resolve the remote name before telling mgmt. */
3792 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3793 struct hci_cp_remote_name_req cp;
3794 memset(&cp, 0, sizeof(cp));
3795 bacpy(&cp.bdaddr, &conn->dst);
3796 cp.pscan_rep_mode = 0x02;
3797 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3798 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3799 mgmt_device_connected(hdev, conn, NULL, 0);
/* Setup is done if no outgoing authentication is required. */
3801 if (!hci_outgoing_auth_needed(hdev, conn)) {
3802 conn->state = BT_CONNECTED;
3803 hci_connect_cfm(conn, ev->status);
3804 hci_conn_drop(conn);
3808 hci_dev_unlock(hdev);
/* Update the outstanding-command credit and the associated timers based
 * on the Num_HCI_Command_Packets field (@ncmd) from a Command
 * Complete/Status event.
 */
3811 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3813 cancel_delayed_work(&hdev->cmd_timer);
/* During HCI_RESET the counters are managed by the reset path. */
3816 if (!test_bit(HCI_RESET, &hdev->flags)) {
/* Controller granted credit: allow the next queued command. */
3818 cancel_delayed_work(&hdev->ncmd_timer);
3819 atomic_set(&hdev->cmd_cnt, 1);
/* ncmd == 0: start the watchdog unless the queue is being drained. */
3821 if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3822 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3829 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3830 struct sk_buff *skb)
3832 struct hci_rp_le_read_buffer_size_v2 *rp = data;
3834 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3839 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu);
3840 hdev->le_pkts = rp->acl_max_pkt;
3841 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu);
3842 hdev->iso_pkts = rp->iso_max_pkt;
3844 hdev->le_cnt = hdev->le_pkts;
3845 hdev->iso_cnt = hdev->iso_pkts;
3847 BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3848 hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
/* LE Set CIG Parameters (Command Complete): on failure, close every CIS
 * connection belonging to the CIG; on success, distribute the returned
 * CIS handles to the matching ISO connections and create CIS links for
 * peers whose LE connection is already up.
 */
3853 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3854 struct sk_buff *skb)
3856 struct hci_rp_le_set_cig_params *rp = data;
3857 struct hci_conn *conn;
3860 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Failure path: tear down every CIS connection in this CIG. */
3865 while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
3866 conn->state = BT_CLOSED;
3867 hci_connect_cfm(conn, rp->status);
/* Success path: hand out the controller-assigned CIS handles in order
 * to the not-yet-connected ISO links of this CIG.
 */
3875 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
3876 if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id ||
3877 conn->state == BT_CONNECTED)
3880 conn->handle = __le16_to_cpu(rp->handle[i++]);
3882 bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn,
3883 conn->handle, conn->link);
3885 /* Create CIS if LE is already connected */
3886 if (conn->link && conn->link->state == BT_CONNECTED) {
3888 hci_le_create_cis(conn->link);
/* Stop once every returned handle has been consumed. */
3892 if (i == rp->num_handles)
3899 hci_dev_unlock(hdev);
/* LE Setup ISO Data Path (Command Complete): confirms the ISO
 * connection to upper layers once the data path(s) implied by the QoS
 * configuration have been set up.
 */
3904 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3905 struct sk_buff *skb)
3907 struct hci_rp_le_setup_iso_path *rp = data;
3908 struct hci_cp_le_setup_iso_path *cp;
3909 struct hci_conn *conn;
3911 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Recover the original command to learn handle and direction. */
3913 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3919 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
/* On error, report the failure immediately. */
3924 hci_connect_cfm(conn, rp->status);
3929 switch (cp->direction) {
3930 /* Input (Host to Controller) */
3932 /* Only confirm connection if output only */
3933 if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu)
3934 hci_connect_cfm(conn, rp->status);
3936 /* Output (Controller to Host) */
3938 /* Confirm connection since conn->iso_qos is always configured
3941 hci_connect_cfm(conn, rp->status);
3946 hci_dev_unlock(hdev);
/* LE Create BIG (Command Status): nothing to do beyond logging; the
 * outcome is delivered later via the BIG Complete event.
 */
3950 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3952 bt_dev_dbg(hdev, "status 0x%2.2x", status);
/* LE Set Periodic Advertising Parameters (Command Complete): currently
 * only validates the status and retrieves the sent parameters; state
 * tracking is still a TODO.
 */
3955 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3956 struct sk_buff *skb)
3958 struct hci_ev_status *rp = data;
3959 struct hci_cp_le_set_per_adv_params *cp;
3961 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3966 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3970 /* TODO: set the conn state */
/* LE Set Periodic Advertising Enable (Command Complete): mirror the
 * enable/disable outcome into the HCI_LE_PER_ADV device flag.
 */
3974 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3975 struct sk_buff *skb)
3977 struct hci_ev_status *rp = data;
3980 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Recover the sent enable value to know which way the flag goes. */
3985 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3992 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3994 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3996 hci_dev_unlock(hdev);
/* Command Complete dispatch table: maps an HCI opcode to its handler
 * together with the minimum/maximum acceptable reply length. HCI_CC
 * entries expect an exact length, HCI_CC_VL a range, HCI_CC_STATUS a
 * bare status byte.
 */
4001 #define HCI_CC_VL(_op, _func, _min, _max) \
4009 #define HCI_CC(_op, _func, _len) \
4010 HCI_CC_VL(_op, _func, _len, _len)
4012 #define HCI_CC_STATUS(_op, _func) \
4013 HCI_CC(_op, _func, sizeof(struct hci_ev_status))
/* One entry per supported Command Complete opcode. */
4015 static const struct hci_cc {
4017 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4020 } hci_cc_table[] = {
4021 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4022 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4023 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4024 HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4025 hci_cc_remote_name_req_cancel),
4026 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4027 sizeof(struct hci_rp_role_discovery)),
4028 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4029 sizeof(struct hci_rp_read_link_policy)),
4030 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4031 sizeof(struct hci_rp_write_link_policy)),
4032 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4033 sizeof(struct hci_rp_read_def_link_policy)),
4034 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4035 hci_cc_write_def_link_policy),
4036 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4037 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4038 sizeof(struct hci_rp_read_stored_link_key)),
4039 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4040 sizeof(struct hci_rp_delete_stored_link_key)),
4041 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4042 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4043 sizeof(struct hci_rp_read_local_name)),
4044 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4045 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4046 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4047 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4048 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4049 sizeof(struct hci_rp_read_class_of_dev)),
4050 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4051 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4052 sizeof(struct hci_rp_read_voice_setting)),
4053 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4054 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4055 sizeof(struct hci_rp_read_num_supported_iac)),
4056 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4057 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4058 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4059 sizeof(struct hci_rp_read_auth_payload_to)),
4060 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4061 sizeof(struct hci_rp_write_auth_payload_to)),
4062 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4063 sizeof(struct hci_rp_read_local_version)),
4064 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4065 sizeof(struct hci_rp_read_local_commands)),
4066 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4067 sizeof(struct hci_rp_read_local_features)),
4068 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4069 sizeof(struct hci_rp_read_local_ext_features)),
4070 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4071 sizeof(struct hci_rp_read_buffer_size)),
4072 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4073 sizeof(struct hci_rp_read_bd_addr)),
4074 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4075 sizeof(struct hci_rp_read_local_pairing_opts)),
4076 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4077 sizeof(struct hci_rp_read_page_scan_activity)),
4078 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4079 hci_cc_write_page_scan_activity),
4080 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4081 sizeof(struct hci_rp_read_page_scan_type)),
4082 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4083 HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
4084 sizeof(struct hci_rp_read_data_block_size)),
4085 HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
4086 sizeof(struct hci_rp_read_flow_control_mode)),
4087 HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
4088 sizeof(struct hci_rp_read_local_amp_info)),
4089 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4090 sizeof(struct hci_rp_read_clock)),
4091 HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4092 sizeof(struct hci_rp_read_enc_key_size)),
4093 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4094 sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4095 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4096 hci_cc_read_def_err_data_reporting,
4097 sizeof(struct hci_rp_read_def_err_data_reporting)),
4098 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4099 hci_cc_write_def_err_data_reporting),
4100 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4101 sizeof(struct hci_rp_pin_code_reply)),
4102 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4103 sizeof(struct hci_rp_pin_code_neg_reply)),
4104 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4105 sizeof(struct hci_rp_read_local_oob_data)),
4106 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4107 sizeof(struct hci_rp_read_local_oob_ext_data)),
/* LE controller commands start here. */
4108 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4109 sizeof(struct hci_rp_le_read_buffer_size)),
4110 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4111 sizeof(struct hci_rp_le_read_local_features)),
4112 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4113 sizeof(struct hci_rp_le_read_adv_tx_power)),
4114 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4115 sizeof(struct hci_rp_user_confirm_reply)),
4116 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4117 sizeof(struct hci_rp_user_confirm_reply)),
4118 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4119 sizeof(struct hci_rp_user_confirm_reply)),
4120 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4121 sizeof(struct hci_rp_user_confirm_reply)),
4122 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4123 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4124 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4125 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4126 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4127 hci_cc_le_read_accept_list_size,
4128 sizeof(struct hci_rp_le_read_accept_list_size)),
4129 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4130 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4131 hci_cc_le_add_to_accept_list),
4132 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4133 hci_cc_le_del_from_accept_list),
4134 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4135 sizeof(struct hci_rp_le_read_supported_states)),
4136 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4137 sizeof(struct hci_rp_le_read_def_data_len)),
4138 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4139 hci_cc_le_write_def_data_len),
4140 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4141 hci_cc_le_add_to_resolv_list),
4142 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4143 hci_cc_le_del_from_resolv_list),
4144 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4145 hci_cc_le_clear_resolv_list),
4146 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4147 sizeof(struct hci_rp_le_read_resolv_list_size)),
4148 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4149 hci_cc_le_set_addr_resolution_enable),
4150 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4151 sizeof(struct hci_rp_le_read_max_data_len)),
4152 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4153 hci_cc_write_le_host_supported),
4154 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4155 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4156 sizeof(struct hci_rp_read_rssi)),
4157 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4158 sizeof(struct hci_rp_read_tx_power)),
4159 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4160 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4161 hci_cc_le_set_ext_scan_param),
4162 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4163 hci_cc_le_set_ext_scan_enable),
4164 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4165 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4166 hci_cc_le_read_num_adv_sets,
4167 sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4168 HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4169 sizeof(struct hci_rp_le_set_ext_adv_params)),
4170 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4171 hci_cc_le_set_ext_adv_enable),
4172 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4173 hci_cc_le_set_adv_set_random_addr),
4174 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4175 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4176 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4177 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4178 hci_cc_le_set_per_adv_enable),
4179 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4180 sizeof(struct hci_rp_le_read_transmit_power)),
4181 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4182 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4183 sizeof(struct hci_rp_le_read_buffer_size_v2)),
/* Variable-length reply: one handle per CIS, up to max event size. */
4184 HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4185 sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4186 HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4187 sizeof(struct hci_rp_le_setup_iso_path)),
4190 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4191 struct sk_buff *skb)
4195 if (skb->len < cc->min_len) {
4196 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4197 cc->op, skb->len, cc->min_len);
4198 return HCI_ERROR_UNSPECIFIED;
4201 /* Just warn if the length is over max_len size it still be possible to
4202 * partially parse the cc so leave to callback to decide if that is
4205 if (skb->len > cc->max_len)
4206 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4207 cc->op, skb->len, cc->max_len);
4209 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4211 return HCI_ERROR_UNSPECIFIED;
4213 return cc->func(hdev, data, skb);
/* HCI Command Complete event: dispatch to the per-opcode handler from
 * hci_cc_table, refresh the command credit/timers, and complete any
 * pending request waiting on this opcode.
 */
4216 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4217 struct sk_buff *skb, u16 *opcode, u8 *status,
4218 hci_req_complete_t *req_complete,
4219 hci_req_complete_skb_t *req_complete_skb)
4221 struct hci_ev_cmd_complete *ev = data;
4224 *opcode = __le16_to_cpu(ev->opcode);
4226 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
/* Linear scan of the dispatch table for the matching opcode. */
4228 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4229 if (hci_cc_table[i].op == *opcode) {
4230 *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4235 if (i == ARRAY_SIZE(hci_cc_table)) {
4236 /* Unknown opcode, assume byte 0 contains the status, so
4237 * that e.g. __hci_cmd_sync() properly returns errors
4238 * for vendor specific commands send by HCI drivers.
4239 * If a vendor doesn't actually follow this convention we may
4240 * need to introduce a vendor CC table in order to properly set
4243 *status = skb->data[0];
4246 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4248 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4251 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4253 "unexpected event for opcode 0x%4.4x", *opcode);
/* Kick the command work queue if credit and queued commands exist. */
4257 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4258 queue_work(hdev->workqueue, &hdev->cmd_work);
/* LE Create CIS (Command Status): when the command fails, close every
 * CIS connection that was named in the sent command.
 */
4261 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4263 struct hci_cp_le_create_cis *cp;
4266 bt_dev_dbg(hdev, "status 0x%2.2x", status);
/* Recover the command we sent to learn which CIS handles it named. */
4271 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4277 /* Remove connection if command failed */
/* NOTE(review): decrementing cp->num_cis mutates the cached sent
 * command data in place.
 */
4278 for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4279 struct hci_conn *conn;
4282 handle = __le16_to_cpu(cp->cis[i].cis_handle);
4284 conn = hci_conn_hash_lookup_handle(hdev, handle);
4286 conn->state = BT_CLOSED;
4287 hci_connect_cfm(conn, status);
4292 hci_dev_unlock(hdev);
/* Command Status dispatch table: maps an HCI opcode to the handler
 * invoked when a Command Status event arrives for it.
 */
4295 #define HCI_CS(_op, _func) \
4301 static const struct hci_cs {
4303 void (*func)(struct hci_dev *hdev, __u8 status);
4304 } hci_cs_table[] = {
4305 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4306 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4307 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4308 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4309 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4310 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4311 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4312 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4313 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4314 hci_cs_read_remote_ext_features),
4315 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4316 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4317 hci_cs_enhanced_setup_sync_conn),
4318 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4319 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4320 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4321 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4322 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4323 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4324 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4325 HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4326 HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
/* HCI Command Status event: dispatch to the per-opcode handler from
 * hci_cs_table, refresh the command credit/timers, and complete pending
 * requests where appropriate.
 */
4329 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4330 struct sk_buff *skb, u16 *opcode, u8 *status,
4331 hci_req_complete_t *req_complete,
4332 hci_req_complete_skb_t *req_complete_skb)
4334 struct hci_ev_cmd_status *ev = data;
4337 *opcode = __le16_to_cpu(ev->opcode);
4338 *status = ev->status;
4340 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
/* Linear scan of the dispatch table for the matching opcode. */
4342 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4343 if (hci_cs_table[i].op == *opcode) {
4344 hci_cs_table[i].func(hdev, ev->status);
4349 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4351 /* Indicate request completion if the command failed. Also, if
4352 * we're not waiting for a special event and we get a success
4353 * command status we should try to flag the request as completed
4354 * (since for this kind of commands there will not be a command
4357 if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
4358 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4360 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4361 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
/* Kick the command work queue if credit and queued commands exist. */
4367 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4368 queue_work(hdev->workqueue, &hdev->cmd_work);
4371 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4372 struct sk_buff *skb)
4374 struct hci_ev_hardware_error *ev = data;
4376 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4378 hdev->hw_error_code = ev->code;
4380 queue_work(hdev->req_workqueue, &hdev->error_reset);
4383 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4384 struct sk_buff *skb)
4386 struct hci_ev_role_change *ev = data;
4387 struct hci_conn *conn;
4389 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4393 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4396 conn->role = ev->role;
4398 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4400 hci_role_switch_cfm(conn, ev->status, ev->role);
4403 hci_dev_unlock(hdev);
/* HCI Number of Completed Packets event: returns transmit-buffer credit
 * to the per-link-type counters (capped at the controller's advertised
 * quota) and kicks the TX work queue.
 */
4406 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4407 struct sk_buff *skb)
4409 struct hci_ev_num_comp_pkts *ev = data;
/* Validate that the event actually carries ev->num handle entries. */
4412 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4413 flex_array_size(ev, handles, ev->num)))
4416 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4417 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4421 bt_dev_dbg(hdev, "num %d", ev->num);
4423 for (i = 0; i < ev->num; i++) {
4424 struct hci_comp_pkts_info *info = &ev->handles[i];
4425 struct hci_conn *conn;
4426 __u16 handle, count;
4428 handle = __le16_to_cpu(info->handle);
4429 count = __le16_to_cpu(info->count);
4431 conn = hci_conn_hash_lookup_handle(hdev, handle);
4435 conn->sent -= count;
/* Credit goes back to the pool matching the connection type; LE and
 * ISO fall back to shared pools when no dedicated quota exists.
 */
4437 switch (conn->type) {
4439 hdev->acl_cnt += count;
4440 if (hdev->acl_cnt > hdev->acl_pkts)
4441 hdev->acl_cnt = hdev->acl_pkts;
4445 if (hdev->le_pkts) {
4446 hdev->le_cnt += count;
4447 if (hdev->le_cnt > hdev->le_pkts)
4448 hdev->le_cnt = hdev->le_pkts;
/* No LE-specific buffers: LE shares the ACL pool. */
4450 hdev->acl_cnt += count;
4451 if (hdev->acl_cnt > hdev->acl_pkts)
4452 hdev->acl_cnt = hdev->acl_pkts;
4457 hdev->sco_cnt += count;
4458 if (hdev->sco_cnt > hdev->sco_pkts)
4459 hdev->sco_cnt = hdev->sco_pkts;
4463 if (hdev->iso_pkts) {
4464 hdev->iso_cnt += count;
4465 if (hdev->iso_cnt > hdev->iso_pkts)
4466 hdev->iso_cnt = hdev->iso_pkts;
4467 } else if (hdev->le_pkts) {
4468 hdev->le_cnt += count;
4469 if (hdev->le_cnt > hdev->le_pkts)
4470 hdev->le_cnt = hdev->le_pkts;
4472 hdev->acl_cnt += count;
4473 if (hdev->acl_cnt > hdev->acl_pkts)
4474 hdev->acl_cnt = hdev->acl_pkts;
4479 bt_dev_err(hdev, "unknown type %d conn %p",
/* Freed credit may allow queued packets to go out now. */
4485 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a handle to a connection object according to the device type:
 * regular controllers look up an hci_conn directly, while AMP controllers
 * look up the hci_chan for the handle and return its owning connection.
 * Returns NULL (via the elided paths) when the handle or dev_type is
 * unknown.
 */
4488 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4491 struct hci_chan *chan;
4493 switch (hdev->dev_type) {
4495 return hci_conn_hash_lookup_handle(hdev, handle);
4497 chan = hci_chan_lookup_handle(hdev, handle);
4502 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* Handle the HCI_Number_Of_Completed_Data_Blocks event (block-based flow
 * control, used by AMP controllers): for each handle, return the completed
 * block count to the shared block pool, clamped at the controller maximum,
 * then kick the TX work.
 */
4509 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4510 struct sk_buff *skb)
4512 struct hci_ev_num_comp_blocks *ev = data;
/* Validate that the skb really carries ev->num_hndl entries. */
4515 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4516 flex_array_size(ev, handles, ev->num_hndl)))
4519 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4520 bt_dev_err(hdev, "wrong event for mode %d",
4521 hdev->flow_ctl_mode);
4525 bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4528 for (i = 0; i < ev->num_hndl; i++) {
4529 struct hci_comp_blocks_info *info = &ev->handles[i];
4530 struct hci_conn *conn = NULL;
4531 __u16 handle, block_count;
4533 handle = __le16_to_cpu(info->handle);
4534 block_count = __le16_to_cpu(info->blocks);
/* AMP-aware lookup: maps either hci_conn or hci_chan handles. */
4536 conn = __hci_conn_lookup_handle(hdev, handle);
4540 conn->sent -= block_count;
4542 switch (conn->type) {
4545 hdev->block_cnt += block_count;
/* Clamp to the advertised number of blocks. */
4546 if (hdev->block_cnt > hdev->num_blocks)
4547 hdev->block_cnt = hdev->num_blocks;
4551 bt_dev_err(hdev, "unknown type %d conn %p",
4557 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle the HCI_Mode_Change event: record the connection's new power mode
 * (active/sniff), maintain the POWER_SAVE flag when the change was not
 * locally requested, and continue any deferred SCO setup that was waiting
 * for the mode change.
 */
4560 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4561 struct sk_buff *skb)
4563 struct hci_ev_mode_change *ev = data;
4564 struct hci_conn *conn;
4566 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4570 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4572 conn->mode = ev->mode;
/* If we did not ask for this change, track the remote-driven power
 * state via HCI_CONN_POWER_SAVE. */
4574 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4576 if (conn->mode == HCI_CM_ACTIVE)
4577 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4579 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
/* SCO setup may have been deferred until the ACL left sniff mode. */
4582 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4583 hci_sco_setup(conn, ev->status);
4586 hci_dev_unlock(hdev);
/* Handle the HCI_PIN_Code_Request event: extend the disconnect timeout for
 * the pairing, reject the request when the device is not bondable and we
 * did not initiate authentication, otherwise forward the request to user
 * space via mgmt (flagging whether a 16-digit "secure" PIN is required).
 */
4589 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4590 struct sk_buff *skb)
4592 struct hci_ev_pin_code_req *ev = data;
4593 struct hci_conn *conn;
4595 bt_dev_dbg(hdev, "");
4599 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* hold/drop pair: only the disc_timeout bump is meant to persist. */
4603 if (conn->state == BT_CONNECTED) {
4604 hci_conn_hold(conn);
4605 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4606 hci_conn_drop(conn);
/* Not bondable and not the auth initiator: refuse the PIN request. */
4609 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4610 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4611 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4612 sizeof(ev->bdaddr), &ev->bdaddr);
4613 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
/* High security requires a full 16-digit PIN ("secure" hint). */
4616 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4621 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4625 hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and derive the
 * resulting pending security level from the key type: unauthenticated
 * combination keys give MEDIUM, authenticated P-192 gives HIGH, and
 * authenticated P-256 gives FIPS. A plain combination key gives HIGH only
 * for sufficiently long PINs (per the elided pin_len check).
 */
4628 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
/* Changed-combination keys keep the existing type (early return path). */
4630 if (key_type == HCI_LK_CHANGED_COMBINATION)
4633 conn->pin_length = pin_len;
4634 conn->key_type = key_type;
4637 case HCI_LK_LOCAL_UNIT:
4638 case HCI_LK_REMOTE_UNIT:
4639 case HCI_LK_DEBUG_COMBINATION:
4641 case HCI_LK_COMBINATION:
4643 conn->pending_sec_level = BT_SECURITY_HIGH;
4645 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4647 case HCI_LK_UNAUTH_COMBINATION_P192:
4648 case HCI_LK_UNAUTH_COMBINATION_P256:
4649 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4651 case HCI_LK_AUTH_COMBINATION_P192:
4652 conn->pending_sec_level = BT_SECURITY_HIGH;
4654 case HCI_LK_AUTH_COMBINATION_P256:
4655 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle the HCI_Link_Key_Request event: look up a stored link key for the
 * peer and reply with it, unless the key's strength is insufficient for
 * the requested security level (unauthenticated key where MITM is
 * required, or short-PIN combination key for HIGH/FIPS), in which case a
 * negative reply is sent so that fresh pairing happens instead.
 */
4660 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4661 struct sk_buff *skb)
4663 struct hci_ev_link_key_req *ev = data;
4664 struct hci_cp_link_key_reply cp;
4665 struct hci_conn *conn;
4666 struct link_key *key;
4668 bt_dev_dbg(hdev, "");
/* Stored keys are only managed when mgmt controls the device. */
4670 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4675 key = hci_find_link_key(hdev, &ev->bdaddr);
4677 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4681 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4683 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4685 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Reject unauthenticated keys when the pending auth requires MITM
 * (auth_type bit 0) and is already known (!= 0xff). */
4687 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4688 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4689 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4690 bt_dev_dbg(hdev, "ignoring unauthenticated key");
/* A combination key from a short (<16 digit) PIN is too weak for
 * HIGH or FIPS security. */
4694 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4695 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4696 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4697 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4701 conn_set_key(conn, key->type, key->pin_len);
4704 bacpy(&cp.bdaddr, &ev->bdaddr);
4705 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4707 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4709 hci_dev_unlock(hdev);
/* Negative-reply path (reached via the elided not_found label). */
4714 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4715 hci_dev_unlock(hdev);
/* Handle the HCI_Link_Key_Notification event: reject all-zero keys
 * (mitigation for CVE-2020-26555 / BLURtooth-style attacks), store the new
 * key, notify user space, and decide whether the key persists past the
 * connection (debug keys are dropped unless HCI_KEEP_DEBUG_KEYS is set).
 */
4718 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4719 struct sk_buff *skb)
4721 struct hci_ev_link_key_notify *ev = data;
4722 struct hci_conn *conn;
4723 struct link_key *key;
4727 bt_dev_dbg(hdev, "");
4731 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4735 /* Ignore NULL link key against CVE-2020-26555 */
/* crypto_memneq is a constant-time compare; a zero key means the
 * pairing is bogus, so tear the link down. */
4736 if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4737 bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4739 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4740 hci_conn_drop(conn);
/* hold/drop pair: only the disc_timeout update is meant to persist. */
4744 hci_conn_hold(conn);
4745 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4746 hci_conn_drop(conn);
4748 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4749 conn_set_key(conn, ev->key_type, conn->pin_length);
4751 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4754 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4755 ev->key_type, pin_len, &persistent);
4759 /* Update connection information since adding the key will have
4760 * fixed up the type in the case of changed combination keys.
4762 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4763 conn_set_key(conn, key->type, key->pin_len);
4765 mgmt_new_link_key(hdev, key, persistent);
4767 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4768 * is set. If it's not set simply remove the key from the kernel
4769 * list (we've still notified user space about it but with
4770 * store_hint being 0).
4772 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4773 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4774 list_del_rcu(&key->list);
4775 kfree_rcu(key, rcu);
/* FLUSH_KEY tells the disconnect path whether to discard the key. */
4780 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4782 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4785 hci_dev_unlock(hdev);
/* Handle the HCI_Read_Clock_Offset_Complete event: cache the peer's clock
 * offset in the inquiry cache entry (used to speed up later page scans).
 */
4788 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4789 struct sk_buff *skb)
4791 struct hci_ev_clock_offset *ev = data;
4792 struct hci_conn *conn;
4794 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4798 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4799 if (conn && !ev->status) {
4800 struct inquiry_entry *ie;
4802 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
/* Stored in little-endian wire format, as elsewhere in the cache. */
4804 ie->data.clock_offset = ev->clock_offset;
4805 ie->timestamp = jiffies;
4809 hci_dev_unlock(hdev);
/* Handle the HCI_Connection_Packet_Type_Changed event: record the newly
 * negotiated packet types on the connection (success only).
 */
4812 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4813 struct sk_buff *skb)
4815 struct hci_ev_pkt_type_change *ev = data;
4816 struct hci_conn *conn;
4818 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4822 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4823 if (conn && !ev->status)
4824 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4826 hci_dev_unlock(hdev);
/* Handle the HCI_Page_Scan_Repetition_Mode_Change event: refresh the
 * peer's page-scan repetition mode in the inquiry cache.
 */
4829 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4830 struct sk_buff *skb)
4832 struct hci_ev_pscan_rep_mode *ev = data;
4833 struct inquiry_entry *ie;
4835 bt_dev_dbg(hdev, "");
4839 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4841 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4842 ie->timestamp = jiffies;
4845 hci_dev_unlock(hdev);
/* Handle the HCI_Inquiry_Result_With_RSSI event. The event exists on the
 * wire in two layouts — with or without a pscan_mode byte per entry — so
 * the total skb length is used to disambiguate before parsing. Each result
 * is folded into the inquiry cache and reported to mgmt as a found device.
 * Results are ignored during periodic inquiry.
 */
4848 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4849 struct sk_buff *skb)
4851 struct hci_ev_inquiry_result_rssi *ev = edata;
4852 struct inquiry_data data;
4855 bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4860 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Variant 1: entries carry an extra pscan_mode field. */
4865 if (skb->len == array_size(ev->num,
4866 sizeof(struct inquiry_info_rssi_pscan))) {
4867 struct inquiry_info_rssi_pscan *info;
4869 for (i = 0; i < ev->num; i++) {
/* Pull each entry individually so a short skb is caught per
 * iteration rather than over-reading. */
4872 info = hci_ev_skb_pull(hdev, skb,
4873 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4876 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4877 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4881 bacpy(&data.bdaddr, &info->bdaddr);
4882 data.pscan_rep_mode = info->pscan_rep_mode;
4883 data.pscan_period_mode = info->pscan_period_mode;
4884 data.pscan_mode = info->pscan_mode;
4885 memcpy(data.dev_class, info->dev_class, 3);
4886 data.clock_offset = info->clock_offset;
4887 data.rssi = info->rssi;
4888 data.ssp_mode = 0x00;
4890 flags = hci_inquiry_cache_update(hdev, &data, false);
4892 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4893 info->dev_class, info->rssi,
4894 flags, NULL, 0, NULL, 0, 0);
/* Variant 2: standard entries without pscan_mode. */
4896 } else if (skb->len == array_size(ev->num,
4897 sizeof(struct inquiry_info_rssi))) {
4898 struct inquiry_info_rssi *info;
4900 for (i = 0; i < ev->num; i++) {
4903 info = hci_ev_skb_pull(hdev, skb,
4904 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4907 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4908 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4912 bacpy(&data.bdaddr, &info->bdaddr);
4913 data.pscan_rep_mode = info->pscan_rep_mode;
4914 data.pscan_period_mode = info->pscan_period_mode;
4915 data.pscan_mode = 0x00;
4916 memcpy(data.dev_class, info->dev_class, 3);
4917 data.clock_offset = info->clock_offset;
4918 data.rssi = info->rssi;
4919 data.ssp_mode = 0x00;
4921 flags = hci_inquiry_cache_update(hdev, &data, false);
4923 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4924 info->dev_class, info->rssi,
4925 flags, NULL, 0, NULL, 0, 0);
/* Neither layout matched: malformed event. */
4928 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4929 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4932 hci_dev_unlock(hdev);
/* Handle the HCI_Read_Remote_Extended_Features_Complete event: store the
 * feature page, derive SSP/SC enablement from host features page 1, and —
 * if the connection is still in BT_CONFIG — continue connection setup
 * (remote name request or mgmt connected notification, then authentication
 * or transition to BT_CONNECTED).
 */
4935 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4936 struct sk_buff *skb)
4938 struct hci_ev_remote_ext_features *ev = data;
4939 struct hci_conn *conn;
4941 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4945 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Bounds-check the page index before caching the feature bytes. */
4949 if (ev->page < HCI_MAX_PAGES)
4950 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries host-supported features (SSP, Secure Connections). */
4952 if (!ev->status && ev->page == 0x01) {
4953 struct inquiry_entry *ie;
4955 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4957 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4959 if (ev->features[0] & LMP_HOST_SSP) {
4960 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4962 /* It is mandatory by the Bluetooth specification that
4963 * Extended Inquiry Results are only used when Secure
4964 * Simple Pairing is enabled, but some devices violate
4967 * To make these devices work, the internal SSP
4968 * enabled flag needs to be cleared if the remote host
4969 * features do not indicate SSP support */
4970 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4973 if (ev->features[0] & LMP_HOST_SC)
4974 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* Setup continuation only applies while still configuring. */
4977 if (conn->state != BT_CONFIG)
4980 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4981 struct hci_cp_remote_name_req cp;
4982 memset(&cp, 0, sizeof(cp));
4983 bacpy(&cp.bdaddr, &conn->dst);
4984 cp.pscan_rep_mode = 0x02;
4985 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4986 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4987 mgmt_device_connected(hdev, conn, NULL, 0);
4989 if (!hci_outgoing_auth_needed(hdev, conn)) {
4990 conn->state = BT_CONNECTED;
4991 hci_connect_cfm(conn, ev->status);
4992 hci_conn_drop(conn);
4996 hci_dev_unlock(hdev);
/* Handle the HCI_Synchronous_Connection_Complete event for SCO/eSCO links:
 * validate the link type, find the matching connection (falling back from
 * SCO to eSCO lookup), guard against duplicate completion events, assign
 * and range-check the handle, and on specific LMP rejection codes retry
 * the setup with a downgraded packet-type mask. Finally notify the SCO
 * data-path (CVSD/transparent) and confirm the connection.
 */
4999 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
5000 struct sk_buff *skb)
5002 struct hci_ev_sync_conn_complete *ev = data;
5003 struct hci_conn *conn;
5004 u8 status = ev->status;
5006 switch (ev->link_type) {
5011 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
5012 * for HCI_Synchronous_Connection_Complete is limited to
5013 * either SCO or eSCO
5015 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5019 bt_dev_dbg(hdev, "status 0x%2.2x", status);
5023 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5025 if (ev->link_type == ESCO_LINK)
5028 /* When the link type in the event indicates SCO connection
5029 * and lookup of the connection object fails, then check
5030 * if an eSCO connection object exists.
5032 * The core limits the synchronous connections to either
5033 * SCO or eSCO. The eSCO connection is preferred and tried
5034 * to be setup first and until successfully established,
5035 * the link type will be hinted as eSCO.
5037 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5042 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5043 * Processing it more than once per connection can corrupt kernel memory.
5045 * As the connection handle is set here for the first time, it indicates
5046 * whether the connection is already set up.
5048 if (conn->handle != HCI_CONN_HANDLE_UNSET) {
5049 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
5055 conn->handle = __le16_to_cpu(ev->handle);
5056 if (conn->handle > HCI_CONN_HANDLE_MAX) {
5057 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
5058 conn->handle, HCI_CONN_HANDLE_MAX);
5059 status = HCI_ERROR_INVALID_PARAMETERS;
5060 conn->state = BT_CLOSED;
5064 conn->state = BT_CONNECTED;
5065 conn->type = ev->link_type;
5067 hci_debugfs_create_conn(conn);
5068 hci_conn_add_sysfs(conn);
/* These rejection codes mean the remote refused the chosen eSCO
 * parameters; retry with a reduced packet-type mask. */
5071 case 0x10: /* Connection Accept Timeout */
5072 case 0x0d: /* Connection Rejected due to Limited Resources */
5073 case 0x11: /* Unsupported Feature or Parameter Value */
5074 case 0x1c: /* SCO interval rejected */
5075 case 0x1a: /* Unsupported Remote Feature */
5076 case 0x1e: /* Invalid LMP Parameters */
5077 case 0x1f: /* Unspecified error */
5078 case 0x20: /* Unsupported LMP Parameter value */
5080 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5081 (hdev->esco_type & EDR_ESCO_MASK);
5082 if (hci_setup_sync(conn, conn->link->handle))
5088 conn->state = BT_CLOSED;
5092 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5093 /* Notify only in case of SCO over HCI transport data path which
5094 * is zero and non-zero value shall be non-HCI transport data path
5096 if (conn->codec.data_path == 0 && hdev->notify) {
5097 switch (ev->air_mode) {
5099 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5102 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5107 hci_connect_cfm(conn, status);
5112 hci_dev_unlock(hdev);
/* Compute the used length of an EIR (Extended Inquiry Response) buffer by
 * walking its length-prefixed fields; stops at the first zero-length field
 * (terminator, via the elided break) or at eir_len.
 */
5115 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5119 while (parsed < eir_len) {
5120 u8 field_len = eir[0];
/* Each field occupies field_len bytes plus its one length byte. */
5125 parsed += field_len + 1;
5126 eir += field_len + 1;
/* Handle the HCI_Extended_Inquiry_Result event: validate the entry count
 * against the skb, then for each result update the inquiry cache (noting
 * whether the EIR already contains a complete name) and forward the device
 * with its EIR payload to mgmt. Ignored during periodic inquiry.
 */
5132 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5133 struct sk_buff *skb)
5135 struct hci_ev_ext_inquiry_result *ev = edata;
5136 struct inquiry_data data;
5140 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5141 flex_array_size(ev, info, ev->num)))
5144 bt_dev_dbg(hdev, "num %d", ev->num);
5149 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5154 for (i = 0; i < ev->num; i++) {
5155 struct extended_inquiry_info *info = &ev->info[i];
5159 bacpy(&data.bdaddr, &info->bdaddr);
5160 data.pscan_rep_mode = info->pscan_rep_mode;
5161 data.pscan_period_mode = info->pscan_period_mode;
5162 data.pscan_mode = 0x00;
5163 memcpy(data.dev_class, info->dev_class, 3);
5164 data.clock_offset = info->clock_offset;
5165 data.rssi = info->rssi;
/* EIR results imply the remote has SSP enabled. */
5166 data.ssp_mode = 0x01;
/* If the EIR carries a complete name there is no need for a
 * separate remote-name request later. */
5168 if (hci_dev_test_flag(hdev, HCI_MGMT))
5169 name_known = eir_get_data(info->data,
5171 EIR_NAME_COMPLETE, NULL);
5175 flags = hci_inquiry_cache_update(hdev, &data, name_known);
5177 eir_len = eir_get_length(info->data, sizeof(info->data));
5179 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5180 info->dev_class, info->rssi,
5181 flags, info->data, eir_len, NULL, 0, 0);
5184 hci_dev_unlock(hdev);
/* Handle the HCI_Encryption_Key_Refresh_Complete event. Only LE links are
 * processed here (BR/EDR handles this via the auth_complete event). On
 * failure the link is torn down with an auth failure; on success a link in
 * BT_CONFIG is promoted to BT_CONNECTED, otherwise the auth confirmation
 * callbacks are run.
 */
5187 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5188 struct sk_buff *skb)
5190 struct hci_ev_key_refresh_complete *ev = data;
5191 struct hci_conn *conn;
5193 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5194 __le16_to_cpu(ev->handle));
5198 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5202 /* For BR/EDR the necessary steps are taken through the
5203 * auth_complete event.
5205 if (conn->type != LE_LINK)
5209 conn->sec_level = conn->pending_sec_level;
5211 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5213 if (ev->status && conn->state == BT_CONNECTED) {
5214 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5215 hci_conn_drop(conn);
5219 if (conn->state == BT_CONFIG) {
5221 conn->state = BT_CONNECTED;
5223 hci_connect_cfm(conn, ev->status);
5224 hci_conn_drop(conn);
5226 hci_auth_cfm(conn, ev->status);
/* hold/drop pair: persist only the disc_timeout update. */
5228 hci_conn_hold(conn);
5229 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5230 hci_conn_drop(conn);
5234 hci_dev_unlock(hdev);
/* Compute the authentication requirements to use in an IO Capability Reply,
 * combining the remote's stated requirements with our own. Bit 0 of the
 * auth value is the MITM-protection flag.
 */
5237 static u8 hci_get_auth_req(struct hci_conn *conn)
5239 /* If remote requests no-bonding follow that lead */
5240 if (conn->remote_auth == HCI_AT_NO_BONDING ||
5241 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5242 return conn->remote_auth | (conn->auth_type & 0x01);
5244 /* If both remote and local have enough IO capabilities, require
5247 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5248 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5249 return conn->remote_auth | 0x01;
5251 /* No MITM protection possible so ignore remote requirement */
5252 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Determine the OOB-data-present value for an IO Capability Reply: checks
 * whether stored remote OOB data exists for the peer and whether the
 * relevant (P-192 or P-256) values are non-zero, honouring Secure
 * Connections and SC-Only mode. Comparisons use constant-time
 * crypto_memneq since the values are key material.
 */
5255 static u8 bredr_oob_data_present(struct hci_conn *conn)
5257 struct hci_dev *hdev = conn->hdev;
5258 struct oob_data *data;
5260 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5264 if (bredr_sc_enabled(hdev)) {
5265 /* When Secure Connections is enabled, then just
5266 * return the present value stored with the OOB
5267 * data. The stored value contains the right present
5268 * information. However it can only be trusted when
5269 * not in Secure Connection Only mode.
5271 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5272 return data->present;
5274 /* When Secure Connections Only mode is enabled, then
5275 * the P-256 values are required. If they are not
5276 * available, then do not declare that OOB data is
5279 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5280 !crypto_memneq(data->hash256, ZERO_KEY, 16))
5286 /* When Secure Connections is not enabled or actually
5287 * not supported by the hardware, then check that if
5288 * P-192 data values are present.
5290 if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5291 !crypto_memneq(data->hash192, ZERO_KEY, 16))
/* Handle the HCI_IO_Capability_Request event: if pairing is permitted
 * (bondable, we initiated, or remote asks for no-bonding) send an IO
 * Capability Reply with our capability, derived auth requirements and OOB
 * presence; otherwise send a negative reply with "pairing not allowed".
 */
5297 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5298 struct sk_buff *skb)
5300 struct hci_ev_io_capa_request *ev = data;
5301 struct hci_conn *conn;
5303 bt_dev_dbg(hdev, "");
5307 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5308 if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5311 /* Assume remote supports SSP since it has triggered this event */
5312 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
/* Keep the connection alive for the duration of the pairing. */
5314 hci_conn_hold(conn);
5316 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5319 /* Allow pairing if we're pairable, the initiators of the
5320 * pairing or if the remote is not requesting bonding.
5322 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5323 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5324 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5325 struct hci_cp_io_capability_reply cp;
5327 bacpy(&cp.bdaddr, &ev->bdaddr);
5328 /* Change the IO capability from KeyboardDisplay
5329 * to DisplayYesNo as it is not supported by BT spec. */
5330 cp.capability = (conn->io_capability == 0x04) ?
5331 HCI_IO_DISPLAY_YESNO : conn->io_capability;
5333 /* If we are initiators, there is no remote information yet */
5334 if (conn->remote_auth == 0xff) {
5335 /* Request MITM protection if our IO caps allow it
5336 * except for the no-bonding case.
5338 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5339 conn->auth_type != HCI_AT_NO_BONDING)
5340 conn->auth_type |= 0x01;
5342 conn->auth_type = hci_get_auth_req(conn);
5345 /* If we're not bondable, force one of the non-bondable
5346 * authentication requirement values.
5348 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5349 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5351 cp.authentication = conn->auth_type;
5352 cp.oob_data = bredr_oob_data_present(conn);
5354 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5357 struct hci_cp_io_capability_neg_reply cp;
5359 bacpy(&cp.bdaddr, &ev->bdaddr);
5360 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5362 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5367 hci_dev_unlock(hdev);
/* Handle the HCI_IO_Capability_Response event: cache the remote's IO
 * capability and authentication requirements on the connection for use in
 * the subsequent user-confirmation/passkey logic.
 */
5370 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5371 struct sk_buff *skb)
5373 struct hci_ev_io_capa_reply *ev = data;
5374 struct hci_conn *conn;
5376 bt_dev_dbg(hdev, "");
5380 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5384 conn->remote_cap = ev->capability;
5385 conn->remote_auth = ev->authentication;
5388 hci_dev_unlock(hdev);
/* Handle the HCI_User_Confirmation_Request event (SSP numeric comparison):
 * reject when we require MITM but the remote cannot provide it; auto-accept
 * (optionally after hdev->auto_accept_delay) when neither side needs MITM
 * and no link key already exists; otherwise hand the decision to user
 * space via mgmt, with confirm_hint indicating authorization-only prompts.
 */
5391 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5392 struct sk_buff *skb)
5394 struct hci_ev_user_confirm_req *ev = data;
5395 int loc_mitm, rem_mitm, confirm_hint = 0;
5396 struct hci_conn *conn;
5398 bt_dev_dbg(hdev, "");
5402 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5405 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM flag. */
5409 loc_mitm = (conn->auth_type & 0x01);
5410 rem_mitm = (conn->remote_auth & 0x01);
5412 /* If we require MITM but the remote device can't provide that
5413 * (it has NoInputNoOutput) then reject the confirmation
5414 * request. We check the security level here since it doesn't
5415 * necessarily match conn->auth_type.
5417 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5418 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5419 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5420 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5421 sizeof(ev->bdaddr), &ev->bdaddr);
5425 /* If no side requires MITM protection; auto-accept */
5426 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5427 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5429 /* If we're not the initiators request authorization to
5430 * proceed from user space (mgmt_user_confirm with
5431 * confirm_hint set to 1). The exception is if neither
5432 * side had MITM or if the local IO capability is
5433 * NoInputNoOutput, in which case we do auto-accept
5435 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5436 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5437 (loc_mitm || rem_mitm)) {
5438 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5443 /* If there already exists link key in local host, leave the
5444 * decision to user space since the remote device could be
5445 * legitimate or malicious.
5447 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5448 bt_dev_dbg(hdev, "Local host already has link key");
5453 BT_DBG("Auto-accept of user confirmation with %ums delay",
5454 hdev->auto_accept_delay);
/* Delayed auto-accept gives user space a window to cancel. */
5456 if (hdev->auto_accept_delay > 0) {
5457 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5458 queue_delayed_work(conn->hdev->workqueue,
5459 &conn->auto_accept_work, delay);
5463 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5464 sizeof(ev->bdaddr), &ev->bdaddr);
5469 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5470 le32_to_cpu(ev->passkey), confirm_hint);
5473 hci_dev_unlock(hdev);
/* Handle the HCI_User_Passkey_Request event: simply forward the request to
 * user space via mgmt when the device is mgmt-controlled.
 */
5476 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5477 struct sk_buff *skb)
5479 struct hci_ev_user_passkey_req *ev = data;
5481 bt_dev_dbg(hdev, "");
5483 if (hci_dev_test_flag(hdev, HCI_MGMT))
5484 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle the HCI_User_Passkey_Notification event: store the passkey to be
 * displayed, reset the entered-digit counter and notify user space.
 */
5487 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5488 struct sk_buff *skb)
5490 struct hci_ev_user_passkey_notify *ev = data;
5491 struct hci_conn *conn;
5493 bt_dev_dbg(hdev, "");
5495 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5499 conn->passkey_notify = __le32_to_cpu(ev->passkey);
5500 conn->passkey_entered = 0;
5502 if (hci_dev_test_flag(hdev, HCI_MGMT))
5503 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5504 conn->dst_type, conn->passkey_notify,
5505 conn->passkey_entered);
/* Handle the HCI_Keypress_Notification event: track the remote user's
 * passkey-entry progress (started/entered/erased/cleared/completed) and
 * relay the updated count to user space.
 */
5508 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5509 struct sk_buff *skb)
5511 struct hci_ev_keypress_notify *ev = data;
5512 struct hci_conn *conn;
5514 bt_dev_dbg(hdev, "");
5516 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5521 case HCI_KEYPRESS_STARTED:
5522 conn->passkey_entered = 0;
5525 case HCI_KEYPRESS_ENTERED:
5526 conn->passkey_entered++;
5529 case HCI_KEYPRESS_ERASED:
5530 conn->passkey_entered--;
5533 case HCI_KEYPRESS_CLEARED:
5534 conn->passkey_entered = 0;
5537 case HCI_KEYPRESS_COMPLETED:
5541 if (hci_dev_test_flag(hdev, HCI_MGMT))
5542 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5543 conn->dst_type, conn->passkey_notify,
5544 conn->passkey_entered);
/* Handle the HCI_Simple_Pairing_Complete event: reset the remote auth
 * requirement to "unknown" and, for pairings we did not initiate, report a
 * failure to mgmt (initiator failures are reported via auth_complete to
 * avoid duplicates). Drops the reference taken at IO-capability time.
 */
5547 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5548 struct sk_buff *skb)
5550 struct hci_ev_simple_pair_complete *ev = data;
5551 struct hci_conn *conn;
5553 bt_dev_dbg(hdev, "");
5557 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5558 if (!conn || !hci_conn_ssp_enabled(conn))
5561 /* Reset the authentication requirement to unknown */
5562 conn->remote_auth = 0xff;
5564 /* To avoid duplicate auth_failed events to user space we check
5565 * the HCI_CONN_AUTH_PEND flag which will be set if we
5566 * initiated the authentication. A traditional auth_complete
5567 * event gets always produced as initiator and is also mapped to
5568 * the mgmt_auth_failed event */
5569 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5570 mgmt_auth_failed(conn, ev->status);
/* Balances the hci_conn_hold() from hci_io_capa_request_evt(). */
5572 hci_conn_drop(conn);
5575 hci_dev_unlock(hdev);
/* Handle the HCI_Remote_Host_Supported_Features_Notification event: cache
 * the remote host features as feature page 1 on any existing connection
 * and update the inquiry cache's SSP indication.
 */
5578 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5579 struct sk_buff *skb)
5581 struct hci_ev_remote_host_features *ev = data;
5582 struct inquiry_entry *ie;
5583 struct hci_conn *conn;
5585 bt_dev_dbg(hdev, "");
5589 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5591 memcpy(conn->features[1], ev->features, 8);
5593 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5595 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5597 hci_dev_unlock(hdev);
/* Handle the HCI_Remote_OOB_Data_Request event: reply with stored OOB data
 * for the peer — the extended (P-192 + P-256) form when Secure Connections
 * is enabled, zeroing the P-192 half in SC-Only mode — or send a negative
 * reply when no OOB data is stored.
 */
5600 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5601 struct sk_buff *skb)
5603 struct hci_ev_remote_oob_data_request *ev = edata;
5604 struct oob_data *data;
5606 bt_dev_dbg(hdev, "");
5610 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5613 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored OOB data: tell the controller so pairing can proceed
 * without it. */
5615 struct hci_cp_remote_oob_data_neg_reply cp;
5617 bacpy(&cp.bdaddr, &ev->bdaddr);
5618 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5623 if (bredr_sc_enabled(hdev)) {
5624 struct hci_cp_remote_oob_ext_data_reply cp;
5626 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-Only mode the legacy P-192 values must not be used. */
5627 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5628 memset(cp.hash192, 0, sizeof(cp.hash192));
5629 memset(cp.rand192, 0, sizeof(cp.rand192));
5631 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5632 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5634 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5635 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5637 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5640 struct hci_cp_remote_oob_data_reply cp;
5642 bacpy(&cp.bdaddr, &ev->bdaddr);
5643 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5644 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5646 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5651 hci_dev_unlock(hdev);
5654 #if IS_ENABLED(CONFIG_BT_HS)
/* Handle the AMP HCI_Channel_Selected event (CONFIG_BT_HS): look up the
 * physical-link connection and trigger reading the final local AMP assoc
 * data needed to complete channel setup.
 */
5655 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5656 struct sk_buff *skb)
5658 struct hci_ev_channel_selected *ev = data;
5659 struct hci_conn *hcon;
5661 bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5663 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5667 amp_read_loc_assoc_final_data(hdev, hcon);
/* Handle the AMP HCI_Physical_Link_Complete event: mark the AMP connection
 * connected, copy the peer address from the controlling BR/EDR link,
 * register it with debugfs/sysfs and confirm the physical link to the AMP
 * manager.
 */
5670 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
5671 struct sk_buff *skb)
5673 struct hci_ev_phy_link_complete *ev = data;
5674 struct hci_conn *hcon, *bredr_hcon;
5676 bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
5681 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* The AMP link is paired with the BR/EDR link that negotiated it. */
5693 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5695 hcon->state = BT_CONNECTED;
5696 bacpy(&hcon->dst, &bredr_hcon->dst);
/* hold/drop pair: persist only the disc_timeout update. */
5698 hci_conn_hold(hcon);
5699 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5700 hci_conn_drop(hcon);
5702 hci_debugfs_create_conn(hcon);
5703 hci_conn_add_sysfs(hcon);
5705 amp_physical_cfm(bredr_hcon, hcon);
5708 hci_dev_unlock(hdev);
/* Handle the AMP HCI_Logical_Link_Complete event: create the hci_chan for
 * the new logical link and, when an L2CAP channel is waiting on the AMP
 * manager, propagate the block MTU and confirm the logical link to L2CAP.
 */
5711 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
5712 struct sk_buff *skb)
5714 struct hci_ev_logical_link_complete *ev = data;
5715 struct hci_conn *hcon;
5716 struct hci_chan *hchan;
5717 struct amp_mgr *mgr;
5719 bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5720 le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
5722 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5726 /* Create AMP hchan */
5727 hchan = hci_chan_create(hcon);
5731 hchan->handle = le16_to_cpu(ev->handle);
5734 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5736 mgr = hcon->amp_mgr;
5737 if (mgr && mgr->bredr_chan) {
5738 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5740 l2cap_chan_lock(bredr_chan);
/* AMP data is block-based, so the MTU comes from block_mtu. */
5742 bredr_chan->conn->mtu = hdev->block_mtu;
5743 l2cap_logical_cfm(bredr_chan, hchan, 0);
5744 hci_conn_hold(hcon);
5746 l2cap_chan_unlock(bredr_chan);
/* Handle the AMP HCI_Disconnection_Logical_Link_Complete event: find the
 * AMP channel for the handle and destroy the logical link with the given
 * reason.
 */
5750 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5751 struct sk_buff *skb)
5753 struct hci_ev_disconn_logical_link_complete *ev = data;
5754 struct hci_chan *hchan;
5756 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5757 le16_to_cpu(ev->handle), ev->status);
5764 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
/* Only channels flagged as AMP participate in logical-link teardown. */
5765 if (!hchan || !hchan->amp)
5768 amp_destroy_logical_link(hchan, ev->reason);
5771 hci_dev_unlock(hdev);
/* HCI Disconnection Physical Link Complete event handler (AMP).
 *
 * Closes the AMP physical link connection and notifies upper layers of the
 * disconnect reason.
 */
5774 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5775 struct sk_buff *skb)
5777 struct hci_ev_disconn_phy_link_complete *ev = data;
5778 struct hci_conn *hcon;
5780 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5787 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5788 if (hcon && hcon->type == AMP_LINK) {
5789 hcon->state = BT_CLOSED;
5790 hci_disconn_cfm(hcon, ev->reason);
5794 hci_dev_unlock(hdev);
/* Record the initiator and responder addresses of a new LE connection.
 *
 * For outgoing (central) connections the peer is the responder and our own
 * address is the initiator; for incoming (peripheral) connections the roles
 * are reversed. The Local RPA reported by the controller, when present,
 * takes precedence over any locally cached address.
 */
5798 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5799 u8 bdaddr_type, bdaddr_t *local_rpa)
5802 conn->dst_type = bdaddr_type;
5803 conn->resp_addr_type = bdaddr_type;
5804 bacpy(&conn->resp_addr, bdaddr);
5806 /* Check if the controller has set a Local RPA then it must be
5807 * used instead of hdev->rpa.
5809 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5810 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5811 bacpy(&conn->init_addr, local_rpa);
5812 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5813 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5814 bacpy(&conn->init_addr, &conn->hdev->rpa);
5816 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5817 &conn->init_addr_type);
/* Incoming connection: we advertised, so we are the responder */
5820 conn->resp_addr_type = conn->hdev->adv_addr_type;
5821 /* Check if the controller has set a Local RPA then it must be
5822 * used instead of hdev->rpa.
5824 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5825 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5826 bacpy(&conn->resp_addr, local_rpa);
5827 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5828 /* In case of ext adv, resp_addr will be updated in
5829 * Adv Terminated event.
5831 if (!ext_adv_capable(conn->hdev))
5832 bacpy(&conn->resp_addr,
5833 &conn->hdev->random_addr);
5835 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5838 conn->init_addr_type = bdaddr_type;
5839 bacpy(&conn->init_addr, bdaddr);
5841 /* For incoming connections, set the default minimum
5842 * and maximum connection interval. They will be used
5843 * to check if the parameters are in range and if not
5844 * trigger the connection update procedure.
5846 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5847 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete events.
 *
 * Finds or creates the hci_conn for the new LE link, resolves the peer's
 * identity address via the IRK store, validates the connection handle,
 * notifies mgmt/upper layers and cleans up any pending connection
 * parameters entry that triggered this connection. local_rpa is NULL for
 * the legacy (non-enhanced) event.
 */
5851 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5852 bdaddr_t *bdaddr, u8 bdaddr_type,
5853 bdaddr_t *local_rpa, u8 role, u16 handle,
5854 u16 interval, u16 latency,
5855 u16 supervision_timeout)
5857 struct hci_conn_params *params;
5858 struct hci_conn *conn;
5859 struct smp_irk *irk;
5864 /* All controllers implicitly stop advertising in the event of a
5865 * connection, so ensure that the state bit is cleared.
5867 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5869 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5871 /* In case of error status and there is no connection pending
5872 * just unlock as there is nothing to cleanup.
5877 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5879 bt_dev_err(hdev, "no memory for new connection");
5883 conn->dst_type = bdaddr_type;
5885 /* If we didn't have a hci_conn object previously
5886 * but we're in central role this must be something
5887 * initiated using an accept list. Since accept list based
5888 * connections are not "first class citizens" we don't
5889 * have full tracking of them. Therefore, we go ahead
5890 * with a "best effort" approach of determining the
5891 * initiator address based on the HCI_PRIVACY flag.
5894 conn->resp_addr_type = bdaddr_type;
5895 bacpy(&conn->resp_addr, bdaddr);
5896 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5897 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5898 bacpy(&conn->init_addr, &hdev->rpa);
5900 hci_copy_identity_address(hdev,
5902 &conn->init_addr_type);
/* A connection attempt was pending; stop its timeout */
5906 cancel_delayed_work(&conn->le_conn_timeout);
5909 /* The HCI_LE_Connection_Complete event is only sent once per connection.
5910 * Processing it more than once per connection can corrupt kernel memory.
5912 * As the connection handle is set here for the first time, it indicates
5913 * whether the connection is already set up.
5915 if (conn->handle != HCI_CONN_HANDLE_UNSET) {
5916 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5920 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5922 /* Lookup the identity address from the stored connection
5923 * address and address type.
5925 * When establishing connections to an identity address, the
5926 * connection procedure will store the resolvable random
5927 * address first. Now if it can be converted back into the
5928 * identity address, start using the identity address from
5931 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5933 bacpy(&conn->dst, &irk->bdaddr);
5934 conn->dst_type = irk->addr_type;
5937 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
/* Reject handles outside the range permitted by the HCI spec */
5939 if (handle > HCI_CONN_HANDLE_MAX) {
5940 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
5941 HCI_CONN_HANDLE_MAX);
5942 status = HCI_ERROR_INVALID_PARAMETERS;
5945 /* All connection failure handling is taken care of by the
5946 * hci_conn_failed function which is triggered by the HCI
5947 * request completion callbacks used for connecting.
5952 /* Drop the connection if it has been aborted */
5953 if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5954 hci_conn_drop(conn);
5958 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5959 addr_type = BDADDR_LE_PUBLIC;
5961 addr_type = BDADDR_LE_RANDOM;
5963 /* Drop the connection if the device is blocked */
5964 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5965 hci_conn_drop(conn);
5969 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5970 mgmt_device_connected(hdev, conn, NULL, 0);
5972 conn->sec_level = BT_SECURITY_LOW;
5973 conn->handle = handle;
5974 conn->state = BT_CONFIG;
5976 /* Store current advertising instance as connection advertising instance
5977 * when software rotation is in use so it can be re-enabled when
5980 if (!ext_adv_capable(hdev))
5981 conn->adv_instance = hdev->cur_adv_instance;
5983 conn->le_conn_interval = interval;
5984 conn->le_conn_latency = latency;
5985 conn->le_supv_timeout = supervision_timeout;
5987 hci_debugfs_create_conn(conn);
5988 hci_conn_add_sysfs(conn);
5990 /* The remote features procedure is defined for central
5991 * role only. So only in case of an initiated connection
5992 * request the remote features.
5994 * If the local controller supports peripheral-initiated features
5995 * exchange, then requesting the remote features in peripheral
5996 * role is possible. Otherwise just transition into the
5997 * connected state without requesting the remote features.
6000 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
6001 struct hci_cp_le_read_remote_features cp;
6003 cp.handle = __cpu_to_le16(conn->handle);
6005 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
6008 hci_conn_hold(conn);
6010 conn->state = BT_CONNECTED;
6011 hci_connect_cfm(conn, status);
/* Drop the pending-connection entry that triggered this connection */
6014 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
6017 hci_pend_le_list_del_init(params);
6019 hci_conn_drop(params->conn);
6020 hci_conn_put(params->conn);
6021 params->conn = NULL;
6026 hci_update_passive_scan(hdev);
6027 hci_dev_unlock(hdev);
/* HCI LE Connection Complete event handler.
 *
 * Thin wrapper that forwards the legacy event to le_conn_complete_evt();
 * the legacy event carries no Local RPA, hence the NULL argument.
 */
6030 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6031 struct sk_buff *skb)
6033 struct hci_ev_le_conn_complete *ev = data;
6035 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6037 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6038 NULL, ev->role, le16_to_cpu(ev->handle),
6039 le16_to_cpu(ev->interval),
6040 le16_to_cpu(ev->latency),
6041 le16_to_cpu(ev->supervision_timeout));
/* HCI LE Enhanced Connection Complete event handler.
 *
 * Same as the legacy event but additionally passes the controller-reported
 * Local RPA through to le_conn_complete_evt().
 */
6044 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6045 struct sk_buff *skb)
6047 struct hci_ev_le_enh_conn_complete *ev = data;
6049 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6051 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6052 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6053 le16_to_cpu(ev->interval),
6054 le16_to_cpu(ev->latency),
6055 le16_to_cpu(ev->supervision_timeout));
/* HCI LE Advertising Set Terminated event handler.
 *
 * Handles termination of an extended advertising set: on error the
 * instance is removed; on success (termination due to a connection) the
 * instance is recorded on the resulting connection so advertising can be
 * re-enabled after disconnect, and the connection's responder address is
 * fixed up for random advertising addresses.
 */
6058 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
6059 struct sk_buff *skb)
6061 struct hci_evt_le_ext_adv_set_term *ev = data;
6062 struct hci_conn *conn;
6063 struct adv_info *adv, *n;
6065 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6067 /* The Bluetooth Core 5.3 specification clearly states that this event
6068 * shall not be sent when the Host disables the advertising set. So in
6069 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
6071 * When the Host disables an advertising set, all cleanup is done via
6072 * its command callback and not needed to be duplicated here.
6074 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
6075 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
6081 adv = hci_find_adv_instance(hdev, ev->handle);
6087 /* Remove advertising as it has been terminated */
6088 hci_remove_adv_instance(hdev, ev->handle);
6089 mgmt_advertising_removed(NULL, hdev, ev->handle);
/* If any other instance is still enabled, we are still advertising */
6091 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
6096 /* We are no longer advertising, clear HCI_LE_ADV */
6097 hci_dev_clear_flag(hdev, HCI_LE_ADV);
6102 adv->enabled = false;
6104 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
6106 /* Store handle in the connection so the correct advertising
6107 * instance can be re-enabled when disconnected.
6109 conn->adv_instance = ev->handle;
/* Nothing to fix up unless we advertised with a random address and
 * resp_addr has not been filled in yet.
 */
6111 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
6112 bacmp(&conn->resp_addr, BDADDR_ANY))
6116 bacpy(&conn->resp_addr, &hdev->random_addr);
6121 bacpy(&conn->resp_addr, &adv->random_addr);
6125 hci_dev_unlock(hdev);
/* HCI LE Connection Update Complete event handler.
 *
 * Records the negotiated connection interval, latency and supervision
 * timeout on the matching connection.
 */
6128 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6129 struct sk_buff *skb)
6131 struct hci_ev_le_conn_update_complete *ev = data;
6132 struct hci_conn *conn;
6134 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6141 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6143 conn->le_conn_interval = le16_to_cpu(ev->interval);
6144 conn->le_conn_latency = le16_to_cpu(ev->latency);
6145 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6148 hci_dev_unlock(hdev);
/* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertisement should trigger an LE connection
 * attempt, and initiate it if so.
 *
 * Returns the hci_conn for the attempt, or NULL when the advertisement
 * does not match any pending connection request (or the attempt cannot be
 * made right now).
 */
6152 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
6154 u8 addr_type, bool addr_resolved,
6157 struct hci_conn *conn;
6158 struct hci_conn_params *params;
6160 /* If the event is not connectable don't proceed further */
6161 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
6164 /* Ignore if the device is blocked or hdev is suspended */
6165 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
6169 /* Most controller will fail if we try to create new connections
6170 * while we have an existing one in peripheral role.
6172 if (hdev->conn_hash.le_num_peripheral > 0 &&
6173 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
6174 !(hdev->le_states[3] & 0x10)))
6177 /* If we're not connectable only connect devices that we have in
6178 * our pend_le_conns list.
6180 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
6185 if (!params->explicit_connect) {
6186 switch (params->auto_connect) {
6187 case HCI_AUTO_CONN_DIRECT:
6188 /* Only devices advertising with ADV_DIRECT_IND are
6189 * triggering a connection attempt. This is allowing
6190 * incoming connections from peripheral devices.
6192 if (adv_type != LE_ADV_DIRECT_IND)
6195 case HCI_AUTO_CONN_ALWAYS:
6196 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
6197 * are triggering a connection attempt. This means
6198 * that incoming connections from peripheral device are
6199 * accepted and also outgoing connections to peripheral
6200 * devices are established when found.
6208 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
6209 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
6211 if (!IS_ERR(conn)) {
6212 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6213 * by higher layer that tried to connect, if no then
6214 * store the pointer since we don't really have any
6215 * other owner of the object besides the params that
6216 * triggered it. This way we can abort the connection if
6217 * the parameters get removed and keep the reference
6218 * count consistent once the connection is established.
6221 if (!params->explicit_connect)
6222 params->conn = hci_conn_get(conn);
6227 switch (PTR_ERR(conn)) {
6229 /* If hci_connect() returns -EBUSY it means there is already
6230 * an LE connection attempt going on. Since controllers don't
6231 * support more than one connection attempt at the time, we
6232 * don't consider this an error case.
6236 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process a single LE advertising report.
 *
 * Validates the report type and length, resolves the advertiser's identity
 * address, optionally triggers a pending connection attempt, and forwards
 * the report to mgmt as a device-found event. For active scanning,
 * ADV_IND/ADV_SCAN_IND reports are cached so they can be merged with the
 * SCAN_RSP that usually follows.
 *
 * direct_addr is non-NULL only for LE Direct Advertising Reports; instant
 * is the timestamp to attach to the report for mesh systems.
 */
6243 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6244 u8 bdaddr_type, bdaddr_t *direct_addr,
6245 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6246 bool ext_adv, bool ctl_time, u64 instant)
6248 struct discovery_state *d = &hdev->discovery;
6249 struct smp_irk *irk;
6250 struct hci_conn *conn;
6251 bool match, bdaddr_resolved;
/* Only the advertising PDU types defined by the spec are accepted */
6257 case LE_ADV_DIRECT_IND:
6258 case LE_ADV_SCAN_IND:
6259 case LE_ADV_NONCONN_IND:
6260 case LE_ADV_SCAN_RSP:
6263 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6264 "type: 0x%02x", type);
6268 if (len > max_adv_len(hdev)) {
6269 bt_dev_err_ratelimited(hdev,
6270 "adv larger than maximum supported");
6274 /* Find the end of the data in case the report contains padded zero
6275 * bytes at the end causing an invalid length value.
6277 * When data is NULL, len is 0 so there is no need for extra ptr
6278 * check as 'ptr < data + 0' is already false in such case.
6280 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6281 if (ptr + 1 + *ptr > data + len)
6285 /* Adjust for actual length. This handles the case when remote
6286 * device is advertising with incorrect data length.
6290 /* If the direct address is present, then this report is from
6291 * a LE Direct Advertising Report event. In that case it is
6292 * important to see if the address is matching the local
6293 * controller address.
6295 if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6296 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6299 /* Only resolvable random addresses are valid for these
6300 * kind of reports and others can be ignored.
6302 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6305 /* If the controller is not using resolvable random
6306 * addresses, then this report can be ignored.
6308 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6311 /* If the local IRK of the controller does not match
6312 * with the resolvable random address provided, then
6313 * this report can be ignored.
6315 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6319 /* Check if we need to convert to identity address */
6320 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6322 bdaddr = &irk->bdaddr;
6323 bdaddr_type = irk->addr_type;
6326 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6328 /* Check if we have been requested to connect to this device.
6330 * direct_addr is set only for directed advertising reports (it is NULL
6331 * for advertising reports) and is already verified to be RPA above.
6333 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6335 if (!ext_adv && conn && type == LE_ADV_IND &&
6336 len <= max_adv_len(hdev)) {
6337 /* Store report for later inclusion by
6338 * mgmt_device_connected
6340 memcpy(conn->le_adv_data, data, len);
6341 conn->le_adv_data_len = len;
/* Non-connectable PDUs are flagged as such towards mgmt */
6344 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6345 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6349 /* All scan results should be sent up for Mesh systems */
6350 if (hci_dev_test_flag(hdev, HCI_MESH)) {
6351 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6352 rssi, flags, data, len, NULL, 0, instant);
6356 /* Passive scanning shouldn't trigger any device found events,
6357 * except for devices marked as CONN_REPORT for which we do send
6358 * device found events, or advertisement monitoring requested.
6360 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6361 if (type == LE_ADV_DIRECT_IND)
6364 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6365 bdaddr, bdaddr_type) &&
6366 idr_is_empty(&hdev->adv_monitors_idr))
6369 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6370 rssi, flags, data, len, NULL, 0, 0);
6374 /* When receiving a scan response, then there is no way to
6375 * know if the remote device is connectable or not. However
6376 * since scan responses are merged with a previously seen
6377 * advertising report, the flags field from that report
6380 * In the unlikely case that a controller just sends a scan
6381 * response event that doesn't match the pending report, then
6382 * it is marked as a standalone SCAN_RSP.
6384 if (type == LE_ADV_SCAN_RSP)
6385 flags = MGMT_DEV_FOUND_SCAN_RSP;
6387 /* If there's nothing pending either store the data from this
6388 * event or send an immediate device found event if the data
6389 * should not be stored for later.
6391 if (!ext_adv && !has_pending_adv_report(hdev)) {
6392 /* If the report will trigger a SCAN_REQ store it for
6395 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6396 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6397 rssi, flags, data, len);
6401 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6402 rssi, flags, data, len, NULL, 0, 0);
6406 /* Check if the pending report is for the same device as the new one */
6407 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6408 bdaddr_type == d->last_adv_addr_type);
6410 /* If the pending data doesn't match this report or this isn't a
6411 * scan response (e.g. we got a duplicate ADV_IND) then force
6412 * sending of the pending data.
6414 if (type != LE_ADV_SCAN_RSP || !match) {
6415 /* Send out whatever is in the cache, but skip duplicates */
6417 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6418 d->last_adv_addr_type, NULL,
6419 d->last_adv_rssi, d->last_adv_flags,
6421 d->last_adv_data_len, NULL, 0, 0);
6423 /* If the new report will trigger a SCAN_REQ store it for
6426 if (!ext_adv && (type == LE_ADV_IND ||
6427 type == LE_ADV_SCAN_IND)) {
6428 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6429 rssi, flags, data, len);
6433 /* The advertising reports cannot be merged, so clear
6434 * the pending report and send out a device found event.
6436 clear_pending_adv_report(hdev);
6437 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6438 rssi, flags, data, len, NULL, 0, 0);
6442 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6443 * the new event is a SCAN_RSP. We can therefore proceed with
6444 * sending a merged device found event.
6446 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6447 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6448 d->last_adv_data, d->last_adv_data_len, data, len, 0);
6449 clear_pending_adv_report(hdev);
/* HCI LE Advertising Report event handler.
 *
 * Iterates over the reports contained in the event, pulling each
 * variable-length info structure (plus the trailing RSSI byte) off the skb
 * and handing it to process_adv_report(). Malformed reports abort the
 * loop via hci_le_ev_skb_pull().
 */
6452 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6453 struct sk_buff *skb)
6455 struct hci_ev_le_advertising_report *ev = data;
6456 u64 instant = jiffies;
6464 struct hci_ev_le_advertising_info *info;
6467 info = hci_le_ev_skb_pull(hdev, skb,
6468 HCI_EV_LE_ADVERTISING_REPORT,
/* Pull the variable-length advertising data (validated above) */
6473 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6477 if (info->length <= max_adv_len(hdev)) {
/* The RSSI byte immediately follows the advertising data */
6478 rssi = info->data[info->length];
6479 process_adv_report(hdev, info->type, &info->bdaddr,
6480 info->bdaddr_type, NULL, 0, rssi,
6481 info->data, info->length, false,
6484 bt_dev_err(hdev, "Dropping invalid advertising data");
6488 hci_dev_unlock(hdev);
/* Map an extended advertising report event type to the equivalent legacy
 * advertising PDU type.
 *
 * Returns LE_ADV_INVALID for combinations that have no legacy equivalent.
 */
6491 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
/* Legacy PDUs carried in extended reports map one-to-one */
6493 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6495 case LE_LEGACY_ADV_IND:
6497 case LE_LEGACY_ADV_DIRECT_IND:
6498 return LE_ADV_DIRECT_IND;
6499 case LE_LEGACY_ADV_SCAN_IND:
6500 return LE_ADV_SCAN_IND;
6501 case LE_LEGACY_NONCONN_IND:
6502 return LE_ADV_NONCONN_IND;
6503 case LE_LEGACY_SCAN_RSP_ADV:
6504 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6505 return LE_ADV_SCAN_RSP;
/* True extended PDUs: derive the closest legacy type from the
 * connectable/scannable/directed property bits.
 */
6511 if (evt_type & LE_EXT_ADV_CONN_IND) {
6512 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6513 return LE_ADV_DIRECT_IND;
6518 if (evt_type & LE_EXT_ADV_SCAN_RSP)
6519 return LE_ADV_SCAN_RSP;
6521 if (evt_type & LE_EXT_ADV_SCAN_IND)
6522 return LE_ADV_SCAN_IND;
6524 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6525 evt_type & LE_EXT_ADV_DIRECT_IND)
6526 return LE_ADV_NONCONN_IND;
6529 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6532 return LE_ADV_INVALID;
/* HCI LE Extended Advertising Report event handler.
 *
 * Iterates over the reports in the event, converts each extended event
 * type to its legacy equivalent and hands valid reports to
 * process_adv_report() (marking them ext_adv unless they carry a legacy
 * PDU).
 */
6535 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6536 struct sk_buff *skb)
6538 struct hci_ev_le_ext_adv_report *ev = data;
6539 u64 instant = jiffies;
6547 struct hci_ev_le_ext_adv_info *info;
6551 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6556 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6560 evt_type = __le16_to_cpu(info->type);
6561 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6562 if (legacy_evt_type != LE_ADV_INVALID) {
6563 process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6564 info->bdaddr_type, NULL, 0,
6565 info->rssi, info->data, info->length,
6566 !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6571 hci_dev_unlock(hdev);
/* Issue an HCI LE Periodic Advertising Terminate Sync command for the
 * given sync handle. Returns the result of hci_send_cmd().
 */
6574 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6576 struct hci_cp_le_pa_term_sync cp;
6578 memset(&cp, 0, sizeof(cp));
6581 return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
/* HCI LE Periodic Advertising Sync Established event handler.
 *
 * Clears the pending PA-sync flag and asks the ISO layer whether the sync
 * should be accepted; if not, the sync is terminated again.
 */
6584 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6585 struct sk_buff *skb)
6587 struct hci_ev_le_pa_sync_established *ev = data;
6588 int mask = hdev->link_mode;
6591 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6598 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6600 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6601 if (!(mask & HCI_LM_ACCEPT))
6602 hci_le_pa_term_sync(hdev, ev->handle);
6604 hci_dev_unlock(hdev);
/* HCI LE Read Remote Features Complete event handler.
 *
 * Stores the remote LE feature mask and, if the connection is still in
 * BT_CONFIG, completes connection setup via hci_connect_cfm().
 */
6607 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6608 struct sk_buff *skb)
6610 struct hci_ev_le_remote_feat_complete *ev = data;
6611 struct hci_conn *conn;
6613 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6617 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6620 memcpy(conn->features[0], ev->features, 8);
6622 if (conn->state == BT_CONFIG) {
6625 /* If the local controller supports peripheral-initiated
6626 * features exchange, but the remote controller does
6627 * not, then it is possible that the error code 0x1a
6628 * for unsupported remote feature gets returned.
6630 * In this specific case, allow the connection to
6631 * transition into connected state and mark it as
6634 if (!conn->out && ev->status == 0x1a &&
6635 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6638 status = ev->status;
6640 conn->state = BT_CONNECTED;
6641 hci_connect_cfm(conn, status);
6642 hci_conn_drop(conn);
6646 hci_dev_unlock(hdev);
/* HCI LE Long Term Key Request event handler.
 *
 * Looks up the LTK for the connection and replies with it if the EDiv/Rand
 * values match (both must be zero for Secure Connections keys); otherwise
 * sends a negative reply. STKs are single-use and removed after the reply.
 */
6649 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6650 struct sk_buff *skb)
6652 struct hci_ev_le_ltk_req *ev = data;
6653 struct hci_cp_le_ltk_reply cp;
6654 struct hci_cp_le_ltk_neg_reply neg;
6655 struct hci_conn *conn;
6656 struct smp_ltk *ltk;
6658 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6662 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6666 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6670 if (smp_ltk_is_sc(ltk)) {
6671 /* With SC both EDiv and Rand are set to zero */
6672 if (ev->ediv || ev->rand)
6675 /* For non-SC keys check that EDiv and Rand match */
6676 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Copy the key, zero-padding beyond the negotiated key size */
6680 memcpy(cp.ltk, ltk->val, ltk->enc_size);
6681 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6682 cp.handle = cpu_to_le16(conn->handle);
6684 conn->pending_sec_level = smp_ltk_sec_level(ltk);
6686 conn->enc_key_size = ltk->enc_size;
6688 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6690 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6691 * temporary key used to encrypt a connection following
6692 * pairing. It is used during the Encrypted Session Setup to
6693 * distribute the keys. Later, security can be re-established
6694 * using a distributed LTK.
6696 if (ltk->type == SMP_STK) {
6697 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6698 list_del_rcu(&ltk->list);
6699 kfree_rcu(ltk, rcu);
6701 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6704 hci_dev_unlock(hdev);
/* Error path: no usable key, reject the request */
6709 neg.handle = ev->handle;
6710 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6711 hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request Negative Reply with the
 * given rejection reason for the connection handle.
 */
6714 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6717 struct hci_cp_le_conn_param_req_neg_reply cp;
6719 cp.handle = cpu_to_le16(handle);
6722 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* HCI LE Remote Connection Parameter Request event handler.
 *
 * Validates the parameters proposed by the remote device. Invalid or
 * out-of-bounds requests are rejected with a negative reply; acceptable
 * ones are stored (and, in central role, recorded in the connection
 * parameters and reported to mgmt) before replying positively.
 */
6726 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6727 struct sk_buff *skb)
6729 struct hci_ev_le_remote_conn_param_req *ev = data;
6730 struct hci_cp_le_conn_param_req_reply cp;
6731 struct hci_conn *hcon;
6732 u16 handle, min, max, latency, timeout;
6734 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6736 handle = le16_to_cpu(ev->handle);
6737 min = le16_to_cpu(ev->interval_min);
6738 max = le16_to_cpu(ev->interval_max);
6739 latency = le16_to_cpu(ev->latency);
6740 timeout = le16_to_cpu(ev->timeout);
6742 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6743 if (!hcon || hcon->state != BT_CONNECTED)
6744 return send_conn_param_neg_reply(hdev, handle,
6745 HCI_ERROR_UNKNOWN_CONN_ID);
/* Refuse intervals larger than what we configured for this link */
6747 if (max > hcon->le_conn_max_interval)
6748 return send_conn_param_neg_reply(hdev, handle,
6749 HCI_ERROR_INVALID_LL_PARAMS);
6751 if (hci_check_conn_params(min, max, latency, timeout))
6752 return send_conn_param_neg_reply(hdev, handle,
6753 HCI_ERROR_INVALID_LL_PARAMS);
6755 if (hcon->role == HCI_ROLE_MASTER) {
6756 struct hci_conn_params *params;
6761 params = hci_conn_params_lookup(hdev, &hcon->dst,
6764 params->conn_min_interval = min;
6765 params->conn_max_interval = max;
6766 params->conn_latency = latency;
6767 params->supervision_timeout = timeout;
6773 hci_dev_unlock(hdev);
6775 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6776 store_hint, min, max, latency, timeout);
/* Accept the request by echoing the proposed parameters back */
6779 cp.handle = ev->handle;
6780 cp.interval_min = ev->interval_min;
6781 cp.interval_max = ev->interval_max;
6782 cp.latency = ev->latency;
6783 cp.timeout = ev->timeout;
6787 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* HCI LE Direct Advertising Report event handler.
 *
 * Validates the flexible array of reports against the skb length and feeds
 * each report (which carries no advertising data, only addresses and RSSI)
 * to process_adv_report().
 */
6790 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6791 struct sk_buff *skb)
6793 struct hci_ev_le_direct_adv_report *ev = data;
6794 u64 instant = jiffies;
6797 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6798 flex_array_size(ev, info, ev->num)))
6806 for (i = 0; i < ev->num; i++) {
6807 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6809 process_adv_report(hdev, info->type, &info->bdaddr,
6810 info->bdaddr_type, &info->direct_addr,
6811 info->direct_addr_type, info->rssi, NULL, 0,
6812 false, false, instant);
6815 hci_dev_unlock(hdev);
/* HCI LE PHY Update Complete event handler.
 *
 * Records the negotiated TX and RX PHYs on the matching connection.
 */
6818 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6819 struct sk_buff *skb)
6821 struct hci_ev_le_phy_update_complete *ev = data;
6822 struct hci_conn *conn;
6824 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6831 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6835 conn->le_tx_phy = ev->tx_phy;
6836 conn->le_rx_phy = ev->rx_phy;
6839 hci_dev_unlock(hdev);
/* HCI LE CIS Established event handler.
 *
 * On success, fills in the negotiated ISO QoS parameters (peripheral role
 * only, since the central already knows what it requested), marks the CIS
 * connected, sets up the ISO data path and confirms the connection to
 * upper layers.
 */
6842 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6843 struct sk_buff *skb)
6845 struct hci_evt_le_cis_established *ev = data;
6846 struct hci_conn *conn;
6847 u16 handle = __le16_to_cpu(ev->handle);
6849 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6853 conn = hci_conn_hash_lookup_handle(hdev, handle);
6856 "Unable to find connection with handle 0x%4.4x",
6861 if (conn->type != ISO_LINK) {
6863 "Invalid connection link type handle 0x%4.4x",
6868 if (conn->role == HCI_ROLE_SLAVE) {
/* Latency fields are 24-bit little endian on the wire; widen
 * them via a zeroed __le32 before conversion.
 */
6871 memset(&interval, 0, sizeof(interval));
6873 memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
6874 conn->iso_qos.in.interval = le32_to_cpu(interval);
6875 memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
6876 conn->iso_qos.out.interval = le32_to_cpu(interval);
6877 conn->iso_qos.in.latency = le16_to_cpu(ev->interval);
6878 conn->iso_qos.out.latency = le16_to_cpu(ev->interval);
6879 conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu);
6880 conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu);
6881 conn->iso_qos.in.phy = ev->c_phy;
6882 conn->iso_qos.out.phy = ev->p_phy;
6886 conn->state = BT_CONNECTED;
6887 hci_debugfs_create_conn(conn);
6888 hci_conn_add_sysfs(conn);
6889 hci_iso_setup_path(conn);
6893 hci_connect_cfm(conn, ev->status);
6897 hci_dev_unlock(hdev);
/* Send an HCI LE Reject CIS Request command for the given CIS handle,
 * using HCI_ERROR_REJ_BAD_ADDR as the rejection reason.
 */
6900 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6902 struct hci_cp_le_reject_cis cp;
6904 memset(&cp, 0, sizeof(cp));
6906 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6907 hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
/* Send an HCI LE Accept CIS Request command for the given CIS handle. */
6910 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6912 struct hci_cp_le_accept_cis cp;
6914 memset(&cp, 0, sizeof(cp));
6916 hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
/* HCI LE CIS Request event handler (peripheral role).
 *
 * Asks the ISO layer whether the incoming CIS should be accepted; if so,
 * creates (or reuses) the CIS hci_conn and either accepts immediately or
 * defers the decision to the socket layer via HCI_PROTO_DEFER.
 */
6919 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6920 struct sk_buff *skb)
6922 struct hci_evt_le_cis_req *ev = data;
6923 u16 acl_handle, cis_handle;
6924 struct hci_conn *acl, *cis;
6928 acl_handle = __le16_to_cpu(ev->acl_handle);
6929 cis_handle = __le16_to_cpu(ev->cis_handle);
6931 bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6932 acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6936 acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6940 mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
6941 if (!(mask & HCI_LM_ACCEPT)) {
6942 hci_le_reject_cis(hdev, ev->cis_handle);
6946 cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
6948 cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
6950 hci_le_reject_cis(hdev, ev->cis_handle);
6953 cis->handle = cis_handle;
6956 cis->iso_qos.cig = ev->cig_id;
6957 cis->iso_qos.cis = ev->cis_id;
6959 if (!(flags & HCI_PROTO_DEFER)) {
6960 hci_le_accept_cis(hdev, ev->cis_handle);
/* Deferred: leave the CIS half-open for the socket layer to decide */
6962 cis->state = BT_CONNECT2;
6963 hci_connect_cfm(cis, 0);
6967 hci_dev_unlock(hdev);
/* HCI LE Create BIG Complete event handler (broadcaster role).
 *
 * Validates the flexible array of BIS handles, looks up the pending BIG
 * connection, stores the first BIS handle on it and finishes connection
 * setup (state, sysfs/debugfs, ISO data path, connect confirmation).
 */
6970 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6971 struct sk_buff *skb)
6973 struct hci_evt_le_create_big_complete *ev = data;
6974 struct hci_conn *conn;
6976 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6978 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6979 flex_array_size(ev, bis_handle, ev->num_bis)))
6984 conn = hci_conn_hash_lookup_big(hdev, ev->handle);
6988 if (conn->type != ISO_LINK) {
6990 "Invalid connection link type handle 0x%2.2x",
6996 conn->handle = __le16_to_cpu(ev->bis_handle[0]);
6999 conn->state = BT_CONNECTED;
7000 hci_debugfs_create_conn(conn);
7001 hci_conn_add_sysfs(conn);
7002 hci_iso_setup_path(conn);
7006 hci_connect_cfm(conn, ev->status);
7010 hci_dev_unlock(hdev);
/* HCI LE BIG Sync Established event handler (broadcast receiver role).
 *
 * For each BIS handle in the event, creates (or reuses) the corresponding
 * ISO connection, fills in the received QoS parameters and sets up the ISO
 * data path.
 */
7013 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
7014 struct sk_buff *skb)
7016 struct hci_evt_le_big_sync_estabilished *ev = data;
7017 struct hci_conn *bis;
7020 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7022 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7023 flex_array_size(ev, bis, ev->num_bis)))
7031 for (i = 0; i < ev->num_bis; i++) {
7032 u16 handle = le16_to_cpu(ev->bis[i]);
7035 bis = hci_conn_hash_lookup_handle(hdev, handle);
7037 bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7041 bis->handle = handle;
7044 bis->iso_qos.big = ev->handle;
/* latency is a 24-bit little-endian field; widen via zeroed __le32 */
7045 memset(&interval, 0, sizeof(interval));
7046 memcpy(&interval, ev->latency, sizeof(ev->latency));
7047 bis->iso_qos.in.interval = le32_to_cpu(interval);
7048 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7049 bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
7050 bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);
7052 hci_iso_setup_path(bis);
7055 hci_dev_unlock(hdev);
/* LE BIGInfo Advertising Report event: a periodic-advertising train we
 * are synced to carries BIG info.  Ask the ISO layer whether anyone is
 * listening; if not, terminate the periodic advertising sync so we stop
 * receiving reports we will never consume.
 */
7058 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7059 struct sk_buff *skb)
7061 struct hci_evt_le_big_info_adv_report *ev = data;
7062 int mask = hdev->link_mode;
7065 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
/* Let the ISO protocol veto/accept; no peer address is known here */
7069 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7070 if (!(mask & HCI_LM_ACCEPT))
7071 hci_le_pa_term_sync(hdev, ev->sync_handle);
7073 hci_dev_unlock(hdev);
/* Helper macros to build hci_le_ev_table entries keyed by subevent
 * opcode.  _VL takes separate minimum/maximum lengths for
 * variable-length subevents; HCI_LE_EV is the fixed-length form and
 * HCI_LE_EV_STATUS covers subevents carrying only a status byte.
 */
7076 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7079 .min_len = _min_len, \
7080 .max_len = _max_len, \
7083 #define HCI_LE_EV(_op, _func, _len) \
7084 HCI_LE_EV_VL(_op, _func, _len, _len)
7086 #define HCI_LE_EV_STATUS(_op, _func) \
7087 HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7089 /* Entries in this table shall have their position according to the subevent
7090 * opcode they handle so the use of the macros above is recommended since it
7091 * does attempt to initialize at its proper index using Designated Initializers
7092 * that way events without a callback function can be omitted.
7094 static const struct hci_le_ev {
7095 void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7098 } hci_le_ev_table[U8_MAX + 1] = {
7099 /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7100 HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7101 sizeof(struct hci_ev_le_conn_complete)),
7102 /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7103 HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7104 sizeof(struct hci_ev_le_advertising_report),
7105 HCI_MAX_EVENT_SIZE),
7106 /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7107 HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7108 hci_le_conn_update_complete_evt,
7109 sizeof(struct hci_ev_le_conn_update_complete)),
7110 /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7111 HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7112 hci_le_remote_feat_complete_evt,
7113 sizeof(struct hci_ev_le_remote_feat_complete)),
7114 /* [0x05 = HCI_EV_LE_LTK_REQ] */
7115 HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7116 sizeof(struct hci_ev_le_ltk_req)),
7117 /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7118 HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7119 hci_le_remote_conn_param_req_evt,
7120 sizeof(struct hci_ev_le_remote_conn_param_req)),
7121 /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7122 HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7123 hci_le_enh_conn_complete_evt,
7124 sizeof(struct hci_ev_le_enh_conn_complete)),
7125 /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7126 HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7127 sizeof(struct hci_ev_le_direct_adv_report),
7128 HCI_MAX_EVENT_SIZE),
7129 /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7130 HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7131 sizeof(struct hci_ev_le_phy_update_complete)),
7132 /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7133 HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7134 sizeof(struct hci_ev_le_ext_adv_report),
7135 HCI_MAX_EVENT_SIZE),
7136 /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7137 HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7138 hci_le_pa_sync_estabilished_evt,
7139 sizeof(struct hci_ev_le_pa_sync_established)),
7140 /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7141 HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7142 sizeof(struct hci_evt_le_ext_adv_set_term)),
7143 /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7144 HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7145 sizeof(struct hci_evt_le_cis_established)),
7146 /* [0x1a = HCI_EVT_LE_CIS_REQ] */
7147 HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7148 sizeof(struct hci_evt_le_cis_req)),
7149 /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7150 HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7151 hci_le_create_big_complete_evt,
7152 sizeof(struct hci_evt_le_create_big_complete),
7153 HCI_MAX_EVENT_SIZE),
7154 /* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
7155 HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7156 hci_le_big_sync_established_evt,
7157 sizeof(struct hci_evt_le_big_sync_estabilished),
7158 HCI_MAX_EVENT_SIZE),
7159 /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7160 HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7161 hci_le_big_info_adv_report_evt,
7162 sizeof(struct hci_evt_le_big_info_adv_report),
7163 HCI_MAX_EVENT_SIZE),
/* LE Meta event (0x3e) dispatcher: validates the subevent length
 * against hci_le_ev_table and forwards the payload to the registered
 * subevent handler.  Also completes a pending LE command request whose
 * expected completion event matches this subevent.
 *
 * NOTE(review): embedded line numbers show gaps; NULL/early-return
 * checks between the visible statements are not shown here.
 */
7166 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7167 struct sk_buff *skb, u16 *opcode, u8 *status,
7168 hci_req_complete_t *req_complete,
7169 hci_req_complete_skb_t *req_complete_skb)
7171 struct hci_ev_le_meta *ev = data;
7172 const struct hci_le_ev *subev;
7174 bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7176 /* Only match event if command OGF is for LE */
7177 if (hdev->req_skb &&
7178 hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
7179 hci_skb_event(hdev->req_skb) == ev->subevent) {
7180 *opcode = hci_skb_opcode(hdev->req_skb);
7181 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
/* Designated-initializer table: unhandled subevents have func == NULL */
7185 subev = &hci_le_ev_table[ev->subevent];
/* Too short to contain the declared subevent structure: drop it */
7189 if (skb->len < subev->min_len) {
7190 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7191 ev->subevent, skb->len, subev->min_len);
7195 /* Just warn if the length is over max_len; it may still be
7196 * possible to partially parse the event, so leave it to the callback
7197 * to decide if that is acceptable.
7199 if (skb->len > subev->max_len)
7200 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7201 ev->subevent, skb->len, subev->max_len);
/* Consume the fixed-size header; the handler parses the remainder */
7202 data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7206 subev->func(hdev, data, skb);
/* Check whether @skb (a pristine clone of the received event) is the
 * Command Complete event that finishes the request for @opcode, pulling
 * the headers off so the caller's req_complete_skb callback sees only
 * the return parameters.  Returns false when the skb cannot satisfy the
 * request (wrong event, Command Status, or opcode mismatch).
 *
 * NOTE(review): embedded line numbers show gaps; the return statements
 * between the visible checks are not shown here.
 */
7209 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7210 u8 event, struct sk_buff *skb)
7212 struct hci_ev_cmd_complete *ev;
7213 struct hci_event_hdr *hdr;
7218 hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
/* If a specific completion event was requested, it must match */
7223 if (hdr->evt != event)
7228 /* Check if request ended in Command Status - no way to retrieve
7229 * any extra parameters in this case.
7231 if (hdr->evt == HCI_EV_CMD_STATUS)
7234 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7235 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7240 ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
/* The completing opcode must be the one the request issued */
7244 if (opcode != __le16_to_cpu(ev->opcode)) {
7245 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7246 __le16_to_cpu(ev->opcode));
/* Record why the controller woke the host: called for every event while
 * suspended; the first event seen determines hdev->wake_reason and, for
 * connection/advertising events, hdev->wake_addr/wake_addr_type.
 *
 * NOTE(review): embedded line numbers show gaps; the switch statement,
 * break statements and else branch around the visible lines are not
 * shown here.
 */
7253 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7254 struct sk_buff *skb)
7256 struct hci_ev_le_advertising_info *adv;
7257 struct hci_ev_le_direct_adv_info *direct_adv;
7258 struct hci_ev_le_ext_adv_info *ext_adv;
/* skb still carries the full event header + payload at this point */
7259 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7260 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7264 /* If we are currently suspended and this is the first BT event seen,
7265 * save the wake reason associated with the event.
7267 if (!hdev->suspended || hdev->wake_reason)
7270 /* Default to remote wake. Values for wake_reason are documented in the
7271 * Bluez mgmt api docs.
7273 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7275 /* Once configured for remote wakeup, we should only wake up for
7276 * reconnections. It's useful to see which device is waking us up so
7277 * keep track of the bdaddr of the connection event that woke us up.
7279 if (event == HCI_EV_CONN_REQUEST) {
7280 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7281 hdev->wake_addr_type = BDADDR_BREDR;
7282 } else if (event == HCI_EV_CONN_COMPLETE) {
7283 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7284 hdev->wake_addr_type = BDADDR_BREDR;
7285 } else if (event == HCI_EV_LE_META) {
7286 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7287 u8 subevent = le_ev->subevent;
/* ptr points at the first byte after the meta header: the
 * num_reports field shared by all three advertising reports.
 */
7288 u8 *ptr = &skb->data[sizeof(*le_ev)];
7289 u8 num_reports = *ptr;
7291 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7292 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7293 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
/* All three report layouts start right after num_reports; only
 * the cast matching the subevent below is actually used.
 */
7295 adv = (void *)(ptr + 1);
7296 direct_adv = (void *)(ptr + 1);
7297 ext_adv = (void *)(ptr + 1);
7300 case HCI_EV_LE_ADVERTISING_REPORT:
7301 bacpy(&hdev->wake_addr, &adv->bdaddr);
7302 hdev->wake_addr_type = adv->bdaddr_type;
7304 case HCI_EV_LE_DIRECT_ADV_REPORT:
7305 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7306 hdev->wake_addr_type = direct_adv->bdaddr_type;
7308 case HCI_EV_LE_EXT_ADV_REPORT:
7309 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7310 hdev->wake_addr_type = ext_adv->bdaddr_type;
/* Any other event while suspended is not an expected wake source */
7315 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7319 hci_dev_unlock(hdev);
/* Helper macros to build hci_ev_table entries keyed by event opcode.
 * The _REQ variants install func_req, which additionally receives the
 * request-completion out-parameters (opcode/status/callbacks).
 */
7322 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7326 .min_len = _min_len, \
7327 .max_len = _max_len, \
7330 #define HCI_EV(_op, _func, _len) \
7331 HCI_EV_VL(_op, _func, _len, _len)
7333 #define HCI_EV_STATUS(_op, _func) \
7334 HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7336 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7339 .func_req = _func, \
7340 .min_len = _min_len, \
7341 .max_len = _max_len, \
7344 #define HCI_EV_REQ(_op, _func, _len) \
7345 HCI_EV_REQ_VL(_op, _func, _len, _len)
7347 /* Entries in this table shall have their position according to the event
7348 * opcode they handle so the use of the macros above is recommended since it
7349 * does attempt to initialize at its proper index using Designated Initializers
7350 * that way events without a callback function don't have to be entered.
7352 static const struct hci_ev {
7355 void (*func)(struct hci_dev *hdev, void *data,
7356 struct sk_buff *skb);
7357 void (*func_req)(struct hci_dev *hdev, void *data,
7358 struct sk_buff *skb, u16 *opcode, u8 *status,
7359 hci_req_complete_t *req_complete,
7360 hci_req_complete_skb_t *req_complete_skb);
7364 } hci_ev_table[U8_MAX + 1] = {
7365 /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7366 HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7367 /* [0x02 = HCI_EV_INQUIRY_RESULT] */
7368 HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7369 sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7370 /* [0x03 = HCI_EV_CONN_COMPLETE] */
7371 HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7372 sizeof(struct hci_ev_conn_complete)),
7373 /* [0x04 = HCI_EV_CONN_REQUEST] */
7374 HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7375 sizeof(struct hci_ev_conn_request)),
7376 /* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7377 HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7378 sizeof(struct hci_ev_disconn_complete)),
7379 /* [0x06 = HCI_EV_AUTH_COMPLETE] */
7380 HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7381 sizeof(struct hci_ev_auth_complete)),
7382 /* [0x07 = HCI_EV_REMOTE_NAME] */
7383 HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7384 sizeof(struct hci_ev_remote_name)),
7385 /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7386 HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7387 sizeof(struct hci_ev_encrypt_change)),
7388 /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7389 HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7390 hci_change_link_key_complete_evt,
7391 sizeof(struct hci_ev_change_link_key_complete)),
7392 /* [0x0b = HCI_EV_REMOTE_FEATURES] */
7393 HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7394 sizeof(struct hci_ev_remote_features)),
7395 /* [0x0e = HCI_EV_CMD_COMPLETE] */
7396 HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7397 sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7398 /* [0x0f = HCI_EV_CMD_STATUS] */
7399 HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7400 sizeof(struct hci_ev_cmd_status)),
7401 /* [0x10 = HCI_EV_HARDWARE_ERROR] */
7402 HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7403 sizeof(struct hci_ev_hardware_error)),
7404 /* [0x12 = HCI_EV_ROLE_CHANGE] */
7405 HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7406 sizeof(struct hci_ev_role_change)),
7407 /* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7408 HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7409 sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7410 /* [0x14 = HCI_EV_MODE_CHANGE] */
7411 HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7412 sizeof(struct hci_ev_mode_change)),
7413 /* [0x16 = HCI_EV_PIN_CODE_REQ] */
7414 HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7415 sizeof(struct hci_ev_pin_code_req)),
7416 /* [0x17 = HCI_EV_LINK_KEY_REQ] */
7417 HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7418 sizeof(struct hci_ev_link_key_req)),
7419 /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7420 HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7421 sizeof(struct hci_ev_link_key_notify)),
7422 /* [0x1c = HCI_EV_CLOCK_OFFSET] */
7423 HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7424 sizeof(struct hci_ev_clock_offset)),
7425 /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7426 HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7427 sizeof(struct hci_ev_pkt_type_change)),
7428 /* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7429 HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7430 sizeof(struct hci_ev_pscan_rep_mode)),
7431 /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7432 HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7433 hci_inquiry_result_with_rssi_evt,
7434 sizeof(struct hci_ev_inquiry_result_rssi),
7435 HCI_MAX_EVENT_SIZE),
7436 /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7437 HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7438 sizeof(struct hci_ev_remote_ext_features)),
7439 /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7440 HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7441 sizeof(struct hci_ev_sync_conn_complete)),
7442 /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7443 HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7444 hci_extended_inquiry_result_evt,
7445 sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7446 /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7447 HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7448 sizeof(struct hci_ev_key_refresh_complete)),
7449 /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7450 HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7451 sizeof(struct hci_ev_io_capa_request)),
7452 /* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7453 HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7454 sizeof(struct hci_ev_io_capa_reply)),
7455 /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7456 HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7457 sizeof(struct hci_ev_user_confirm_req)),
7458 /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7459 HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7460 sizeof(struct hci_ev_user_passkey_req)),
7461 /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7462 HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7463 sizeof(struct hci_ev_remote_oob_data_request)),
7464 /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7465 HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7466 sizeof(struct hci_ev_simple_pair_complete)),
7467 /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7468 HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7469 sizeof(struct hci_ev_user_passkey_notify)),
7470 /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7471 HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7472 sizeof(struct hci_ev_keypress_notify)),
7473 /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7474 HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7475 sizeof(struct hci_ev_remote_host_features)),
7476 /* [0x3e = HCI_EV_LE_META] */
7477 HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7478 sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7479 #if IS_ENABLED(CONFIG_BT_HS)
7480 /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
7481 HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
7482 sizeof(struct hci_ev_phy_link_complete)),
7483 /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
7484 HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
7485 sizeof(struct hci_ev_channel_selected)),
7486 /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
7487 HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
7488 hci_disconn_loglink_complete_evt,
7489 sizeof(struct hci_ev_disconn_logical_link_complete)),
7490 /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
7491 HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
7492 sizeof(struct hci_ev_logical_link_complete)),
7493 /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
7494 HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
7495 hci_disconn_phylink_complete_evt,
7496 sizeof(struct hci_ev_disconn_phy_link_complete)),
7498 /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
7499 HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
7500 sizeof(struct hci_ev_num_comp_blocks)),
7501 /* [0xff = HCI_EV_VENDOR] */
7502 HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
/* Dispatch a single HCI event to its hci_ev_table handler after length
 * validation; handlers installed via HCI_EV_REQ* (func_req) also
 * receive the request-completion out-parameters.
 *
 * NOTE(review): embedded line numbers show gaps; the NULL-handler check
 * and early returns between the visible statements are not shown here.
 */
7505 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7506 u16 *opcode, u8 *status,
7507 hci_req_complete_t *req_complete,
7508 hci_req_complete_skb_t *req_complete_skb)
7510 const struct hci_ev *ev = &hci_ev_table[event];
/* Too short to contain the declared event structure: drop it */
7516 if (skb->len < ev->min_len) {
7517 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7518 event, skb->len, ev->min_len);
7522 /* Just warn if the length is over max_len; it may still be
7523 * possible to partially parse the event, so leave it to the callback
7524 * to decide if that is acceptable.
7526 if (skb->len > ev->max_len)
7527 bt_dev_warn_ratelimited(hdev,
7528 "unexpected event 0x%2.2x length: %u > %u",
7529 event, skb->len, ev->max_len);
/* Consume the fixed-size portion; the handler parses the remainder */
7531 data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7536 ev->func_req(hdev, data, skb, opcode, status, req_complete,
7539 ev->func(hdev, data, skb);
/* Entry point for every received HCI event packet: validates the
 * header, keeps a copy of the last received event, matches the event
 * against any pending command request, dispatches to the per-event
 * handler, and finally invokes the request completion callback(s).
 *
 * NOTE(review): embedded line numbers show gaps; the `event = hdr->evt`
 * assignment, goto labels and closing braces between the visible
 * statements are not shown here.
 */
7542 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7544 struct hci_event_hdr *hdr = (void *) skb->data;
7545 hci_req_complete_t req_complete = NULL;
7546 hci_req_complete_skb_t req_complete_skb = NULL;
7547 struct sk_buff *orig_skb = NULL;
7548 u8 status = 0, event, req_evt = 0;
7549 u16 opcode = HCI_OP_NOP;
7551 if (skb->len < sizeof(*hdr)) {
7552 bt_dev_err(hdev, "Malformed HCI Event");
/* Keep a clone of the most recent event, replacing the previous one */
7556 kfree_skb(hdev->recv_event);
7557 hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7561 bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7566 /* Only match event if command OGF is not for LE */
7567 if (hdev->req_skb &&
7568 hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
7569 hci_skb_event(hdev->req_skb) == event) {
7570 hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
7571 status, &req_complete, &req_complete_skb);
7575 /* If it looks like we might end up having to call
7576 * req_complete_skb, store a pristine copy of the skb since the
7577 * various handlers may modify the original one through
7578 * skb_pull() calls, etc.
7580 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7581 event == HCI_EV_CMD_COMPLETE)
7582 orig_skb = skb_clone(skb, GFP_KERNEL);
7584 skb_pull(skb, HCI_EVENT_HDR_SIZE);
7586 /* Store wake reason if we're suspended */
7587 hci_store_wake_reason(hdev, event, skb);
7589 bt_dev_dbg(hdev, "event 0x%2.2x", event);
/* Dispatch; may fill in opcode/status/req_complete(_skb) */
7591 hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7595 req_complete(hdev, status, opcode);
7596 } else if (req_complete_skb) {
/* Only hand the clone over if it really completes the request */
7597 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7598 kfree_skb(orig_skb);
7601 req_complete_skb(hdev, status, opcode, orig_skb);
7605 kfree_skb(orig_skb);
7607 hdev->stat.evt_rx++;