2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI event handling. */
28 #include <asm/unaligned.h>
29 #include <linux/crypto.h>
30 #include <crypto/algapi.h>
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
37 #include "hci_debugfs.h"
38 #include "hci_codec.h"
/* 16-byte all-zero link-key constant (two concatenated 8-byte halves). */
43 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
44 "\x00\x00\x00\x00\x00\x00\x00\x00"
46 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
48 /* Handle HCI Event packets */
/* Pull @len bytes of event payload from @skb; on failure logs the event
 * opcode as malformed. NOTE(review): intervening source lines are elided
 * in this excerpt.
 */
50 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
55 data = skb_pull_data(skb, len);
57 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
/* Pull @len bytes of a Command Complete payload from @skb; logs the
 * command opcode if the payload is too short.
 */
62 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
67 data = skb_pull_data(skb, len);
69 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
/* Pull @len bytes of an LE meta-event payload from @skb; logs the LE
 * sub-event code if the payload is too short.
 */
74 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
79 data = skb_pull_data(skb, len);
81 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
/* Command Complete handler for HCI_Inquiry_Cancel: tolerates a
 * Command Disallowed status when no inquiry is running, clears the
 * HCI_INQUIRY flag (waking any waiters), and moves discovery to
 * STOPPED unless an LE active scan is still in progress.
 */
86 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
89 struct hci_ev_status *rp = data;
91 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
93 /* It is possible that we receive Inquiry Complete event right
94 * before we receive Inquiry Cancel Command Complete event, in
95 * which case the latter event should have status of Command
96 * Disallowed. This should not be treated as error, since
97 * we actually achieve what Inquiry Cancel wants to achieve,
98 * which is to end the last Inquiry session.
100 if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
101 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command")
108 clear_bit(HCI_INQUIRY, &hdev->flags);
109 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
110 wake_up_bit(&hdev->flags, HCI_INQUIRY);
113 /* Set discovery state to stopped if we're not doing LE active
116 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
117 hdev->le_scan_type != LE_SCAN_ACTIVE)
118 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
119 hci_dev_unlock(hdev);
/* Command Complete for Periodic Inquiry Mode: on success, marks the
 * controller as periodically inquiring via HCI_PERIODIC_INQ.
 */
124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
127 struct hci_ev_status *rp = data;
129 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
134 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete for Exit Periodic Inquiry Mode: clears the
 * HCI_PERIODIC_INQ device flag.
 */
139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
142 struct hci_ev_status *rp = data;
144 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
149 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete for Remote Name Request Cancel: only logs the
 * status in this visible excerpt.
 */
154 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
157 struct hci_ev_status *rp = data;
159 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Command Complete for Role Discovery: looks up the connection by
 * handle and records its current role (central/peripheral).
 */
164 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
167 struct hci_rp_role_discovery *rp = data;
168 struct hci_conn *conn;
170 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
177 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
179 conn->role = rp->role;
181 hci_dev_unlock(hdev);
/* Command Complete for Read Link Policy Settings: stores the returned
 * policy on the matching connection.
 */
186 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
189 struct hci_rp_read_link_policy *rp = data;
190 struct hci_conn *conn;
192 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
199 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
201 conn->link_policy = __le16_to_cpu(rp->policy);
203 hci_dev_unlock(hdev);
/* Command Complete for Write Link Policy Settings: re-reads the policy
 * value from the originally sent command (offset 2 past the handle) and
 * caches it on the connection.
 */
208 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
211 struct hci_rp_write_link_policy *rp = data;
212 struct hci_conn *conn;
215 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
220 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
226 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
228 conn->link_policy = get_unaligned_le16(sent + 2);
230 hci_dev_unlock(hdev);
/* Command Complete for Read Default Link Policy: caches the
 * controller-wide default policy in hdev->link_policy.
 */
235 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
238 struct hci_rp_read_def_link_policy *rp = data;
240 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
245 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete for Write Default Link Policy: mirrors the value we
 * sent into hdev->link_policy.
 */
250 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
253 struct hci_ev_status *rp = data;
256 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
261 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
265 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete for HCI_Reset: clears the in-progress reset bit and
 * restores the stack's software state to post-reset defaults (volatile
 * flags, discovery state, cached TX power, advertising/scan-response
 * data, LE scan type, SSP debug mode, LE accept/resolving lists).
 */
270 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
272 struct hci_ev_status *rp = data;
274 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
276 clear_bit(HCI_RESET, &hdev->flags);
281 /* Reset all non-persistent flags */
282 hci_dev_clear_volatile_flags(hdev);
284 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
286 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
287 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
289 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
290 hdev->adv_data_len = 0;
292 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
293 hdev->scan_rsp_data_len = 0;
295 hdev->le_scan_type = LE_SCAN_PASSIVE;
297 hdev->ssp_debug_mode = 0;
299 hci_bdaddr_list_clear(&hdev->le_accept_list);
300 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete for Read Stored Link Key: when the original command
 * asked for all keys (read_all == 0x01) and succeeded, caches the
 * controller's max/current stored key counts.
 */
305 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
308 struct hci_rp_read_stored_link_key *rp = data;
309 struct hci_cp_read_stored_link_key *sent;
311 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
313 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
317 if (!rp->status && sent->read_all == 0x01) {
318 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
319 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
/* Command Complete for Delete Stored Link Key: decrements the cached
 * stored-key count, clamping at zero to avoid underflow.
 */
325 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
328 struct hci_rp_delete_stored_link_key *rp = data;
331 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
336 num_keys = le16_to_cpu(rp->num_keys);
338 if (num_keys <= hdev->stored_num_keys)
339 hdev->stored_num_keys -= num_keys;
341 hdev->stored_num_keys = 0;
/* Command Complete for Write Local Name: notifies mgmt when managed,
 * otherwise (on success) caches the name we sent in hdev->dev_name.
 */
346 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
349 struct hci_ev_status *rp = data;
352 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
354 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
360 if (hci_dev_test_flag(hdev, HCI_MGMT))
361 mgmt_set_local_name_complete(hdev, sent, rp->status);
362 else if (!rp->status)
363 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
365 hci_dev_unlock(hdev);
/* Command Complete for Read Local Name: only trusted during initial
 * SETUP/CONFIG, when the controller's name is copied into dev_name.
 */
370 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
373 struct hci_rp_read_local_name *rp = data;
375 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
380 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
381 hci_dev_test_flag(hdev, HCI_CONFIG))
382 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete for Write Authentication Enable: mirrors the sent
 * parameter into the HCI_AUTH flag and informs mgmt when managed.
 */
387 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
390 struct hci_ev_status *rp = data;
393 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
395 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
402 __u8 param = *((__u8 *) sent);
404 if (param == AUTH_ENABLED)
405 set_bit(HCI_AUTH, &hdev->flags);
407 clear_bit(HCI_AUTH, &hdev->flags);
410 if (hci_dev_test_flag(hdev, HCI_MGMT))
411 mgmt_auth_enable_complete(hdev, rp->status);
413 hci_dev_unlock(hdev);
/* Command Complete for Write Encryption Mode: mirrors the parameter we
 * sent into the HCI_ENCRYPT device flag.
 */
418 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
421 struct hci_ev_status *rp = data;
425 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
430 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
434 param = *((__u8 *) sent);
437 set_bit(HCI_ENCRYPT, &hdev->flags);
439 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete for Write Scan Enable: translates the SCAN_INQUIRY
 * and SCAN_PAGE bits of the sent parameter into the HCI_ISCAN/HCI_PSCAN
 * flags, resetting discov_timeout along the way.
 */
444 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
447 struct hci_ev_status *rp = data;
451 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
453 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
457 param = *((__u8 *) sent);
462 hdev->discov_timeout = 0;
466 if (param & SCAN_INQUIRY)
467 set_bit(HCI_ISCAN, &hdev->flags);
469 clear_bit(HCI_ISCAN, &hdev->flags);
471 if (param & SCAN_PAGE)
472 set_bit(HCI_PSCAN, &hdev->flags);
474 clear_bit(HCI_PSCAN, &hdev->flags);
477 hci_dev_unlock(hdev);
/* Command Complete for Set Event Filter: tracks whether a filter is
 * currently configured via the HCI_EVENT_FILTER_CONFIGURED flag
 * (cleared on HCI_FLT_CLEAR_ALL, set otherwise).
 */
482 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
485 struct hci_ev_status *rp = data;
486 struct hci_cp_set_event_filter *cp;
489 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
494 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
498 cp = (struct hci_cp_set_event_filter *)sent;
500 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
501 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
503 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
/* Command Complete for Read Class of Device: copies the 3-byte CoD into
 * hdev->dev_class and logs it (big-endian byte order in the log).
 */
508 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
511 struct hci_rp_read_class_of_dev *rp = data;
514 return HCI_ERROR_UNSPECIFIED;
516 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
521 memcpy(hdev->dev_class, rp->dev_class, 3);
523 bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
524 hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete for Write Class of Device: caches the CoD we sent
 * and notifies mgmt when the device is managed.
 */
529 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
532 struct hci_ev_status *rp = data;
535 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
537 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
544 memcpy(hdev->dev_class, sent, 3);
546 if (hci_dev_test_flag(hdev, HCI_MGMT))
547 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
549 hci_dev_unlock(hdev);
/* Command Complete for Read Voice Setting: caches the setting if it
 * changed and notifies the driver via HCI_NOTIFY_VOICE_SETTING.
 */
554 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
557 struct hci_rp_read_voice_setting *rp = data;
560 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
565 setting = __le16_to_cpu(rp->voice_setting);
567 if (hdev->voice_setting == setting)
570 hdev->voice_setting = setting;
572 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
575 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for Write Voice Setting: mirrors the value we sent
 * into hdev->voice_setting (when it changed) and notifies the driver.
 */
580 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
583 struct hci_ev_status *rp = data;
587 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
592 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
596 setting = get_unaligned_le16(sent);
598 if (hdev->voice_setting == setting)
601 hdev->voice_setting = setting;
603 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
606 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for Read Number of Supported IAC: caches the count
 * of inquiry access codes supported by the controller.
 */
611 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
614 struct hci_rp_read_num_supported_iac *rp = data;
616 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
621 hdev->num_iac = rp->num_iac;
623 bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
/* Command Complete for Write Simple Pairing Mode: updates the host SSP
 * feature bit and the HCI_SSP_ENABLED flag to match the sent mode.
 */
628 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
631 struct hci_ev_status *rp = data;
632 struct hci_cp_write_ssp_mode *sent;
634 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
636 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
644 hdev->features[1][0] |= LMP_HOST_SSP;
646 hdev->features[1][0] &= ~LMP_HOST_SSP;
651 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
653 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
656 hci_dev_unlock(hdev);
/* Command Complete for Write Secure Connections Host Support: updates
 * the host SC feature bit, and (when not mgmt-managed and successful)
 * toggles HCI_SC_ENABLED accordingly.
 */
661 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
664 struct hci_ev_status *rp = data;
665 struct hci_cp_write_sc_support *sent;
667 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
669 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
677 hdev->features[1][0] |= LMP_HOST_SC;
679 hdev->features[1][0] &= ~LMP_HOST_SC;
682 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
684 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
686 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
689 hci_dev_unlock(hdev);
/* Command Complete for Read Local Version Information: caches HCI/LMP
 * versions and the manufacturer ID, but only during SETUP/CONFIG.
 */
694 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
697 struct hci_rp_read_local_version *rp = data;
699 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
704 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
705 hci_dev_test_flag(hdev, HCI_CONFIG)) {
706 hdev->hci_ver = rp->hci_ver;
707 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
708 hdev->lmp_ver = rp->lmp_ver;
709 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
710 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete for Read Encryption Key Size: records the key size
 * on the connection (0 on command failure, forcing re-encryption), and
 * if the key is shorter than the configured minimum, downgrades the
 * connection's security by clearing its encryption bits and reporting
 * an authentication failure to hci_encrypt_cfm().
 */
716 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
719 struct hci_rp_read_enc_key_size *rp = data;
720 struct hci_conn *conn;
722 u8 status = rp->status;
724 bt_dev_dbg(hdev, "status 0x%2.2x", status);
726 handle = le16_to_cpu(rp->handle);
730 conn = hci_conn_hash_lookup_handle(hdev, handle);
736 /* While unexpected, the read_enc_key_size command may fail. The most
737 * secure approach is to then assume the key size is 0 to force a
741 bt_dev_err(hdev, "failed to read key size for handle %u",
743 conn->enc_key_size = 0;
745 conn->enc_key_size = rp->key_size;
748 if (conn->enc_key_size < hdev->min_enc_key_size) {
749 /* As slave role, the conn->state has been set to
750 * BT_CONNECTED and l2cap conn req might not be received
751 * yet, at this moment the l2cap layer almost does
752 * nothing with the non-zero status.
753 * So we also clear encrypt related bits, and then the
754 * handler of l2cap conn req will get the right secure
755 * state at a later time.
757 status = HCI_ERROR_AUTH_FAILURE;
758 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
759 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
763 hci_encrypt_cfm(conn, status);
766 hci_dev_unlock(hdev);
/* Command Complete for Read Local Supported Commands: caches the
 * supported-commands bitmap, but only during SETUP/CONFIG.
 */
771 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
774 struct hci_rp_read_local_commands *rp = data;
776 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
781 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
782 hci_dev_test_flag(hdev, HCI_CONFIG))
783 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete for Read Authenticated Payload Timeout: stores the
 * timeout on the connection identified by the returned handle.
 */
788 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
791 struct hci_rp_read_auth_payload_to *rp = data;
792 struct hci_conn *conn;
794 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
801 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
803 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
805 hci_dev_unlock(hdev);
/* Command Complete for Write Authenticated Payload Timeout: re-reads
 * the timeout from the sent command (offset 2 past the handle) and
 * caches it on the connection.
 */
810 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
813 struct hci_rp_write_auth_payload_to *rp = data;
814 struct hci_conn *conn;
817 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
819 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
825 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
832 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
835 hci_dev_unlock(hdev);
/* Command Complete for Read Local Supported Features: caches page 0 of
 * the LMP feature mask and derives the supported ACL packet types
 * (DM3/DH3/DM5/DH5), SCO HV packet types, and eSCO packet types from
 * the individual feature bits.
 */
840 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
843 struct hci_rp_read_local_features *rp = data;
845 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
850 memcpy(hdev->features, rp->features, 8);
852 /* Adjust default settings according to features
853 * supported by device. */
855 if (hdev->features[0][0] & LMP_3SLOT)
856 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
858 if (hdev->features[0][0] & LMP_5SLOT)
859 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
861 if (hdev->features[0][1] & LMP_HV2) {
862 hdev->pkt_type |= (HCI_HV2);
863 hdev->esco_type |= (ESCO_HV2);
866 if (hdev->features[0][1] & LMP_HV3) {
867 hdev->pkt_type |= (HCI_HV3);
868 hdev->esco_type |= (ESCO_HV3);
871 if (lmp_esco_capable(hdev))
872 hdev->esco_type |= (ESCO_EV3);
874 if (hdev->features[0][4] & LMP_EV4)
875 hdev->esco_type |= (ESCO_EV4);
877 if (hdev->features[0][4] & LMP_EV5)
878 hdev->esco_type |= (ESCO_EV5);
880 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
881 hdev->esco_type |= (ESCO_2EV3);
883 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
884 hdev->esco_type |= (ESCO_3EV3);
886 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
887 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete for Read Local Extended Features: raises max_page
 * (unless the broken-page-2 quirk is set, which only warns) and stores
 * the returned feature page when its index is in range.
 */
892 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
895 struct hci_rp_read_local_ext_features *rp = data;
897 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
902 if (hdev->max_page < rp->max_page) {
903 if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
905 bt_dev_warn(hdev, "broken local ext features page 2");
907 hdev->max_page = rp->max_page;
910 if (rp->page < HCI_MAX_PAGES)
911 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete for Read Flow Control Mode: caches the mode. */
916 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
919 struct hci_rp_read_flow_control_mode *rp = data;
921 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
926 hdev->flow_ctl_mode = rp->mode;
/* Command Complete for Read Buffer Size: caches ACL/SCO MTUs and packet
 * counts (with a quirk hook for fixed-up sizes) and initializes the
 * available-credit counters to the full packet counts.
 */
931 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
934 struct hci_rp_read_buffer_size *rp = data;
936 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
941 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
942 hdev->sco_mtu = rp->sco_mtu;
943 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
944 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
946 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
951 hdev->acl_cnt = hdev->acl_pkts;
952 hdev->sco_cnt = hdev->sco_pkts;
954 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
955 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete for Read BD_ADDR: records the controller address
 * during init, and additionally snapshots it as setup_addr during SETUP.
 */
960 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
963 struct hci_rp_read_bd_addr *rp = data;
965 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
970 if (test_bit(HCI_INIT, &hdev->flags))
971 bacpy(&hdev->bdaddr, &rp->bdaddr);
973 if (hci_dev_test_flag(hdev, HCI_SETUP))
974 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete for Read Local Pairing Options: caches the pairing
 * options and maximum encryption key size during SETUP/CONFIG.
 */
979 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
982 struct hci_rp_read_local_pairing_opts *rp = data;
984 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
989 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
990 hci_dev_test_flag(hdev, HCI_CONFIG)) {
991 hdev->pairing_opts = rp->pairing_opts;
992 hdev->max_enc_key_size = rp->max_key_size;
/* Command Complete for Read Page Scan Activity: caches the interval and
 * window, but only while the device is in HCI_INIT.
 */
998 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
1001 struct hci_rp_read_page_scan_activity *rp = data;
1003 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1008 if (test_bit(HCI_INIT, &hdev->flags)) {
1009 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1010 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete for Write Page Scan Activity: mirrors the interval
 * and window we sent into the hdev cache.
 */
1016 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1017 struct sk_buff *skb)
1019 struct hci_ev_status *rp = data;
1020 struct hci_cp_write_page_scan_activity *sent;
1022 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1027 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1031 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1032 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete for Read Page Scan Type: caches the type, but only
 * while the device is in HCI_INIT.
 */
1037 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1038 struct sk_buff *skb)
1040 struct hci_rp_read_page_scan_type *rp = data;
1042 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1047 if (test_bit(HCI_INIT, &hdev->flags))
1048 hdev->page_scan_type = rp->type;
/* Command Complete for Write Page Scan Type: mirrors the type byte we
 * sent into hdev->page_scan_type.
 */
1053 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1054 struct sk_buff *skb)
1056 struct hci_ev_status *rp = data;
1059 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1064 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1066 hdev->page_scan_type = *type;
/* Command Complete for Read Data Block Size (block-based flow control):
 * caches the block MTU, block length and block count, and initializes
 * the free-block counter to the total.
 */
1071 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1072 struct sk_buff *skb)
1074 struct hci_rp_read_data_block_size *rp = data;
1076 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1081 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1082 hdev->block_len = __le16_to_cpu(rp->block_len);
1083 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1085 hdev->block_cnt = hdev->num_blocks;
1087 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1088 hdev->block_cnt, hdev->block_len);
/* Command Complete for Read Clock: "which" from the sent command picks
 * between the local clock (stored on hdev) and a piconet clock (stored
 * with its accuracy on the connection looked up by handle).
 */
1093 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1094 struct sk_buff *skb)
1096 struct hci_rp_read_clock *rp = data;
1097 struct hci_cp_read_clock *cp;
1098 struct hci_conn *conn;
1100 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1107 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1111 if (cp->which == 0x00) {
1112 hdev->clock = le32_to_cpu(rp->clock);
1116 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1118 conn->clock = le32_to_cpu(rp->clock);
1119 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1123 hci_dev_unlock(hdev);
/* Command Complete for Read Local AMP Info: caches the full set of AMP
 * controller capabilities (status, bandwidths, latency, PDU size, type,
 * PAL capabilities, association size, flush timeouts).
 */
1127 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1128 struct sk_buff *skb)
1130 struct hci_rp_read_local_amp_info *rp = data;
1132 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1137 hdev->amp_status = rp->amp_status;
1138 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1139 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1140 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1141 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1142 hdev->amp_type = rp->amp_type;
1143 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1144 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1145 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1146 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete for Read Inquiry Response TX Power: caches it. */
1151 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1152 struct sk_buff *skb)
1154 struct hci_rp_read_inq_rsp_tx_power *rp = data;
1156 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1161 hdev->inq_tx_power = rp->tx_power;
/* Command Complete for Read Default Erroneous Data Reporting: caches
 * the current reporting setting.
 */
1166 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1167 struct sk_buff *skb)
1169 struct hci_rp_read_def_err_data_reporting *rp = data;
1171 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1176 hdev->err_data_reporting = rp->err_data_reporting;
/* Command Complete for Write Default Erroneous Data Reporting: mirrors
 * the value we sent into the hdev cache.
 */
1181 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1182 struct sk_buff *skb)
1184 struct hci_ev_status *rp = data;
1185 struct hci_cp_write_def_err_data_reporting *cp;
1187 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1192 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1196 hdev->err_data_reporting = cp->err_data_reporting;
/* Command Complete for PIN Code Request Reply: notifies mgmt when
 * managed, then records the PIN length on the ACL connection matched by
 * the address from the originally sent command.
 */
1201 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1202 struct sk_buff *skb)
1204 struct hci_rp_pin_code_reply *rp = data;
1205 struct hci_cp_pin_code_reply *cp;
1206 struct hci_conn *conn;
1208 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1212 if (hci_dev_test_flag(hdev, HCI_MGMT))
1213 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1218 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1222 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1224 conn->pin_length = cp->pin_len;
1227 hci_dev_unlock(hdev);
/* Command Complete for PIN Code Request Negative Reply: forwards the
 * outcome to mgmt when the device is managed.
 */
1231 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1232 struct sk_buff *skb)
1234 struct hci_rp_pin_code_neg_reply *rp = data;
1236 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1240 if (hci_dev_test_flag(hdev, HCI_MGMT))
1241 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1244 hci_dev_unlock(hdev);
/* Command Complete for LE Read Buffer Size: caches the LE ACL MTU and
 * packet count and initializes the LE credit counter to the full count.
 */
1249 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1250 struct sk_buff *skb)
1252 struct hci_rp_le_read_buffer_size *rp = data;
1254 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1259 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1260 hdev->le_pkts = rp->le_max_pkt;
1262 hdev->le_cnt = hdev->le_pkts;
1264 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete for LE Read Local Supported Features: caches the
 * 8-byte LE feature mask.
 */
1269 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1270 struct sk_buff *skb)
1272 struct hci_rp_le_read_local_features *rp = data;
1274 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1279 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete for LE Read Advertising Channel TX Power: caches the
 * advertising TX power level.
 */
1284 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1285 struct sk_buff *skb)
1287 struct hci_rp_le_read_adv_tx_power *rp = data;
1289 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1294 hdev->adv_tx_power = rp->tx_power;
/* Command Complete for User Confirmation Request Reply: forwards the
 * outcome to mgmt when the device is managed.
 */
1299 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1300 struct sk_buff *skb)
1302 struct hci_rp_user_confirm_reply *rp = data;
1304 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1308 if (hci_dev_test_flag(hdev, HCI_MGMT))
1309 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1312 hci_dev_unlock(hdev);
/* Command Complete for User Confirmation Request Negative Reply:
 * forwards the outcome to mgmt when the device is managed.
 */
1317 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1318 struct sk_buff *skb)
1320 struct hci_rp_user_confirm_reply *rp = data;
1322 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1326 if (hci_dev_test_flag(hdev, HCI_MGMT))
1327 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1328 ACL_LINK, 0, rp->status);
1330 hci_dev_unlock(hdev);
/* Command Complete for User Passkey Request Reply: forwards the outcome
 * to mgmt when the device is managed.
 */
1335 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1336 struct sk_buff *skb)
1338 struct hci_rp_user_confirm_reply *rp = data;
1340 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1344 if (hci_dev_test_flag(hdev, HCI_MGMT))
1345 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1348 hci_dev_unlock(hdev);
/* Command Complete for User Passkey Request Negative Reply: forwards
 * the outcome to mgmt when the device is managed.
 */
1353 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1354 struct sk_buff *skb)
1356 struct hci_rp_user_confirm_reply *rp = data;
1358 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1362 if (hci_dev_test_flag(hdev, HCI_MGMT))
1363 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1364 ACL_LINK, 0, rp->status);
1366 hci_dev_unlock(hdev);
/* Command Complete for Read Local OOB Data: only logs the status in
 * this visible excerpt.
 */
1371 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1372 struct sk_buff *skb)
1374 struct hci_rp_read_local_oob_data *rp = data;
1376 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Command Complete for Read Local OOB Extended Data: only logs the
 * status in this visible excerpt.
 */
1381 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1382 struct sk_buff *skb)
1384 struct hci_rp_read_local_oob_ext_data *rp = data;
1386 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Command Complete for LE Set Random Address: records the address we
 * set; if it matches the current RPA, clears RPA_EXPIRED and re-arms
 * the RPA rotation timer.
 */
1391 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1392 struct sk_buff *skb)
1394 struct hci_ev_status *rp = data;
1397 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1402 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1408 bacpy(&hdev->random_addr, sent);
1410 if (!bacmp(&hdev->rpa, sent)) {
1411 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1412 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1413 secs_to_jiffies(hdev->rpa_timeout));
1416 hci_dev_unlock(hdev);
/* Command Complete for LE Set Default PHY: caches the TX/RX PHY
 * preferences we sent.
 */
1421 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1422 struct sk_buff *skb)
1424 struct hci_ev_status *rp = data;
1425 struct hci_cp_le_set_default_phy *cp;
1427 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1432 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1438 hdev->le_tx_def_phys = cp->tx_phys;
1439 hdev->le_rx_def_phys = cp->rx_phys;
1441 hci_dev_unlock(hdev);
/* Command Complete for LE Set Advertising Set Random Address: for
 * non-zero adv handles only (handle 0 uses LE Set Random Address),
 * stores the address on the adv instance and re-arms its RPA expiry
 * timer if the address is the current RPA.
 */
1446 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1447 struct sk_buff *skb)
1449 struct hci_ev_status *rp = data;
1450 struct hci_cp_le_set_adv_set_rand_addr *cp;
1451 struct adv_info *adv;
1453 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1458 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1459 /* Update only in case the adv instance since handle 0x00 shall be using
1460 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1461 * non-extended adverting.
1463 if (!cp || !cp->handle)
1468 adv = hci_find_adv_instance(hdev, cp->handle);
1470 bacpy(&adv->random_addr, &cp->bdaddr);
1471 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1472 adv->rpa_expired = false;
1473 queue_delayed_work(hdev->workqueue,
1474 &adv->rpa_expired_cb,
1475 secs_to_jiffies(hdev->rpa_timeout));
1479 hci_dev_unlock(hdev);
/* Command Complete for LE Remove Advertising Set: drops the matching
 * adv instance and notifies mgmt that the advertising was removed.
 */
1484 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1485 struct sk_buff *skb)
1487 struct hci_ev_status *rp = data;
1491 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1496 instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1502 err = hci_remove_adv_instance(hdev, *instance);
1504 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1507 hci_dev_unlock(hdev);
/* Command Complete for LE Clear Advertising Sets: removes every adv
 * instance, notifying mgmt for each one that is removed.
 */
1512 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1513 struct sk_buff *skb)
1515 struct hci_ev_status *rp = data;
1516 struct adv_info *adv, *n;
1519 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1524 if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1529 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1530 u8 instance = adv->instance;
1532 err = hci_remove_adv_instance(hdev, instance);
1534 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1538 hci_dev_unlock(hdev);
/* Command Complete for LE Read Transmit Power: caches the controller's
 * minimum and maximum LE TX power levels.
 */
1543 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1544 struct sk_buff *skb)
1546 struct hci_rp_le_read_transmit_power *rp = data;
1548 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1553 hdev->min_le_tx_power = rp->min_le_tx_power;
1554 hdev->max_le_tx_power = rp->max_le_tx_power;
/* Command Complete for LE Set Privacy Mode: records the mode we sent on
 * the matching connection-parameters entry (WRITE_ONCE because the
 * field is read without this lock elsewhere).
 */
1559 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1560 struct sk_buff *skb)
1562 struct hci_ev_status *rp = data;
1563 struct hci_cp_le_set_privacy_mode *cp;
1564 struct hci_conn_params *params;
1566 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1571 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1577 params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1579 WRITE_ONCE(params->privacy_mode, cp->mode);
1581 hci_dev_unlock(hdev);
/* Command Complete for LE Set Advertising Enable: toggles HCI_LE_ADV
 * to match the sent parameter, and when enabling while a peripheral
 * connection attempt is pending, arms the LE connection timeout.
 */
1586 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1587 struct sk_buff *skb)
1589 struct hci_ev_status *rp = data;
1592 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1597 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1603 /* If we're doing connection initiation as peripheral. Set a
1604 * timeout in case something goes wrong.
1607 struct hci_conn *conn;
1609 hci_dev_set_flag(hdev, HCI_LE_ADV);
1611 conn = hci_lookup_le_connect(hdev);
1613 queue_delayed_work(hdev->workqueue,
1614 &conn->le_conn_timeout,
1615 conn->conn_timeout);
1617 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1620 hci_dev_unlock(hdev);
/* Command Complete for LE Set Extended Advertising Enable: on enable,
 * sets HCI_LE_ADV, marks the (non-periodic) adv instance enabled and
 * arms the pending LE connection timeout; on disable, marks the
 * targeted instance(s) disabled and clears HCI_LE_ADV once no enabled
 * instance remains.
 */
1625 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1626 struct sk_buff *skb)
1628 struct hci_cp_le_set_ext_adv_enable *cp;
1629 struct hci_cp_ext_adv_set *set;
1630 struct adv_info *adv = NULL, *n;
1631 struct hci_ev_status *rp = data;
1633 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1638 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1642 set = (void *)cp->data;
1646 if (cp->num_of_sets)
1647 adv = hci_find_adv_instance(hdev, set->handle);
1650 struct hci_conn *conn;
1652 hci_dev_set_flag(hdev, HCI_LE_ADV);
1654 if (adv && !adv->periodic)
1655 adv->enabled = true;
1657 conn = hci_lookup_le_connect(hdev);
1659 queue_delayed_work(hdev->workqueue,
1660 &conn->le_conn_timeout,
1661 conn->conn_timeout);
1663 if (cp->num_of_sets) {
1665 adv->enabled = false;
1667 /* If just one instance was disabled check if there are
1668 * any other instance enabled before clearing HCI_LE_ADV
1670 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1676 /* All instances shall be considered disabled */
1677 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1679 adv->enabled = false;
1682 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1686 hci_dev_unlock(hdev);
/* Command Complete for LE Set Scan Parameters: caches the scan type
 * (active/passive) we sent.
 */
1690 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1691 struct sk_buff *skb)
1693 struct hci_cp_le_set_scan_param *cp;
1694 struct hci_ev_status *rp = data;
1696 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1701 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1707 hdev->le_scan_type = cp->type;
1709 hci_dev_unlock(hdev);
/* Command Complete for LE Set Extended Scan Parameters: caches the scan
 * type from the first per-PHY parameter block of the sent command.
 */
1714 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1715 struct sk_buff *skb)
1717 struct hci_cp_le_set_ext_scan_params *cp;
1718 struct hci_ev_status *rp = data;
1719 struct hci_cp_le_scan_phy_params *phy_param;
1721 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1726 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1730 phy_param = (void *)cp->data;
1734 hdev->le_scan_type = phy_param->type;
1736 hci_dev_unlock(hdev);
/* True when a deferred advertising report is buffered in the discovery
 * state (last_adv_addr differs from BDADDR_ANY).
 */
1741 static bool has_pending_adv_report(struct hci_dev *hdev)
1743 struct discovery_state *d = &hdev->discovery;
1745 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop any buffered advertising report by resetting the sentinel
 * address and data length.
 */
1748 static void clear_pending_adv_report(struct hci_dev *hdev)
1750 struct discovery_state *d = &hdev->discovery;
1752 bacpy(&d->last_adv_addr, BDADDR_ANY);
1753 d->last_adv_data_len = 0;
/* Buffer one advertising report (addr/type/rssi/flags/data) in the
 * discovery state so it can be merged with a later scan response.
 */
1756 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1757 u8 bdaddr_type, s8 rssi, u32 flags,
1760 struct discovery_state *d = &hdev->discovery;
/* Reject payloads larger than the controller's advertising data limit. */
1762 if (len > max_adv_len(hdev))
1765 bacpy(&d->last_adv_addr, bdaddr);
1766 d->last_adv_addr_type = bdaddr_type;
1767 d->last_adv_rssi = rssi;
1768 d->last_adv_flags = flags;
1769 memcpy(d->last_adv_data, data, len);
1770 d->last_adv_data_len = len;
/* Common completion logic for the (extended and legacy) LE scan enable
 * commands: updates the HCI_LE_SCAN flag, flushes/raises any pending
 * advertising report, and drives the discovery state machine.
 * NOTE(review): switch header and some braces were dropped by the
 * extraction; code text is preserved verbatim.
 */
1773 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1778 case LE_SCAN_ENABLE:
1779 hci_dev_set_flag(hdev, HCI_LE_SCAN);
/* Active scanning merges ADV_IND with SCAN_RSP, so start clean. */
1780 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1781 clear_pending_adv_report(hdev);
1782 if (hci_dev_test_flag(hdev, HCI_MESH))
1783 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1786 case LE_SCAN_DISABLE:
1787 /* We do this here instead of when setting DISCOVERY_STOPPED
1788 * since the latter would potentially require waiting for
1789 * inquiry to stop too.
1791 if (has_pending_adv_report(hdev)) {
1792 struct discovery_state *d = &hdev->discovery;
/* Flush the report that never got its scan response. */
1794 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1795 d->last_adv_addr_type, NULL,
1796 d->last_adv_rssi, d->last_adv_flags,
1798 d->last_adv_data_len, NULL, 0, 0);
1801 /* Cancel this timer so that we don't try to disable scanning
1802 * when it's already disabled.
1804 cancel_delayed_work(&hdev->le_scan_disable);
1806 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1808 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1809 * interrupted scanning due to a connect request. Mark
1810 * therefore discovery as stopped.
1812 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1813 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1814 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1815 hdev->discovery.state == DISCOVERY_FINDING)
1816 queue_work(hdev->workqueue, &hdev->reenable_adv_work);
/* Any other value is reserved by the spec; log and ignore. */
1821 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1826 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE; defers all
 * state handling to le_set_scan_enable_complete().
 */
1829 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1830 struct sk_buff *skb)
1832 struct hci_cp_le_set_scan_enable *cp;
1833 struct hci_ev_status *rp = data;
1835 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1840 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1844 le_set_scan_enable_complete(hdev, cp->enable);
/* Extended-scan variant; same completion path as the legacy command. */
1849 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1850 struct sk_buff *skb)
1852 struct hci_cp_le_set_ext_scan_enable *cp;
1853 struct hci_ev_status *rp = data;
1855 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1860 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1864 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete: cache the number of advertising sets the controller
 * supports.
 */
1869 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1870 struct sk_buff *skb)
1872 struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1874 bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1880 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete: cache the controller's LE accept (filter) list size. */
1885 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1886 struct sk_buff *skb)
1888 struct hci_rp_le_read_accept_list_size *rp = data;
1890 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1895 hdev->le_accept_list_size = rp->size;
/* Command Complete: mirror a successful controller-side accept-list
 * clear into the host's shadow list.
 */
1900 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1901 struct sk_buff *skb)
1903 struct hci_ev_status *rp = data;
1905 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1911 hci_bdaddr_list_clear(&hdev->le_accept_list);
1912 hci_dev_unlock(hdev);
/* Command Complete: mirror a successful accept-list add into the host's
 * shadow list, using the address from the original command.
 */
1917 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1918 struct sk_buff *skb)
1920 struct hci_cp_le_add_to_accept_list *sent;
1921 struct hci_ev_status *rp = data;
1923 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1928 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1933 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1935 hci_dev_unlock(hdev);
/* Command Complete: mirror a successful accept-list removal into the
 * host's shadow list.
 */
1940 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1941 struct sk_buff *skb)
1943 struct hci_cp_le_del_from_accept_list *sent;
1944 struct hci_ev_status *rp = data;
1946 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1951 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1956 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1958 hci_dev_unlock(hdev);
/* Command Complete: cache the 8-byte LE supported-states bitmap. */
1963 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1964 struct sk_buff *skb)
1966 struct hci_rp_le_read_supported_states *rp = data;
1968 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1973 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete: cache the controller's suggested default LE data
 * length (octets and time), converting from little-endian wire order.
 */
1978 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1979 struct sk_buff *skb)
1981 struct hci_rp_le_read_def_data_len *rp = data;
1983 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1988 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1989 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Command Complete: record the default data length we asked the
 * controller to use (values taken from the sent command).
 */
1994 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1995 struct sk_buff *skb)
1997 struct hci_cp_le_write_def_data_len *sent;
1998 struct hci_ev_status *rp = data;
2000 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2005 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
2009 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
2010 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete: mirror a successful resolving-list add (address +
 * IRKs) into the host's shadow list.
 */
2015 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2016 struct sk_buff *skb)
2018 struct hci_cp_le_add_to_resolv_list *sent;
2019 struct hci_ev_status *rp = data;
2021 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2026 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2031 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2032 sent->bdaddr_type, sent->peer_irk,
2034 hci_dev_unlock(hdev);
/* Command Complete: mirror a successful resolving-list removal into the
 * host's shadow list.
 */
2039 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2040 struct sk_buff *skb)
2042 struct hci_cp_le_del_from_resolv_list *sent;
2043 struct hci_ev_status *rp = data;
2045 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2050 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2055 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2057 hci_dev_unlock(hdev);
/* Command Complete: mirror a successful resolving-list clear. */
2062 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2063 struct sk_buff *skb)
2065 struct hci_ev_status *rp = data;
2067 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2073 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2074 hci_dev_unlock(hdev);
/* Command Complete: cache the controller's resolving-list capacity. */
2079 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2080 struct sk_buff *skb)
2082 struct hci_rp_le_read_resolv_list_size *rp = data;
2084 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2089 hdev->le_resolv_list_size = rp->size;
/* Command Complete: track whether controller-based (link-layer) RPA
 * resolution is now on or off via the HCI_LL_RPA_RESOLUTION flag.
 */
2094 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2095 struct sk_buff *skb)
2097 struct hci_ev_status *rp = data;
2100 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2105 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2112 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2114 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2116 hci_dev_unlock(hdev);
/* Command Complete: cache the controller's maximum LE data length
 * parameters (TX/RX octets and time).
 */
2121 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2122 struct sk_buff *skb)
2124 struct hci_rp_le_read_max_data_len *rp = data;
2126 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2131 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2132 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2133 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2134 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete for HCI_OP_WRITE_LE_HOST_SUPPORTED: sync the host
 * feature bits (LMP_HOST_LE / LMP_HOST_LE_BREDR) and the HCI_LE_ENABLED
 * flag with what was written; disabling LE also clears HCI_ADVERTISING.
 */
2139 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2140 struct sk_buff *skb)
2142 struct hci_cp_write_le_host_supported *sent;
2143 struct hci_ev_status *rp = data;
2145 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2150 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2157 hdev->features[1][0] |= LMP_HOST_LE;
2158 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2160 hdev->features[1][0] &= ~LMP_HOST_LE;
2161 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2162 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2166 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2168 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2170 hci_dev_unlock(hdev);
/* Command Complete for HCI_OP_LE_SET_ADV_PARAM: remember which own
 * address type was configured for legacy advertising.
 */
2175 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2176 struct sk_buff *skb)
2178 struct hci_cp_le_set_adv_param *cp;
2179 struct hci_ev_status *rp = data;
2181 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2186 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2191 hdev->adv_addr_type = cp->own_address_type;
2192 hci_dev_unlock(hdev);
/* Extended variant: additionally records the selected TX power returned
 * by the controller (per instance and for instance 0 in hdev) and
 * refreshes the advertising data now that TX power is known.
 */
2197 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2198 struct sk_buff *skb)
2200 struct hci_rp_le_set_ext_adv_params *rp = data;
2201 struct hci_cp_le_set_ext_adv_params *cp;
2202 struct adv_info *adv_instance;
2204 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2209 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2214 hdev->adv_addr_type = cp->own_addr_type;
2216 /* Store in hdev for instance 0 */
2217 hdev->adv_tx_power = rp->tx_power;
2219 adv_instance = hci_find_adv_instance(hdev, cp->handle);
2221 adv_instance->tx_power = rp->tx_power;
2223 /* Update adv data as tx power is known now */
2224 hci_update_adv_data(hdev, cp->handle);
2226 hci_dev_unlock(hdev);
/* Command Complete for HCI_OP_READ_RSSI: store the reported RSSI on the
 * connection identified by the returned handle.
 */
2231 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2232 struct sk_buff *skb)
2234 struct hci_rp_read_rssi *rp = data;
2235 struct hci_conn *conn;
2237 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2244 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2246 conn->rssi = rp->rssi;
2248 hci_dev_unlock(hdev);
/* Command Complete for HCI_OP_READ_TX_POWER: store current or maximum
 * TX power on the connection, depending on the type byte of the
 * original command.
 */
2253 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2254 struct sk_buff *skb)
2256 struct hci_cp_read_tx_power *sent;
2257 struct hci_rp_read_tx_power *rp = data;
2258 struct hci_conn *conn;
2260 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2265 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2271 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2275 switch (sent->type) {
2277 conn->tx_power = rp->tx_power;
2280 conn->max_tx_power = rp->tx_power;
2285 hci_dev_unlock(hdev);
/* Command Complete for HCI_OP_WRITE_SSP_DEBUG_MODE: cache the debug
 * mode that was written.
 */
2289 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2290 struct sk_buff *skb)
2292 struct hci_ev_status *rp = data;
2295 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2300 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2302 hdev->ssp_debug_mode = *mode;
/* Command Status for HCI_OP_INQUIRY: mark inquiry as in progress once
 * the controller has accepted the command.
 */
2307 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2309 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2314 if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2315 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status for HCI_OP_CREATE_CONN: on failure close a pending
 * outgoing ACL connection and notify upper layers; otherwise ensure a
 * conn object exists for the target address.
 * NOTE(review): the extraction dropped the status branch/guard lines;
 * code text preserved verbatim.
 */
2318 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2320 struct hci_cp_create_conn *cp;
2321 struct hci_conn *conn;
2323 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2325 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2331 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2333 bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2336 if (conn && conn->state == BT_CONNECT) {
2337 conn->state = BT_CLOSED;
2338 hci_connect_cfm(conn, status);
2343 conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
2346 bt_dev_err(hdev, "no memory for new connection");
2350 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_ADD_SCO: on error tear down the SCO link
 * hanging off the parent ACL connection and confirm the failure.
 */
2353 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2355 struct hci_cp_add_sco *cp;
2356 struct hci_conn *acl;
2357 struct hci_link *link;
2360 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2365 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2369 handle = __le16_to_cpu(cp->handle);
2371 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2375 acl = hci_conn_hash_lookup_handle(hdev, handle);
2377 link = list_first_entry_or_null(&acl->link_list,
2378 struct hci_link, list);
2379 if (link && link->conn) {
2380 link->conn->state = BT_CLOSED;
2382 hci_connect_cfm(link->conn, status);
2383 hci_conn_del(link->conn);
2387 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_AUTH_REQUESTED: on error, fail a
 * connection still in BT_CONFIG and drop the reference held for it.
 */
2390 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2392 struct hci_cp_auth_requested *cp;
2393 struct hci_conn *conn;
2395 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2400 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2406 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2408 if (conn->state == BT_CONFIG) {
2409 hci_connect_cfm(conn, status);
2410 hci_conn_drop(conn);
2414 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_SET_CONN_ENCRYPT: same failure handling as
 * the authentication request above.
 */
2417 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2419 struct hci_cp_set_conn_encrypt *cp;
2420 struct hci_conn *conn;
2422 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2427 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2433 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2435 if (conn->state == BT_CONFIG) {
2436 hci_connect_cfm(conn, status);
2437 hci_conn_drop(conn);
2441 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication:
 * only for connections in BT_CONFIG that we initiated, with a pending
 * security level above SDP, and (for non-SSP peers without MITM) at
 * least MEDIUM security requested.
 */
2444 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2445 struct hci_conn *conn)
2447 if (conn->state != BT_CONFIG || !conn->out)
2450 if (conn->pending_sec_level == BT_SECURITY_SDP)
2453 /* Only request authentication for SSP connections or non-SSP
2454 * devices with sec_level MEDIUM or HIGH or if MITM protection
2457 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2458 conn->pending_sec_level != BT_SECURITY_FIPS &&
2459 conn->pending_sec_level != BT_SECURITY_HIGH &&
2460 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for one inquiry-cache entry, reusing the
 * page-scan parameters learned during inquiry.
 */
2466 static int hci_resolve_name(struct hci_dev *hdev,
2467 struct inquiry_entry *e)
2469 struct hci_cp_remote_name_req cp;
2471 memset(&cp, 0, sizeof(cp));
2473 bacpy(&cp.bdaddr, &e->data.bdaddr);
2474 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2475 cp.pscan_mode = e->data.pscan_mode;
2476 cp.clock_offset = e->data.clock_offset;
2478 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next NAME_NEEDED entry in the
 * discovery resolve list; gives up when the per-discovery name-resolve
 * deadline has passed.
 */
2481 static bool hci_resolve_next_name(struct hci_dev *hdev)
2483 struct discovery_state *discov = &hdev->discovery;
2484 struct inquiry_entry *e;
2486 if (list_empty(&discov->resolve))
2489 /* We should stop if we already spent too much time resolving names. */
2490 if (time_after(jiffies, discov->name_resolve_timeout)) {
2491 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2495 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2499 if (hci_resolve_name(hdev, e) == 0) {
2500 e->name_state = NAME_PENDING;
/* Handle completion (or failure) of a pending remote-name lookup for
 * bdaddr: report the name via mgmt, update the inquiry-cache entry's
 * name state, and either continue resolving the next name or finish
 * discovery.
 */
2507 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2508 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2510 struct discovery_state *discov = &hdev->discovery;
2511 struct inquiry_entry *e;
2513 /* Update the mgmt connected state if necessary. Be careful with
2514 * conn objects that exist but are not (yet) connected however.
2515 * Only those in BT_CONFIG or BT_CONNECTED states can be
2516 * considered connected.
2518 if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
2519 mgmt_device_connected(hdev, conn, name, name_len)
2521 if (discov->state == DISCOVERY_STOPPED)
2524 if (discov->state == DISCOVERY_STOPPING)
2525 goto discov_complete;
2527 if (discov->state != DISCOVERY_RESOLVING)
2530 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2531 /* If the device was not found in a list of found devices names of which
2532 * are pending. there is no need to continue resolving a next name as it
2533 * will be done upon receiving another Remote Name Request Complete
/* NULL name means the lookup failed; record that so it isn't retried. */
2540 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2541 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2544 if (hci_resolve_next_name(hdev))
2548 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status for HCI_OP_REMOTE_NAME_REQ. On failure, resolve the
 * pending-name bookkeeping immediately (with a NULL name) and, for an
 * outgoing connection that still needs it, fire the authentication
 * request that was deferred behind the name lookup.
 */
2551 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2553 struct hci_cp_remote_name_req *cp;
2554 struct hci_conn *conn;
2556 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2558 /* If successful wait for the name req complete event before
2559 * checking for the need to do authentication */
2563 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2569 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2571 if (hci_dev_test_flag(hdev, HCI_MGMT))
2572 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2577 if (!hci_outgoing_auth_needed(hdev, conn))
2580 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2581 struct hci_cp_auth_requested auth_cp;
2583 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2585 auth_cp.handle = __cpu_to_le16(conn->handle);
2586 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2587 sizeof(auth_cp), &auth_cp);
2591 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_READ_REMOTE_FEATURES: on error, fail a
 * connection still being configured and release its reference.
 */
2594 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2596 struct hci_cp_read_remote_features *cp;
2597 struct hci_conn *conn;
2599 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2604 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2610 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2612 if (conn->state == BT_CONFIG) {
2613 hci_connect_cfm(conn, status);
2614 hci_conn_drop(conn);
2618 hci_dev_unlock(hdev);
/* Extended-features variant of the handler above. */
2621 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2623 struct hci_cp_read_remote_ext_features *cp;
2624 struct hci_conn *conn;
2626 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2631 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2637 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2639 if (conn->state == BT_CONFIG) {
2640 hci_connect_cfm(conn, status);
2641 hci_conn_drop(conn);
2645 hci_dev_unlock(hdev);
/* Shared failure path for synchronous (SCO/eSCO) connection setup:
 * close and delete the SCO link attached to the given ACL handle.
 */
2648 static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2651 struct hci_conn *acl;
2652 struct hci_link *link;
2654 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2658 acl = hci_conn_hash_lookup_handle(hdev, handle);
2660 link = list_first_entry_or_null(&acl->link_list,
2661 struct hci_link, list);
2662 if (link && link->conn) {
2663 link->conn->state = BT_CLOSED;
2665 hci_connect_cfm(link->conn, status);
2666 hci_conn_del(link->conn);
2670 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_SETUP_SYNC_CONN. */
2673 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2675 struct hci_cp_setup_sync_conn *cp;
2677 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2682 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2686 hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
/* Command Status for HCI_OP_ENHANCED_SETUP_SYNC_CONN. */
2689 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2691 struct hci_cp_enhanced_setup_sync_conn *cp;
2693 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2698 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2702 hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
/* Command Status for HCI_OP_SNIFF_MODE: clear the pending mode-change
 * flag and complete any deferred SCO setup.
 */
2705 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2707 struct hci_cp_sniff_mode *cp;
2708 struct hci_conn *conn;
2710 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2715 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2721 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2723 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2725 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2726 hci_sco_setup(conn, status);
2729 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_EXIT_SNIFF_MODE: mirror of the handler
 * above for leaving sniff mode.
 */
2732 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2734 struct hci_cp_exit_sniff_mode *cp;
2735 struct hci_conn *conn;
2737 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2742 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2748 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2750 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2752 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2753 hci_sco_setup(conn, status);
2756 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_DISCONNECT. A successful status on a
 * non-suspended device just waits for HCI_EV_DISCONN_COMPLETE; any
 * other case cleans the connection up here: mgmt notifications, link
 * key / auto-connect parameter housekeeping for ACL, re-enabling
 * advertising for an LE slave, and informing sockets before deletion.
 * NOTE(review): interior guard/brace lines were dropped by the
 * extraction; code text preserved verbatim.
 */
2759 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2761 struct hci_cp_disconnect *cp;
2762 struct hci_conn_params *params;
2763 struct hci_conn *conn;
2766 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2768 /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
2769 * otherwise cleanup the connection immediately.
2771 if (!status && !hdev->suspended)
2774 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2780 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2785 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2786 conn->dst_type, status);
/* A disconnected LE slave frees its advertising instance; restart it. */
2788 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2789 hdev->cur_adv_instance = conn->adv_instance;
2790 hci_enable_advertising(hdev);
2793 /* Inform sockets conn is gone before we delete it */
2794 hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2799 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2801 if (conn->type == ACL_LINK) {
2802 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2803 hci_remove_link_key(hdev, &conn->dst);
2806 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2808 switch (params->auto_connect) {
2809 case HCI_AUTO_CONN_LINK_LOSS:
/* Link-loss auto-connect only re-arms on a connection timeout. */
2810 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2814 case HCI_AUTO_CONN_DIRECT:
2815 case HCI_AUTO_CONN_ALWAYS:
2816 hci_pend_le_list_del_init(params);
2817 hci_pend_le_list_add(params, &hdev->pend_le_conns);
2825 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2826 cp->reason, mgmt_conn);
2828 hci_disconn_cfm(conn, cp->reason);
2831 /* If the disconnection failed for any reason, the upper layer
2832 * does not retry to disconnect in current implementation.
2833 * Hence, we need to do some basic cleanup here and re-enable
2834 * advertising if necessary.
2838 hci_dev_unlock(hdev);
/* Map the controller-resolved LE address types (0x02/0x03) back to the
 * plain public/random types, reporting via *resolved whether the
 * controller performed RPA resolution.
 */
2841 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2843 /* When using controller based address resolution, then the new
2844 * address types 0x02 and 0x03 are used. These types need to be
2845 * converted back into either public address or random address type
2848 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2851 return ADDR_LE_DEV_PUBLIC;
2852 case ADDR_LE_DEV_RANDOM_RESOLVED:
2855 return ADDR_LE_DEV_RANDOM;
/* Common Command Status work for the LE create-connection commands:
 * find the pending connection and record the initiator/responder
 * address information SMP will later need.
 */
2863 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2864 u8 peer_addr_type, u8 own_address_type,
2867 struct hci_conn *conn;
2869 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2874 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2876 /* Store the initiator and responder address information which
2877 * is needed for SMP. These values will not change during the
2878 * lifetime of the connection.
2880 conn->init_addr_type = own_address_type;
2881 if (own_address_type == ADDR_LE_DEV_RANDOM)
2882 bacpy(&conn->init_addr, &hdev->random_addr);
2884 bacpy(&conn->init_addr, &hdev->bdaddr);
2886 conn->resp_addr_type = peer_addr_type;
2887 bacpy(&conn->resp_addr, peer_addr);
/* Command Status for HCI_OP_LE_CREATE_CONN; failures are handled by
 * hci_conn_failed via the request completion path, so only the SMP
 * address bookkeeping is done here.
 */
2890 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2892 struct hci_cp_le_create_conn *cp;
2894 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2896 /* All connection failure handling is taken care of by the
2897 * hci_conn_failed function which is triggered by the HCI
2898 * request completion callbacks used for connecting.
2903 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2909 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2910 cp->own_address_type, cp->filter_policy);
2912 hci_dev_unlock(hdev);
/* Extended-create-connection variant of the handler above. */
2915 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2917 struct hci_cp_le_ext_create_conn *cp;
2919 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2921 /* All connection failure handling is taken care of by the
2922 * hci_conn_failed function which is triggered by the HCI
2923 * request completion callbacks used for connecting.
2928 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2934 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2935 cp->own_addr_type, cp->filter_policy);
2937 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_LE_READ_REMOTE_FEATURES: on error, fail a
 * connection still in BT_CONFIG and drop its reference.
 */
2940 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2942 struct hci_cp_le_read_remote_features *cp;
2943 struct hci_conn *conn;
2945 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2950 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2956 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2958 if (conn->state == BT_CONFIG) {
2959 hci_connect_cfm(conn, status);
2960 hci_conn_drop(conn);
2964 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_LE_START_ENC: a rejected encryption start
 * on a live connection is treated as an authentication failure and the
 * link is disconnected.
 */
2967 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2969 struct hci_cp_le_start_enc *cp;
2970 struct hci_conn *conn;
2972 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2979 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2983 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2987 if (conn->state != BT_CONNECTED)
2990 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2991 hci_conn_drop(conn);
2994 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_SWITCH_ROLE: on error clear the pending
 * role-switch flag so a new switch may be attempted.
 */
2997 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2999 struct hci_cp_switch_role *cp;
3000 struct hci_conn *conn;
3002 BT_DBG("%s status 0x%2.2x", hdev->name, status);
3007 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3013 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3015 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3017 hci_dev_unlock(hdev);
/* HCI_EV_INQUIRY_COMPLETE handler: clear the HCI_INQUIRY flag (waking
 * any waiters), then either finish discovery or move it into the
 * name-resolving phase if unresolved entries remain.
 */
3020 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3021 struct sk_buff *skb)
3023 struct hci_ev_status *ev = data;
3024 struct discovery_state *discov = &hdev->discovery;
3025 struct inquiry_entry *e;
3027 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3029 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3032 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3033 wake_up_bit(&hdev->flags, HCI_INQUIRY)
3035 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3040 if (discov->state != DISCOVERY_FINDING)
3043 if (list_empty(&discov->resolve)) {
3044 /* When BR/EDR inquiry is active and no LE scanning is in
3045 * progress, then change discovery state to indicate completion.
3047 * When running LE scanning and BR/EDR inquiry simultaneously
3048 * and the LE scan already finished, then change the discovery
3049 * state to indicate completion.
3051 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3052 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3053 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Entries awaiting names: start resolving and arm the deadline. */
3057 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3058 if (e && hci_resolve_name(hdev, e) == 0) {
3059 e->name_state = NAME_PENDING;
3060 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3061 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3063 /* When BR/EDR inquiry is active and no LE scanning is in
3064 * progress, then change discovery state to indicate completion.
3066 * When running LE scanning and BR/EDR inquiry simultaneously
3067 * and the LE scan already finished, then change the discovery
3068 * state to indicate completion.
3070 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3071 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3072 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3076 hci_dev_unlock(hdev);
/* HCI_EV_INQUIRY_RESULT handler: validate the event length against the
 * advertised entry count, then feed each result into the inquiry cache
 * and report it to mgmt with an invalid-RSSI placeholder.
 */
3079 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3080 struct sk_buff *skb)
3082 struct hci_ev_inquiry_result *ev = edata;
3083 struct inquiry_data data;
/* Bounds-check the flexible array before touching ev->info[]. */
3086 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3087 flex_array_size(ev, info, ev->num)))
3090 bt_dev_dbg(hdev, "num %d", ev->num);
/* Results from periodic inquiry are not forwarded. */
3095 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3100 for (i = 0; i < ev->num; i++) {
3101 struct inquiry_info *info = &ev->info[i];
3104 bacpy(&data.bdaddr, &info->bdaddr);
3105 data.pscan_rep_mode = info->pscan_rep_mode;
3106 data.pscan_period_mode = info->pscan_period_mode;
3107 data.pscan_mode = info->pscan_mode;
3108 memcpy(data.dev_class, info->dev_class, 3);
3109 data.clock_offset = info->clock_offset;
/* Basic inquiry results carry no RSSI or SSP information. */
3110 data.rssi = HCI_RSSI_INVALID;
3111 data.ssp_mode = 0x00;
3113 flags = hci_inquiry_cache_update(hdev, &data, false);
3115 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3116 info->dev_class, HCI_RSSI_INVALID,
3117 flags, NULL, 0, NULL, 0, 0);
3120 hci_dev_unlock(hdev);
/* HCI_EV_CONN_COMPLETE handler: binds the new handle to the pending
 * hci_conn (creating one for auto-connected/incoming cases), sets up
 * ACL vs SCO state, kicks off remote-feature reads, and propagates
 * failures. Guarded against duplicate events for the same connection.
 * NOTE(review): interior guard/brace lines were dropped by the
 * extraction; code text preserved verbatim.
 */
3123 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3124 struct sk_buff *skb)
3126 struct hci_ev_conn_complete *ev = data;
3127 struct hci_conn *conn;
3128 u8 status = ev->status;
3130 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3134 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3136 /* In case of error status and there is no connection pending
3137 * just unlock as there is nothing to cleanup.
3142 /* Connection may not exist if auto-connected. Check the bredr
3143 * allowlist to see if this device is allowed to auto connect.
3144 * If link is an ACL type, create a connection class
3147 * Auto-connect will only occur if the event filter is
3148 * programmed with a given address. Right now, event filter is
3149 * only used during suspend.
3151 if (ev->link_type == ACL_LINK &&
3152 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3155 conn = hci_conn_add_unset(hdev, ev->link_type,
3156 &ev->bdaddr, HCI_ROLE_SLAVE);
3158 bt_dev_err(hdev, "no memory for new conn");
3162 if (ev->link_type != SCO_LINK)
/* Peer fell back from eSCO to SCO; reuse the eSCO conn object. */
3165 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3170 conn->type = SCO_LINK;
3174 /* The HCI_Connection_Complete event is only sent once per connection.
3175 * Processing it more than once per connection can corrupt kernel memory.
3177 * As the connection handle is set here for the first time, it indicates
3178 * whether the connection is already set up.
3180 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
3181 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3186 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
3190 if (conn->type == ACL_LINK) {
3191 conn->state = BT_CONFIG;
3192 hci_conn_hold(conn);
/* Keep unauthenticated incoming links around long enough to pair. */
3194 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3195 !hci_find_link_key(hdev, &ev->bdaddr))
3196 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3198 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3200 conn->state = BT_CONNECTED;
3202 hci_debugfs_create_conn(conn);
3203 hci_conn_add_sysfs(conn);
3205 if (test_bit(HCI_AUTH, &hdev->flags))
3206 set_bit(HCI_CONN_AUTH, &conn->flags);
3208 if (test_bit(HCI_ENCRYPT, &hdev->flags))
3209 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3211 /* "Link key request" completed ahead of "connect request" completes */
3212 if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3213 ev->link_type == ACL_LINK) {
3214 struct link_key *key;
3215 struct hci_cp_read_enc_key_size cp;
3217 key = hci_find_link_key(hdev, &ev->bdaddr);
3219 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
/* Controllers without Read Encryption Key Size get the max assumed. */
3221 if (!read_key_size_capable(hdev)) {
3222 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3224 cp.handle = cpu_to_le16(conn->handle);
3225 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3227 bt_dev_err(hdev, "sending read key size failed");
3228 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3232 hci_encrypt_cfm(conn, ev->status);
3236 /* Get remote features */
3237 if (conn->type == ACL_LINK) {
3238 struct hci_cp_read_remote_features cp;
3239 cp.handle = ev->handle;
3240 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3243 hci_update_scan(hdev);
3246 /* Set packet type for incoming connection */
3247 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3248 struct hci_cp_change_conn_ptype cp;
3249 cp.handle = ev->handle;
3250 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3251 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3256 if (conn->type == ACL_LINK)
3257 hci_sco_setup(conn, ev->status);
3261 hci_conn_failed(conn, status);
3262 } else if (ev->link_type == SCO_LINK) {
3263 switch (conn->setting & SCO_AIRMODE_MASK) {
3264 case SCO_AIRMODE_CVSD:
/* Let the driver configure its codec path for CVSD audio. */
3266 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3270 hci_connect_cfm(conn, status);
3274 hci_dev_unlock(hdev);
3277 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3279 struct hci_cp_reject_conn_req cp;
3281 bacpy(&cp.bdaddr, bdaddr);
3282 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3283 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* HCI Connection Request event: decide whether to accept, reject, or
 * defer an incoming ACL/SCO/eSCO connection from a remote controller.
 * NOTE(review): gaps in the embedded numbering show lines were dropped
 * during extraction; comments describe only the visible statements.
 */
3286 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3287 struct sk_buff *skb)
3289 struct hci_ev_conn_request *ev = data;
3290 int mask = hdev->link_mode;
3291 struct inquiry_entry *ie;
3292 struct hci_conn *conn;
3295 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3297 /* Reject incoming connection from device with same BD ADDR against
3300 if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
3301 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
3303 hci_reject_conn(hdev, &ev->bdaddr);
/* Let registered protocols (L2CAP/SCO) vote on accepting this link type */
3307 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3310 if (!(mask & HCI_LM_ACCEPT)) {
3311 hci_reject_conn(hdev, &ev->bdaddr);
/* Devices on the reject list are always turned away */
3317 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3319 hci_reject_conn(hdev, &ev->bdaddr);
3323 /* Require HCI_CONNECTABLE or an accept list entry to accept the
3324 * connection. These features are only touched through mgmt so
3325 * only do the checks if HCI_MGMT is set.
3327 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3328 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3329 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3331 hci_reject_conn(hdev, &ev->bdaddr);
3335 /* Connection accepted */
/* Cache the advertised Class of Device in the inquiry cache entry */
3337 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3339 memcpy(ie->data.dev_class, ev->dev_class, 3);
/* Reuse an existing hci_conn for this peer or allocate a fresh one */
3341 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3344 conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
3347 bt_dev_err(hdev, "no memory for new connection");
3352 memcpy(conn->dev_class, ev->dev_class, 3);
3354 hci_dev_unlock(hdev);
/* ACL links (and sync links on pre-eSCO controllers, when not deferred)
 * are accepted immediately with HCI_Accept_Connection_Request.
 */
3356 if (ev->link_type == ACL_LINK ||
3357 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3358 struct hci_cp_accept_conn_req cp;
3359 conn->state = BT_CONNECT;
3361 bacpy(&cp.bdaddr, &ev->bdaddr);
3363 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3364 cp.role = 0x00; /* Become central */
3366 cp.role = 0x01; /* Remain peripheral */
3368 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
/* Synchronous links are accepted with default bandwidth (0x1f40 = 8000
 * bytes/s), unlimited latency and "don't care" retransmission effort.
 */
3369 } else if (!(flags & HCI_PROTO_DEFER)) {
3370 struct hci_cp_accept_sync_conn_req cp;
3371 conn->state = BT_CONNECT;
3373 bacpy(&cp.bdaddr, &ev->bdaddr);
3374 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3376 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
3377 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
3378 cp.max_latency = cpu_to_le16(0xffff);
3379 cp.content_format = cpu_to_le16(hdev->voice_setting);
3380 cp.retrans_effort = 0xff;
3382 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
/* Otherwise defer the accept/reject decision to the upper layer */
3385 conn->state = BT_CONNECT2;
3386 hci_connect_cfm(conn, 0);
3391 hci_dev_unlock(hdev);
3394 static u8 hci_to_mgmt_reason(u8 err)
3397 case HCI_ERROR_CONNECTION_TIMEOUT:
3398 return MGMT_DEV_DISCONN_TIMEOUT;
3399 case HCI_ERROR_REMOTE_USER_TERM:
3400 case HCI_ERROR_REMOTE_LOW_RESOURCES:
3401 case HCI_ERROR_REMOTE_POWER_OFF:
3402 return MGMT_DEV_DISCONN_REMOTE;
3403 case HCI_ERROR_LOCAL_HOST_TERM:
3404 return MGMT_DEV_DISCONN_LOCAL_HOST;
3406 return MGMT_DEV_DISCONN_UNKNOWN;
/* HCI Disconnection Complete event: tear down the connection object,
 * notify mgmt, re-arm auto-connect parameters, and re-enable
 * advertising when a peripheral LE link goes away.
 * NOTE(review): numbering gaps show dropped lines; visible code only.
 */
3410 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3411 struct sk_buff *skb)
3413 struct hci_ev_disconn_complete *ev = data;
3415 struct hci_conn_params *params;
3416 struct hci_conn *conn;
3417 bool mgmt_connected;
3419 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3423 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* A non-zero status means the disconnect itself failed */
3428 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3429 conn->dst_type, ev->status);
3433 conn->state = BT_CLOSED;
3435 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
/* Prefer the auth-failure reason over the raw HCI reason code */
3437 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3438 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3440 reason = hci_to_mgmt_reason(ev->reason);
3442 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3443 reason, mgmt_connected);
3445 if (conn->type == ACL_LINK) {
/* Drop a link key the peer asked us to flush */
3446 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3447 hci_remove_link_key(hdev, &conn->dst);
3449 hci_update_scan(hdev);
/* Re-queue the device for background connect per its auto_connect policy */
3452 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3454 switch (params->auto_connect) {
3455 case HCI_AUTO_CONN_LINK_LOSS:
/* LINK_LOSS only reconnects after a supervision timeout */
3456 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3460 case HCI_AUTO_CONN_DIRECT:
3461 case HCI_AUTO_CONN_ALWAYS:
3462 hci_pend_le_list_del_init(params);
3463 hci_pend_le_list_add(params, &hdev->pend_le_conns);
3464 hci_update_passive_scan(hdev);
3472 hci_disconn_cfm(conn, ev->reason);
3474 /* Re-enable advertising if necessary, since it might
3475 * have been disabled by the connection. From the
3476 * HCI_LE_Set_Advertise_Enable command description in
3477 * the core specification (v4.0):
3478 * "The Controller shall continue advertising until the Host
3479 * issues an LE_Set_Advertise_Enable command with
3480 * Advertising_Enable set to 0x00 (Advertising is disabled)
3481 * or until a connection is created or until the Advertising
3482 * is timed out due to Directed Advertising."
3484 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3485 hdev->cur_adv_instance = conn->adv_instance;
3486 hci_enable_advertising(hdev);
3492 hci_dev_unlock(hdev);
/* HCI Authentication Complete event: record the authentication outcome
 * on the connection and continue with encryption where appropriate.
 * NOTE(review): numbering gaps show dropped lines; visible code only.
 */
3495 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3496 struct sk_buff *skb)
3498 struct hci_ev_auth_complete *ev = data;
3499 struct hci_conn *conn;
3501 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3505 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Success: mark authenticated and promote to the pending security level */
3510 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3511 set_bit(HCI_CONN_AUTH, &conn->flags);
3512 conn->sec_level = conn->pending_sec_level;
/* Failure: remember missing-key failures and tell mgmt */
3514 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3515 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3517 mgmt_auth_failed(conn, ev->status);
3520 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3522 if (conn->state == BT_CONFIG) {
/* With SSP, follow up with HCI_Set_Connection_Encryption */
3523 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3524 struct hci_cp_set_conn_encrypt cp;
3525 cp.handle = ev->handle;
3527 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3530 conn->state = BT_CONNECTED;
3531 hci_connect_cfm(conn, ev->status);
3532 hci_conn_drop(conn);
3535 hci_auth_cfm(conn, ev->status);
3537 hci_conn_hold(conn);
3538 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3539 hci_conn_drop(conn);
/* Encryption was requested while authentication was still pending */
3542 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3544 struct hci_cp_set_conn_encrypt cp;
3545 cp.handle = ev->handle;
3547 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3550 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3551 hci_encrypt_cfm(conn, ev->status);
3556 hci_dev_unlock(hdev);
/* HCI Remote Name Request Complete event: forward the resolved (or
 * failed) remote name to mgmt and, for connections that still need it,
 * initiate authentication.
 * NOTE(review): numbering gaps show dropped lines; visible code only.
 */
3559 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3560 struct sk_buff *skb)
3562 struct hci_ev_remote_name *ev = data;
3563 struct hci_conn *conn;
3565 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3569 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Pending-name bookkeeping only applies when mgmt is in use */
3571 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3574 if (ev->status == 0)
3575 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3576 strnlen(ev->name, HCI_MAX_NAME_LENGTH))
3578 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3584 if (!hci_outgoing_auth_needed(hdev, conn))
/* Start authentication exactly once per connection */
3587 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3588 struct hci_cp_auth_requested cp;
3590 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3592 cp.handle = __cpu_to_le16(conn->handle);
3593 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3597 hci_dev_unlock(hdev);
/* HCI Encryption Change event: update the connection's encryption and
 * authentication flags, validate link security requirements, read the
 * encryption key size for ACL links, and configure the Authenticated
 * Payload Timeout for AES-CCM links with LMP/LE Ping support.
 * NOTE(review): numbering gaps show dropped lines; visible code only.
 */
3600 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3601 struct sk_buff *skb)
3603 struct hci_ev_encrypt_change *ev = data;
3604 struct hci_conn *conn;
3606 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3610 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3616 /* Encryption implies authentication */
3617 set_bit(HCI_CONN_AUTH, &conn->flags);
3618 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3619 conn->sec_level = conn->pending_sec_level;
3621 /* P-256 authentication key implies FIPS */
3622 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3623 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links always use AES-CCM */
3625 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3626 conn->type == LE_LINK)
3627 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3629 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3630 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3634 /* We should disregard the current RPA and generate a new one
3635 * whenever the encryption procedure fails.
3637 if (ev->status && conn->type == LE_LINK) {
3638 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3639 hci_adv_instances_set_rpa_expired(hdev, true);
3642 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3644 /* Check link security requirements are met */
3645 if (!hci_conn_check_link_mode(conn))
3646 ev->status = HCI_ERROR_AUTH_FAILURE;
/* A failed encryption change on a live link forces a disconnect */
3648 if (ev->status && conn->state == BT_CONNECTED) {
3649 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3650 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3652 /* Notify upper layers so they can cleanup before
3655 hci_encrypt_cfm(conn, ev->status);
3656 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3657 hci_conn_drop(conn);
3661 /* Try reading the encryption key size for encrypted ACL links */
3662 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3663 struct hci_cp_read_enc_key_size cp;
3665 /* Only send HCI_Read_Encryption_Key_Size if the
3666 * controller really supports it. If it doesn't, assume
3667 * the default size (16).
3669 if (!read_key_size_capable(hdev)) {
3670 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3674 cp.handle = cpu_to_le16(conn->handle);
3675 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
/* Fall back to the default key size if the read cannot be sent */
3677 bt_dev_err(hdev, "sending read key size failed");
3678 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3685 /* Set the default Authenticated Payload Timeout after
3686 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3687 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3688 * sent when the link is active and Encryption is enabled, the conn
3689 * type can be either LE or ACL and controller must support LMP Ping.
3690 * Ensure for AES-CCM encryption as well.
3692 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3693 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3694 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3695 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3696 struct hci_cp_write_auth_payload_to cp;
3698 cp.handle = cpu_to_le16(conn->handle);
3699 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3700 if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3702 bt_dev_err(hdev, "write auth payload timeout failed");
3706 hci_encrypt_cfm(conn, ev->status);
3709 hci_dev_unlock(hdev);
3712 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3713 struct sk_buff *skb)
3715 struct hci_ev_change_link_key_complete *ev = data;
3716 struct hci_conn *conn;
3718 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3722 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3725 set_bit(HCI_CONN_SECURE, &conn->flags);
3727 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3729 hci_key_change_cfm(conn, ev->status);
3732 hci_dev_unlock(hdev);
/* HCI Read Remote Supported Features Complete event: cache the feature
 * page, optionally fetch extended features, and continue connection
 * setup (remote name request / auth / connect confirmation).
 * NOTE(review): numbering gaps show dropped lines; visible code only.
 */
3735 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3736 struct sk_buff *skb)
3738 struct hci_ev_remote_features *ev = data;
3739 struct hci_conn *conn;
3741 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3745 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Cache feature page 0 of the remote device */
3750 memcpy(conn->features[0], ev->features, 8);
3752 if (conn->state != BT_CONFIG)
/* Both sides support extended features: read page 1 next */
3755 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3756 lmp_ext_feat_capable(conn)) {
3757 struct hci_cp_read_remote_ext_features cp;
3758 cp.handle = ev->handle;
3760 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* Not yet reported to mgmt: request the remote name first */
3765 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3766 struct hci_cp_remote_name_req cp;
3767 memset(&cp, 0, sizeof(cp));
3768 bacpy(&cp.bdaddr, &conn->dst);
3769 cp.pscan_rep_mode = 0x02;
3770 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3772 mgmt_device_connected(hdev, conn, NULL, 0);
3775 if (!hci_outgoing_auth_needed(hdev, conn)) {
3776 conn->state = BT_CONNECTED;
3777 hci_connect_cfm(conn, ev->status);
3778 hci_conn_drop(conn);
3782 hci_dev_unlock(hdev);
3785 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3787 cancel_delayed_work(&hdev->cmd_timer);
3790 if (!test_bit(HCI_RESET, &hdev->flags)) {
3792 cancel_delayed_work(&hdev->ncmd_timer);
3793 atomic_set(&hdev->cmd_cnt, 1);
3795 if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3796 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3803 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3804 struct sk_buff *skb)
3806 struct hci_rp_le_read_buffer_size_v2 *rp = data;
3808 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3813 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu);
3814 hdev->le_pkts = rp->acl_max_pkt;
3815 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu);
3816 hdev->iso_pkts = rp->iso_max_pkt;
3818 hdev->le_cnt = hdev->le_pkts;
3819 hdev->iso_cnt = hdev->iso_pkts;
3821 BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3822 hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3827 static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3829 struct hci_conn *conn, *tmp;
3831 lockdep_assert_held(&hdev->lock);
3833 list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3834 if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3835 conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3838 if (HCI_CONN_HANDLE_UNSET(conn->handle))
3839 hci_conn_failed(conn, status);
/* Command Complete for HCI_LE_Set_CIG_Parameters: validate the response
 * against the command that was sent, fail unbound CIS on error, and on
 * success assign the returned CIS connection handles in order.
 * NOTE(review): numbering gaps show dropped lines; visible code only.
 */
3843 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3844 struct sk_buff *skb)
3846 struct hci_rp_le_set_cig_params *rp = data;
3847 struct hci_cp_le_set_cig_params *cp;
3848 struct hci_conn *conn;
3849 u8 status = rp->status;
3850 bool pending = false;
3853 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Cross-check handle count and CIG id against the sent command */
3855 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
3856 if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
3857 rp->cig_id != cp->cig_id)) {
3858 bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
3859 status = HCI_ERROR_UNSPECIFIED;
3864 /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
3866 * If the Status return parameter is non-zero, then the state of the CIG
3867 * and its CIS configurations shall not be changed by the command. If
3868 * the CIG did not already exist, it shall not be created.
3871 /* Keep current configuration, fail only the unbound CIS */
3872 hci_unbound_cis_failed(hdev, rp->cig_id, status);
3876 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
3878 * If the Status return parameter is zero, then the Controller shall
3879 * set the Connection_Handle arrayed return parameter to the connection
3880 * handle(s) corresponding to the CIS configurations specified in
3881 * the CIS_IDs command parameter, in the same order.
3883 for (i = 0; i < rp->num_handles; ++i) {
3884 conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
3886 if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
3889 if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
/* Assign the controller-provided handle to this CIS */
3892 if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
3895 if (conn->state == BT_CONNECT)
/* Kick off any CIS creation that was waiting for the handles */
3901 hci_le_create_cis_pending(hdev);
3903 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Setup_ISO_Data_Path: confirm the ISO
 * connection to upper layers once the required data path direction(s)
 * have been configured.
 * NOTE(review): numbering gaps show dropped lines; visible code only.
 */
3908 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3909 struct sk_buff *skb)
3911 struct hci_rp_le_setup_iso_path *rp = data;
3912 struct hci_cp_le_setup_iso_path *cp;
3913 struct hci_conn *conn;
3915 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3917 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3923 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
/* Error path: report the failure to the upper layer */
3928 hci_connect_cfm(conn, rp->status);
3933 switch (cp->direction) {
3934 /* Input (Host to Controller) */
3936 /* Only confirm connection if output only */
3937 if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3938 hci_connect_cfm(conn, rp->status);
3940 /* Output (Controller to Host) */
3942 /* Confirm connection since conn->iso_qos is always configured
3945 hci_connect_cfm(conn, rp->status);
3947 /* Notify device connected in case it is a BIG Sync */
3948 if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
3949 mgmt_device_connected(hdev, conn, NULL, 0);
3955 hci_dev_unlock(hdev);
3959 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3961 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3964 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3965 struct sk_buff *skb)
3967 struct hci_ev_status *rp = data;
3968 struct hci_cp_le_set_per_adv_params *cp;
3970 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3975 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3979 /* TODO: set the conn state */
/* Command Complete for HCI_LE_Set_Periodic_Advertising_Enable: track
 * the enabled state of the periodic adv instance and maintain the
 * device-wide HCI_LE_PER_ADV flag.
 * NOTE(review): numbering gaps show dropped lines; visible code only.
 */
3983 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3984 struct sk_buff *skb)
3986 struct hci_ev_status *rp = data;
3987 struct hci_cp_le_set_per_adv_enable *cp;
3988 struct adv_info *adv = NULL, *n;
3991 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3996 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
4002 adv = hci_find_adv_instance(hdev, cp->handle);
/* Enable path: flag the device and mark the instance enabled */
4005 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
4008 adv->enabled = true;
4010 /* If just one instance was disabled check if there are
4011 * any other instance enabled before clearing HCI_LE_PER_ADV.
4012 * The current periodic adv instance will be marked as
4013 * disabled once extended advertising is also disabled.
4015 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
4017 if (adv->periodic && adv->enabled)
4021 if (per_adv_cnt > 1)
4024 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
4028 hci_dev_unlock(hdev);
/* Command Complete dispatch table: maps each HCI opcode to its handler
 * together with the minimum (and maximum) acceptable response payload
 * length, which hci_cc_func() enforces before invoking the handler.
 * HCI_CC uses a fixed length; HCI_CC_STATUS expects just a status byte.
 * NOTE(review): the HCI_CC_VL macro body was dropped by extraction.
 */
4033 #define HCI_CC_VL(_op, _func, _min, _max) \
4041 #define HCI_CC(_op, _func, _len) \
4042 HCI_CC_VL(_op, _func, _len, _len)
4044 #define HCI_CC_STATUS(_op, _func) \
4045 HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4047 static const struct hci_cc {
4049 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4052 } hci_cc_table[] = {
4053 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4054 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4055 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4056 HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4057 hci_cc_remote_name_req_cancel),
4058 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4059 sizeof(struct hci_rp_role_discovery)),
4060 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4061 sizeof(struct hci_rp_read_link_policy)),
4062 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4063 sizeof(struct hci_rp_write_link_policy)),
4064 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4065 sizeof(struct hci_rp_read_def_link_policy)),
4066 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4067 hci_cc_write_def_link_policy),
4068 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4069 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4070 sizeof(struct hci_rp_read_stored_link_key)),
4071 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4072 sizeof(struct hci_rp_delete_stored_link_key)),
4073 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4074 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4075 sizeof(struct hci_rp_read_local_name)),
4076 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4077 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4078 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4079 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4080 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4081 sizeof(struct hci_rp_read_class_of_dev)),
4082 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4083 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4084 sizeof(struct hci_rp_read_voice_setting)),
4085 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4086 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4087 sizeof(struct hci_rp_read_num_supported_iac)),
4088 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4089 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4090 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4091 sizeof(struct hci_rp_read_auth_payload_to)),
4092 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4093 sizeof(struct hci_rp_write_auth_payload_to)),
4094 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4095 sizeof(struct hci_rp_read_local_version)),
4096 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4097 sizeof(struct hci_rp_read_local_commands)),
4098 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4099 sizeof(struct hci_rp_read_local_features)),
4100 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4101 sizeof(struct hci_rp_read_local_ext_features)),
4102 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4103 sizeof(struct hci_rp_read_buffer_size)),
4104 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4105 sizeof(struct hci_rp_read_bd_addr)),
4106 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4107 sizeof(struct hci_rp_read_local_pairing_opts)),
4108 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4109 sizeof(struct hci_rp_read_page_scan_activity)),
4110 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4111 hci_cc_write_page_scan_activity),
4112 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4113 sizeof(struct hci_rp_read_page_scan_type)),
4114 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4115 HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
4116 sizeof(struct hci_rp_read_data_block_size)),
4117 HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
4118 sizeof(struct hci_rp_read_flow_control_mode)),
4119 HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
4120 sizeof(struct hci_rp_read_local_amp_info)),
4121 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4122 sizeof(struct hci_rp_read_clock)),
4123 HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4124 sizeof(struct hci_rp_read_enc_key_size)),
4125 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4126 sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4127 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4128 hci_cc_read_def_err_data_reporting,
4129 sizeof(struct hci_rp_read_def_err_data_reporting)),
4130 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4131 hci_cc_write_def_err_data_reporting),
4132 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4133 sizeof(struct hci_rp_pin_code_reply)),
4134 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4135 sizeof(struct hci_rp_pin_code_neg_reply)),
4136 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4137 sizeof(struct hci_rp_read_local_oob_data)),
4138 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4139 sizeof(struct hci_rp_read_local_oob_ext_data)),
/* LE commands start here */
4140 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4141 sizeof(struct hci_rp_le_read_buffer_size)),
4142 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4143 sizeof(struct hci_rp_le_read_local_features)),
4144 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4145 sizeof(struct hci_rp_le_read_adv_tx_power)),
4146 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4147 sizeof(struct hci_rp_user_confirm_reply)),
4148 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4149 sizeof(struct hci_rp_user_confirm_reply)),
4150 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4151 sizeof(struct hci_rp_user_confirm_reply)),
4152 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4153 sizeof(struct hci_rp_user_confirm_reply)),
4154 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4155 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4156 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4157 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4158 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4159 hci_cc_le_read_accept_list_size,
4160 sizeof(struct hci_rp_le_read_accept_list_size)),
4161 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4162 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4163 hci_cc_le_add_to_accept_list),
4164 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4165 hci_cc_le_del_from_accept_list),
4166 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4167 sizeof(struct hci_rp_le_read_supported_states)),
4168 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4169 sizeof(struct hci_rp_le_read_def_data_len)),
4170 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4171 hci_cc_le_write_def_data_len),
4172 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4173 hci_cc_le_add_to_resolv_list),
4174 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4175 hci_cc_le_del_from_resolv_list),
4176 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4177 hci_cc_le_clear_resolv_list),
4178 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4179 sizeof(struct hci_rp_le_read_resolv_list_size)),
4180 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4181 hci_cc_le_set_addr_resolution_enable),
4182 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4183 sizeof(struct hci_rp_le_read_max_data_len)),
4184 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4185 hci_cc_write_le_host_supported),
4186 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4187 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4188 sizeof(struct hci_rp_read_rssi)),
4189 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4190 sizeof(struct hci_rp_read_tx_power)),
4191 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4192 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4193 hci_cc_le_set_ext_scan_param),
4194 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4195 hci_cc_le_set_ext_scan_enable),
4196 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4197 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4198 hci_cc_le_read_num_adv_sets,
4199 sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4200 HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4201 sizeof(struct hci_rp_le_set_ext_adv_params)),
4202 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4203 hci_cc_le_set_ext_adv_enable),
4204 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4205 hci_cc_le_set_adv_set_random_addr),
4206 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4207 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4208 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4209 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4210 hci_cc_le_set_per_adv_enable),
4211 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4212 sizeof(struct hci_rp_le_read_transmit_power)),
4213 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4214 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4215 sizeof(struct hci_rp_le_read_buffer_size_v2)),
/* Variable-length response: one handle per configured CIS */
4216 HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4217 sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4218 HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4219 sizeof(struct hci_rp_le_setup_iso_path)),
4222 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4223 struct sk_buff *skb)
4227 if (skb->len < cc->min_len) {
4228 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4229 cc->op, skb->len, cc->min_len);
4230 return HCI_ERROR_UNSPECIFIED;
4233 /* Just warn if the length is over max_len size it still be possible to
4234 * partially parse the cc so leave to callback to decide if that is
4237 if (skb->len > cc->max_len)
4238 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4239 cc->op, skb->len, cc->max_len);
4241 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4243 return HCI_ERROR_UNSPECIFIED;
4245 return cc->func(hdev, data, skb);
/* HCI Command Complete event: dispatch to the matching hci_cc_table
 * handler, fall back to byte 0 as status for unknown (vendor) opcodes,
 * update command-flow accounting, and complete any pending request.
 * NOTE(review): numbering gaps show dropped lines; visible code only.
 */
4248 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4249 struct sk_buff *skb, u16 *opcode, u8 *status,
4250 hci_req_complete_t *req_complete,
4251 hci_req_complete_skb_t *req_complete_skb)
4253 struct hci_ev_cmd_complete *ev = data;
4256 *opcode = __le16_to_cpu(ev->opcode);
4258 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
/* Linear scan of the dispatch table for this opcode */
4260 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4261 if (hci_cc_table[i].op == *opcode) {
4262 *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4267 if (i == ARRAY_SIZE(hci_cc_table)) {
4268 /* Unknown opcode, assume byte 0 contains the status, so
4269 * that e.g. __hci_cmd_sync() properly returns errors
4270 * for vendor specific commands send by HCI drivers.
4271 * If a vendor doesn't actually follow this convention we may
4272 * need to introduce a vendor CC table in order to properly set
4275 *status = skb->data[0];
4278 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4280 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4283 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4285 "unexpected event for opcode 0x%4.4x", *opcode);
/* More credits available and commands queued: resume sending */
4289 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4290 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Command Status handler for HCI_LE_Create_CIS: on failure, close every
 * CIS connection named in the command we sent and notify upper layers.
 * NOTE(review): numbering gaps show dropped lines; visible code only.
 */
4293 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4295 struct hci_cp_le_create_cis *cp;
4296 bool pending = false;
4299 bt_dev_dbg(hdev, "status 0x%2.2x", status);
4304 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4310 /* Remove connection if command failed */
4311 for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4312 struct hci_conn *conn;
4315 handle = __le16_to_cpu(cp->cis[i].cis_handle);
4317 conn = hci_conn_hash_lookup_handle(hdev, handle);
4319 if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4322 conn->state = BT_CLOSED;
4323 hci_connect_cfm(conn, status);
/* Retry/continue with any CIS creation still queued */
4329 hci_le_create_cis_pending(hdev);
4331 hci_dev_unlock(hdev);
/* Command Status dispatch table: maps HCI opcodes to the handler called
 * when a Command Status event is received for that command.
 * NOTE(review): the HCI_CS macro body was dropped by extraction.
 */
4334 #define HCI_CS(_op, _func) \
4340 static const struct hci_cs {
4342 void (*func)(struct hci_dev *hdev, __u8 status);
4343 } hci_cs_table[] = {
4344 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4345 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4346 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4347 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4348 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4349 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4350 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4351 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4352 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4353 hci_cs_read_remote_ext_features),
4354 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4355 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4356 hci_cs_enhanced_setup_sync_conn),
4357 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4358 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4359 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4360 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4361 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4362 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4363 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4364 HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4365 HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
/* HCI Command Status event: run the per-opcode status handler, update
 * command-flow accounting, and complete the pending request when the
 * command failed or no follow-up event is awaited.
 * NOTE(review): numbering gaps show dropped lines; visible code only.
 */
4368 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4369 struct sk_buff *skb, u16 *opcode, u8 *status,
4370 hci_req_complete_t *req_complete,
4371 hci_req_complete_skb_t *req_complete_skb)
4373 struct hci_ev_cmd_status *ev = data;
4376 *opcode = __le16_to_cpu(ev->opcode);
4377 *status = ev->status;
4379 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
/* Linear scan of the Command Status dispatch table */
4381 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4382 if (hci_cs_table[i].op == *opcode) {
4383 hci_cs_table[i].func(hdev, ev->status);
4388 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4390 /* Indicate request completion if the command failed. Also, if
4391 * we're not waiting for a special event and we get a success
4392 * command status we should try to flag the request as completed
4393 * (since for this kind of commands there will not be a command
4396 if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
4397 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4399 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4400 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
/* More credits available and commands queued: resume sending */
4406 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4407 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Hardware Error event: record the controller's error
 * code and schedule the error-reset work to recover the device.
 */
4410 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4411 struct sk_buff *skb)
4413 struct hci_ev_hardware_error *ev = data;
4415 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4417 hdev->hw_error_code = ev->code;
4419 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle the HCI Role Change event: update the connection's role
 * (central/peripheral) on success, clear the pending role-switch flag
 * and notify the role-switch confirmation callback.
 */
4422 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4423 struct sk_buff *skb)
4425 struct hci_ev_role_change *ev = data;
4426 struct hci_conn *conn;
4428 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4432 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4435 conn->role = ev->role;
4437 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4439 hci_role_switch_cfm(conn, ev->status, ev->role);
4442 hci_dev_unlock(hdev);
/* Handle the HCI Number Of Completed Packets event (packet-based flow
 * control): for each reported handle, subtract the completed count from
 * the connection's in-flight counter and return the credits to the
 * matching per-type quota (ACL/LE/SCO/ISO), clamped to the controller's
 * advertised maximum. Finally reschedule TX work.
 */
4445 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4446 struct sk_buff *skb)
4448 struct hci_ev_num_comp_pkts *ev = data;
/* Validate that the skb really carries ev->num handle/count entries. */
4451 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4452 flex_array_size(ev, handles, ev->num)))
4455 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4456 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4460 bt_dev_dbg(hdev, "num %d", ev->num);
4462 for (i = 0; i < ev->num; i++) {
4463 struct hci_comp_pkts_info *info = &ev->handles[i];
4464 struct hci_conn *conn;
4465 __u16 handle, count;
4467 handle = __le16_to_cpu(info->handle);
4468 count = __le16_to_cpu(info->count);
4470 conn = hci_conn_hash_lookup_handle(hdev, handle);
4474 conn->sent -= count;
4476 switch (conn->type) {
4478 hdev->acl_cnt += count;
4479 if (hdev->acl_cnt > hdev->acl_pkts)
4480 hdev->acl_cnt = hdev->acl_pkts;
/* LE traffic uses the dedicated LE buffer pool when the controller
 * reports one (le_pkts != 0), otherwise it shares the ACL pool.
 */
4484 if (hdev->le_pkts) {
4485 hdev->le_cnt += count;
4486 if (hdev->le_cnt > hdev->le_pkts)
4487 hdev->le_cnt = hdev->le_pkts;
4489 hdev->acl_cnt += count;
4490 if (hdev->acl_cnt > hdev->acl_pkts)
4491 hdev->acl_cnt = hdev->acl_pkts;
4496 hdev->sco_cnt += count;
4497 if (hdev->sco_cnt > hdev->sco_pkts)
4498 hdev->sco_cnt = hdev->sco_pkts;
/* ISO falls back to the LE pool, then to the ACL pool, when no
 * dedicated ISO buffers exist.
 */
4502 if (hdev->iso_pkts) {
4503 hdev->iso_cnt += count;
4504 if (hdev->iso_cnt > hdev->iso_pkts)
4505 hdev->iso_cnt = hdev->iso_pkts;
4506 } else if (hdev->le_pkts) {
4507 hdev->le_cnt += count;
4508 if (hdev->le_cnt > hdev->le_pkts)
4509 hdev->le_cnt = hdev->le_pkts;
4511 hdev->acl_cnt += count;
4512 if (hdev->acl_cnt > hdev->acl_pkts)
4513 hdev->acl_cnt = hdev->acl_pkts;
4518 bt_dev_err(hdev, "unknown type %d conn %p",
/* Credits were returned; let the TX worker push more queued data. */
4524 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a handle to a connection depending on the device type:
 * a plain connection-hash lookup for BR/EDR controllers, or a
 * channel-table lookup (returning the channel's connection) for AMP.
 * NOTE(review): the return path for the AMP branch is in lines elided
 * from this listing.
 */
4527 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4530 struct hci_chan *chan;
4532 switch (hdev->dev_type) {
4534 return hci_conn_hash_lookup_handle(hdev, handle);
4536 chan = hci_chan_lookup_handle(hdev, handle);
4541 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* Handle the HCI Number Of Completed Data Blocks event (block-based
 * flow control, used by AMP controllers): return completed blocks to
 * the shared block pool, clamped to num_blocks, then kick TX work.
 */
4548 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4549 struct sk_buff *skb)
4551 struct hci_ev_num_comp_blocks *ev = data;
/* Validate that the skb really carries ev->num_hndl entries. */
4554 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4555 flex_array_size(ev, handles, ev->num_hndl)))
4558 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4559 bt_dev_err(hdev, "wrong event for mode %d",
4560 hdev->flow_ctl_mode);
4564 bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4567 for (i = 0; i < ev->num_hndl; i++) {
4568 struct hci_comp_blocks_info *info = &ev->handles[i];
4569 struct hci_conn *conn = NULL;
4570 __u16 handle, block_count;
4572 handle = __le16_to_cpu(info->handle);
4573 block_count = __le16_to_cpu(info->blocks);
/* Device-type aware lookup (connection handle vs AMP channel handle). */
4575 conn = __hci_conn_lookup_handle(hdev, handle);
4579 conn->sent -= block_count;
4581 switch (conn->type) {
4584 hdev->block_cnt += block_count;
4585 if (hdev->block_cnt > hdev->num_blocks)
4586 hdev->block_cnt = hdev->num_blocks;
4590 bt_dev_err(hdev, "unknown type %d conn %p",
4596 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle the HCI Mode Change event: record the new mode (active/sniff),
 * track the power-save flag for host-initiated changes, and trigger any
 * deferred SCO setup that was waiting for the mode change.
 */
4599 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4600 struct sk_buff *skb)
4602 struct hci_ev_mode_change *ev = data;
4603 struct hci_conn *conn;
4605 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4609 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4611 conn->mode = ev->mode;
/* Only adjust POWER_SAVE when the mode change was not pending from us. */
4613 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4615 if (conn->mode == HCI_CM_ACTIVE)
4616 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4618 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4621 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4622 hci_sco_setup(conn, ev->status);
4625 hci_dev_unlock(hdev);
/* Handle the HCI PIN Code Request event: extend the disconnect timeout
 * during pairing, reject the request when we are not bondable and did
 * not initiate authentication, otherwise forward it to user space via
 * mgmt (flagging whether a secure/16-digit PIN is required).
 */
4628 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4629 struct sk_buff *skb)
4631 struct hci_ev_pin_code_req *ev = data;
4632 struct hci_conn *conn;
4634 bt_dev_dbg(hdev, "");
4638 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* hold+drop pattern: refresh the reference so the new disc_timeout
 * takes effect without leaking a reference.
 */
4642 if (conn->state == BT_CONNECTED) {
4643 hci_conn_hold(conn);
4644 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4645 hci_conn_drop(conn);
4648 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4649 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4650 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4651 sizeof(ev->bdaddr), &ev->bdaddr);
4652 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4655 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4660 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4664 hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level from the key type: combination keys rate
 * HIGH only with a 16-digit PIN, unauthenticated SSP keys rate MEDIUM,
 * authenticated P-192 rates HIGH and authenticated P-256 rates FIPS.
 * Changed-combination keys keep the connection's previous key type.
 */
4667 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4669 if (key_type == HCI_LK_CHANGED_COMBINATION)
4672 conn->pin_length = pin_len;
4673 conn->key_type = key_type;
4676 case HCI_LK_LOCAL_UNIT:
4677 case HCI_LK_REMOTE_UNIT:
4678 case HCI_LK_DEBUG_COMBINATION:
4680 case HCI_LK_COMBINATION:
4682 conn->pending_sec_level = BT_SECURITY_HIGH;
4684 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4686 case HCI_LK_UNAUTH_COMBINATION_P192:
4687 case HCI_LK_UNAUTH_COMBINATION_P256:
4688 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4690 case HCI_LK_AUTH_COMBINATION_P192:
4691 conn->pending_sec_level = BT_SECURITY_HIGH;
4693 case HCI_LK_AUTH_COMBINATION_P256:
4694 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle the HCI Link Key Request event: look up a stored link key for
 * the peer and reply with it, unless the key's strength is insufficient
 * for the connection's (pending) security requirements, in which case
 * send a negative reply so a fresh pairing takes place.
 */
4699 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4700 struct sk_buff *skb)
4702 struct hci_ev_link_key_req *ev = data;
4703 struct hci_cp_link_key_reply cp;
4704 struct hci_conn *conn;
4705 struct link_key *key;
4707 bt_dev_dbg(hdev, "");
4709 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4714 key = hci_find_link_key(hdev, &ev->bdaddr);
4716 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4720 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4722 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4724 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Reject an unauthenticated key when MITM protection was requested
 * (auth_type bit 0) and the remote's auth requirements are known.
 */
4726 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4727 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4728 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4729 bt_dev_dbg(hdev, "ignoring unauthenticated key");
/* A combination key only counts as authenticated with a 16-digit PIN. */
4733 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4734 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4735 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4736 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4740 conn_set_key(conn, key->type, key->pin_len);
4743 bacpy(&cp.bdaddr, &ev->bdaddr);
4744 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4746 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4748 hci_dev_unlock(hdev);
/* Negative-reply path: no usable key for this peer. */
4753 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4754 hci_dev_unlock(hdev);
/* Handle the HCI Link Key Notification event: reject all-zero keys
 * (CVE-2020-26555 mitigation), store the new key, notify user space,
 * and discard debug keys unless HCI_KEEP_DEBUG_KEYS is set.
 */
4757 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4758 struct sk_buff *skb)
4760 struct hci_ev_link_key_notify *ev = data;
4761 struct hci_conn *conn;
4762 struct link_key *key;
4766 bt_dev_dbg(hdev, "");
4770 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4774 /* Ignore NULL link key against CVE-2020-26555 */
/* crypto_memneq: constant-time compare, avoids timing side channels. */
4775 if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4776 bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4778 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4779 hci_conn_drop(conn);
4783 hci_conn_hold(conn);
4784 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4785 hci_conn_drop(conn);
4787 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4788 conn_set_key(conn, ev->key_type, conn->pin_length);
4790 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4793 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4794 ev->key_type, pin_len, &persistent);
4798 /* Update connection information since adding the key will have
4799 * fixed up the type in the case of changed combination keys.
4801 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4802 conn_set_key(conn, key->type, key->pin_len);
4804 mgmt_new_link_key(hdev, key, persistent);
4806 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4807 * is set. If it's not set simply remove the key from the kernel
4808 * list (we've still notified user space about it but with
4809 * store_hint being 0).
4811 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4812 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4813 list_del_rcu(&key->list);
4814 kfree_rcu(key, rcu);
/* FLUSH_KEY tracks whether the key should be dropped on disconnect
 * (non-persistent keys).
 */
4819 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4821 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4824 hci_dev_unlock(hdev);
/* Handle the HCI Read Clock Offset Complete event: cache the peer's
 * clock offset in the inquiry cache entry (used to speed up later
 * paging of the same device).
 */
4827 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4828 struct sk_buff *skb)
4830 struct hci_ev_clock_offset *ev = data;
4831 struct hci_conn *conn;
4833 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4837 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4838 if (conn && !ev->status) {
4839 struct inquiry_entry *ie;
4841 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4843 ie->data.clock_offset = ev->clock_offset;
4844 ie->timestamp = jiffies;
4848 hci_dev_unlock(hdev);
/* Handle the HCI Connection Packet Type Changed event: record the new
 * packet-type bitmask on the connection when the change succeeded.
 */
4851 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4852 struct sk_buff *skb)
4854 struct hci_ev_pkt_type_change *ev = data;
4855 struct hci_conn *conn;
4857 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4861 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4862 if (conn && !ev->status)
4863 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4865 hci_dev_unlock(hdev);
/* Handle the HCI Page Scan Repetition Mode Change event: refresh the
 * cached page-scan repetition mode in the inquiry cache entry.
 */
4868 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4869 struct sk_buff *skb)
4871 struct hci_ev_pscan_rep_mode *ev = data;
4872 struct inquiry_entry *ie;
4874 bt_dev_dbg(hdev, "");
4878 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4880 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4881 ie->timestamp = jiffies;
4884 hci_dev_unlock(hdev);
/* Handle the HCI Inquiry Result With RSSI event. The event comes in two
 * on-the-wire formats (with or without the pscan_mode field); the total
 * skb length is used to pick the right per-record structure. Each
 * record is validated, added to the inquiry cache and reported to
 * user space via mgmt_device_found().
 */
4887 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4888 struct sk_buff *skb)
4890 struct hci_ev_inquiry_result_rssi *ev = edata;
4891 struct inquiry_data data;
4894 bt_dev_dbg(hdev, "num_rsp %d", ev->num);
/* Results from periodic inquiry are not forwarded. */
4899 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Variant 1: records include the legacy pscan_mode byte. */
4904 if (skb->len == array_size(ev->num,
4905 sizeof(struct inquiry_info_rssi_pscan))) {
4906 struct inquiry_info_rssi_pscan *info;
4908 for (i = 0; i < ev->num; i++) {
4911 info = hci_ev_skb_pull(hdev, skb,
4912 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4915 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4916 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4920 bacpy(&data.bdaddr, &info->bdaddr);
4921 data.pscan_rep_mode = info->pscan_rep_mode;
4922 data.pscan_period_mode = info->pscan_period_mode;
4923 data.pscan_mode = info->pscan_mode;
4924 memcpy(data.dev_class, info->dev_class, 3);
4925 data.clock_offset = info->clock_offset;
4926 data.rssi = info->rssi;
4927 data.ssp_mode = 0x00;
4929 flags = hci_inquiry_cache_update(hdev, &data, false);
4931 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4932 info->dev_class, info->rssi,
4933 flags, NULL, 0, NULL, 0, 0);
/* Variant 2: standard records without the pscan_mode byte. */
4935 } else if (skb->len == array_size(ev->num,
4936 sizeof(struct inquiry_info_rssi))) {
4937 struct inquiry_info_rssi *info;
4939 for (i = 0; i < ev->num; i++) {
4942 info = hci_ev_skb_pull(hdev, skb,
4943 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4946 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4947 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4951 bacpy(&data.bdaddr, &info->bdaddr);
4952 data.pscan_rep_mode = info->pscan_rep_mode;
4953 data.pscan_period_mode = info->pscan_period_mode;
4954 data.pscan_mode = 0x00;
4955 memcpy(data.dev_class, info->dev_class, 3);
4956 data.clock_offset = info->clock_offset;
4957 data.rssi = info->rssi;
4958 data.ssp_mode = 0x00;
4960 flags = hci_inquiry_cache_update(hdev, &data, false);
4962 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4963 info->dev_class, info->rssi,
4964 flags, NULL, 0, NULL, 0, 0);
/* Length matched neither format: malformed event. */
4967 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4968 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4971 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Extended Features Complete event: store
 * the feature page, derive SSP/SC flags from page 1 host features, and
 * continue connection setup (remote name request or connect
 * confirmation) while the connection is still in BT_CONFIG.
 */
4974 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4975 struct sk_buff *skb)
4977 struct hci_ev_remote_ext_features *ev = data;
4978 struct hci_conn *conn;
4980 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4984 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Bounds-check the page index before caching the 8 feature bytes. */
4988 if (ev->page < HCI_MAX_PAGES)
4989 memcpy(conn->features[ev->page], ev->features, 8);
4991 if (!ev->status && ev->page == 0x01) {
4992 struct inquiry_entry *ie;
4994 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4996 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4998 if (ev->features[0] & LMP_HOST_SSP) {
4999 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5001 /* It is mandatory by the Bluetooth specification that
5002 * Extended Inquiry Results are only used when Secure
5003 * Simple Pairing is enabled, but some devices violate
5006 * To make these devices work, the internal SSP
5007 * enabled flag needs to be cleared if the remote host
5008 * features do not indicate SSP support */
5009 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5012 if (ev->features[0] & LMP_HOST_SC)
5013 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
5016 if (conn->state != BT_CONFIG)
5019 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
5020 struct hci_cp_remote_name_req cp;
5021 memset(&cp, 0, sizeof(cp));
5022 bacpy(&cp.bdaddr, &conn->dst);
5023 cp.pscan_rep_mode = 0x02;
5024 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
5026 mgmt_device_connected(hdev, conn, NULL, 0);
/* If no outgoing authentication is required, setup is complete. */
5029 if (!hci_outgoing_auth_needed(hdev, conn)) {
5030 conn->state = BT_CONNECTED;
5031 hci_connect_cfm(conn, ev->status);
5032 hci_conn_drop(conn);
5036 hci_dev_unlock(hdev);
/* Handle the HCI Synchronous Connection Complete event: validate the
 * link type (SCO/eSCO only), locate the pending connection object,
 * guard against duplicate events for an already-set-up connection,
 * assign the handle on success, retry with a downgraded packet type on
 * specific failure codes, notify the driver of the SCO air mode, and
 * finally confirm the connection to upper layers.
 */
5039 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
5040 struct sk_buff *skb)
5042 struct hci_ev_sync_conn_complete *ev = data;
5043 struct hci_conn *conn;
5044 u8 status = ev->status;
5046 switch (ev->link_type) {
5051 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
5052 * for HCI_Synchronous_Connection_Complete is limited to
5053 * either SCO or eSCO
5055 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5059 bt_dev_dbg(hdev, "status 0x%2.2x", status);
5063 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5065 if (ev->link_type == ESCO_LINK)
5068 /* When the link type in the event indicates SCO connection
5069 * and lookup of the connection object fails, then check
5070 * if an eSCO connection object exists.
5072 * The core limits the synchronous connections to either
5073 * SCO or eSCO. The eSCO connection is preferred and tried
5074 * to be setup first and until successfully established,
5075 * the link type will be hinted as eSCO.
5077 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5082 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5083 * Processing it more than once per connection can corrupt kernel memory.
5085 * As the connection handle is set here for the first time, it indicates
5086 * whether the connection is already set up.
5088 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5089 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
/* hci_conn_set_handle() may itself fail; treat that as a failed setup. */
5095 status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
5097 conn->state = BT_CLOSED;
5101 conn->state = BT_CONNECTED;
5102 conn->type = ev->link_type;
5104 hci_debugfs_create_conn(conn);
5105 hci_conn_add_sysfs(conn);
/* These controller error codes trigger a retry with reduced
 * (non-EDR/eSCO) packet types before giving up.
 */
5108 case 0x10: /* Connection Accept Timeout */
5109 case 0x0d: /* Connection Rejected due to Limited Resources */
5110 case 0x11: /* Unsupported Feature or Parameter Value */
5111 case 0x1c: /* SCO interval rejected */
5112 case 0x1a: /* Unsupported Remote Feature */
5113 case 0x1e: /* Invalid LMP Parameters */
5114 case 0x1f: /* Unspecified error */
5115 case 0x20: /* Unsupported LMP Parameter value */
5117 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5118 (hdev->esco_type & EDR_ESCO_MASK);
5119 if (hci_setup_sync(conn, conn->parent->handle))
5125 conn->state = BT_CLOSED;
5129 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5130 /* Notify only in case of SCO over HCI transport data path which
5131 * is zero and non-zero value shall be non-HCI transport data path
5133 if (conn->codec.data_path == 0 && hdev->notify) {
5134 switch (ev->air_mode) {
5136 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5139 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5144 hci_connect_cfm(conn, status);
5149 hci_dev_unlock(hdev);
/* Walk EIR (Extended Inquiry Response) data as length-prefixed fields
 * and return the number of meaningful bytes, so trailing padding is not
 * reported. Each field occupies field_len + 1 bytes (length byte plus
 * payload). NOTE(review): the terminating condition and return
 * statement fall in lines elided from this listing.
 */
5152 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5156 while (parsed < eir_len) {
5157 u8 field_len = eir[0];
5162 parsed += field_len + 1;
5163 eir += field_len + 1;
/* Handle the HCI Extended Inquiry Result event: validate the record
 * count against the skb length, then for each result update the inquiry
 * cache (checking the EIR for a complete name) and report the device
 * with its EIR payload to user space via mgmt_device_found().
 */
5169 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5170 struct sk_buff *skb)
5172 struct hci_ev_ext_inquiry_result *ev = edata;
5173 struct inquiry_data data;
5177 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5178 flex_array_size(ev, info, ev->num)))
5181 bt_dev_dbg(hdev, "num %d", ev->num);
/* Results from periodic inquiry are not forwarded. */
5186 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5191 for (i = 0; i < ev->num; i++) {
5192 struct extended_inquiry_info *info = &ev->info[i];
5196 bacpy(&data.bdaddr, &info->bdaddr);
5197 data.pscan_rep_mode = info->pscan_rep_mode;
5198 data.pscan_period_mode = info->pscan_period_mode;
5199 data.pscan_mode = 0x00;
5200 memcpy(data.dev_class, info->dev_class, 3);
5201 data.clock_offset = info->clock_offset;
/* EIR implies the remote has SSP enabled. */
5202 data.rssi = info->rssi;
5203 data.ssp_mode = 0x01;
5205 if (hci_dev_test_flag(hdev, HCI_MGMT))
5206 name_known = eir_get_data(info->data,
5208 EIR_NAME_COMPLETE, NULL);
5212 flags = hci_inquiry_cache_update(hdev, &data, name_known);
/* Trim trailing EIR padding before handing the data to mgmt. */
5214 eir_len = eir_get_length(info->data, sizeof(info->data));
5216 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5217 info->dev_class, info->rssi,
5218 flags, info->data, eir_len, NULL, 0, 0);
5221 hci_dev_unlock(hdev);
/* Handle the HCI Encryption Key Refresh Complete event. Only LE links
 * are processed here (BR/EDR is handled via the auth-complete event):
 * promote the pending security level, disconnect on refresh failure,
 * and complete connection setup if the link was still in BT_CONFIG.
 */
5224 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5225 struct sk_buff *skb)
5227 struct hci_ev_key_refresh_complete *ev = data;
5228 struct hci_conn *conn;
5230 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5231 __le16_to_cpu(ev->handle));
5235 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5239 /* For BR/EDR the necessary steps are taken through the
5240 * auth_complete event.
5242 if (conn->type != LE_LINK)
5246 conn->sec_level = conn->pending_sec_level;
5248 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Failed refresh on an established link: force a disconnect. */
5250 if (ev->status && conn->state == BT_CONNECTED) {
5251 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5252 hci_conn_drop(conn);
5256 if (conn->state == BT_CONFIG) {
5258 conn->state = BT_CONNECTED;
5260 hci_connect_cfm(conn, ev->status);
5261 hci_conn_drop(conn);
5263 hci_auth_cfm(conn, ev->status);
5265 hci_conn_hold(conn);
5266 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5267 hci_conn_drop(conn);
5271 hci_dev_unlock(hdev);
/* Compute the authentication requirement to send in an IO Capability
 * Reply, combining the remote's stated requirement with our own MITM
 * bit (auth_type bit 0). MITM is only requested when both sides have
 * IO capabilities that can support it.
 */
5274 static u8 hci_get_auth_req(struct hci_conn *conn)
5276 /* If remote requests no-bonding follow that lead */
5277 if (conn->remote_auth == HCI_AT_NO_BONDING ||
5278 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5279 return conn->remote_auth | (conn->auth_type & 0x01);
5281 /* If both remote and local have enough IO capabilities, require
5284 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5285 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5286 return conn->remote_auth | 0x01;
5288 /* No MITM protection possible so ignore remote requirement */
5289 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Determine the OOB-data-present value for an IO Capability Reply:
 * returns the stored OOB presence information for the peer, requiring
 * P-256 values in Secure Connections Only mode and P-192 values when
 * Secure Connections is unavailable. Zero-value hashes/randomizers are
 * treated as absent (checked in constant time via crypto_memneq).
 */
5292 static u8 bredr_oob_data_present(struct hci_conn *conn)
5294 struct hci_dev *hdev = conn->hdev;
5295 struct oob_data *data;
5297 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5301 if (bredr_sc_enabled(hdev)) {
5302 /* When Secure Connections is enabled, then just
5303 * return the present value stored with the OOB
5304 * data. The stored value contains the right present
5305 * information. However it can only be trusted when
5306 * not in Secure Connection Only mode.
5308 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5309 return data->present;
5311 /* When Secure Connections Only mode is enabled, then
5312 * the P-256 values are required. If they are not
5313 * available, then do not declare that OOB data is
5316 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5317 !crypto_memneq(data->hash256, ZERO_KEY, 16))
5323 /* When Secure Connections is not enabled or actually
5324 * not supported by the hardware, then check that if
5325 * P-192 data values are present.
5327 if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5328 !crypto_memneq(data->hash192, ZERO_KEY, 16))
/* Handle the HCI IO Capability Request event (SSP pairing start):
 * reply with our IO capability, authentication requirements and OOB
 * presence when pairing is allowed; otherwise send a negative reply
 * with "pairing not allowed".
 */
5334 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5335 struct sk_buff *skb)
5337 struct hci_ev_io_capa_request *ev = data;
5338 struct hci_conn *conn;
5340 bt_dev_dbg(hdev, "");
5344 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5345 if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5348 /* Assume remote supports SSP since it has triggered this event */
5349 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5351 hci_conn_hold(conn);
5353 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5356 /* Allow pairing if we're pairable, the initiators of the
5357 * pairing or if the remote is not requesting bonding.
5359 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5360 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5361 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5362 struct hci_cp_io_capability_reply cp;
5364 bacpy(&cp.bdaddr, &ev->bdaddr);
5365 /* Change the IO capability from KeyboardDisplay
5366 * to DisplayYesNo as it is not supported by BT spec. */
5367 cp.capability = (conn->io_capability == 0x04) ?
5368 HCI_IO_DISPLAY_YESNO : conn->io_capability;
5370 /* If we are initiators, there is no remote information yet */
5371 if (conn->remote_auth == 0xff) {
5372 /* Request MITM protection if our IO caps allow it
5373 * except for the no-bonding case.
5375 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5376 conn->auth_type != HCI_AT_NO_BONDING)
5377 conn->auth_type |= 0x01;
5379 conn->auth_type = hci_get_auth_req(conn);
5382 /* If we're not bondable, force one of the non-bondable
5383 * authentication requirement values.
5385 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5386 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5388 cp.authentication = conn->auth_type;
5389 cp.oob_data = bredr_oob_data_present(conn);
5391 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5394 struct hci_cp_io_capability_neg_reply cp;
5396 bacpy(&cp.bdaddr, &ev->bdaddr);
5397 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5399 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5404 hci_dev_unlock(hdev);
/* Handle the HCI IO Capability Response event: record the remote's IO
 * capability and authentication requirements on the connection for use
 * by the subsequent pairing steps.
 */
5407 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5408 struct sk_buff *skb)
5410 struct hci_ev_io_capa_reply *ev = data;
5411 struct hci_conn *conn;
5413 bt_dev_dbg(hdev, "");
5417 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5421 conn->remote_cap = ev->capability;
5422 conn->remote_auth = ev->authentication;
5425 hci_dev_unlock(hdev);
/* Handle the HCI User Confirmation Request event (SSP numeric
 * comparison): reject when we require MITM but the remote cannot
 * provide it, auto-accept when neither side needs MITM (optionally
 * after a configurable delay), and otherwise forward the request to
 * user space with a confirm hint.
 */
5428 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5429 struct sk_buff *skb)
5431 struct hci_ev_user_confirm_req *ev = data;
5432 int loc_mitm, rem_mitm, confirm_hint = 0;
5433 struct hci_conn *conn;
5435 bt_dev_dbg(hdev, "");
5439 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5442 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement encodes the MITM request. */
5446 loc_mitm = (conn->auth_type & 0x01);
5447 rem_mitm = (conn->remote_auth & 0x01);
5449 /* If we require MITM but the remote device can't provide that
5450 * (it has NoInputNoOutput) then reject the confirmation
5451 * request. We check the security level here since it doesn't
5452 * necessarily match conn->auth_type.
5454 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5455 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5456 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5457 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5458 sizeof(ev->bdaddr), &ev->bdaddr);
5462 /* If no side requires MITM protection; auto-accept */
5463 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5464 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5466 /* If we're not the initiators request authorization to
5467 * proceed from user space (mgmt_user_confirm with
5468 * confirm_hint set to 1). The exception is if neither
5469 * side had MITM or if the local IO capability is
5470 * NoInputNoOutput, in which case we do auto-accept
5472 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5473 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5474 (loc_mitm || rem_mitm)) {
5475 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5480 /* If there already exists link key in local host, leave the
5481 * decision to user space since the remote device could be
5482 * legitimate or malicious.
5484 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5485 bt_dev_dbg(hdev, "Local host already has link key");
5490 BT_DBG("Auto-accept of user confirmation with %ums delay",
5491 hdev->auto_accept_delay);
5493 if (hdev->auto_accept_delay > 0) {
5494 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5495 queue_delayed_work(conn->hdev->workqueue,
5496 &conn->auto_accept_work, delay);
/* Immediate auto-accept when no delay is configured. */
5500 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5501 sizeof(ev->bdaddr), &ev->bdaddr);
/* Fall through to user space for an explicit decision. */
5506 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5507 le32_to_cpu(ev->passkey), confirm_hint);
5510 hci_dev_unlock(hdev);
/* Handle the HCI User Passkey Request event: simply forward it to
 * user space via mgmt when the management interface is active.
 */
5513 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5514 struct sk_buff *skb)
5516 struct hci_ev_user_passkey_req *ev = data;
5518 bt_dev_dbg(hdev, "");
5520 if (hci_dev_test_flag(hdev, HCI_MGMT))
5521 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle the HCI User Passkey Notification event: cache the passkey to
 * display, reset the entered-digit counter, and notify user space.
 */
5524 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5525 struct sk_buff *skb)
5527 struct hci_ev_user_passkey_notify *ev = data;
5528 struct hci_conn *conn;
5530 bt_dev_dbg(hdev, "");
5532 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5536 conn->passkey_notify = __le32_to_cpu(ev->passkey);
5537 conn->passkey_entered = 0;
5539 if (hci_dev_test_flag(hdev, HCI_MGMT))
5540 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5541 conn->dst_type, conn->passkey_notify,
5542 conn->passkey_entered);
/* Handle the HCI Keypress Notification event: track how many passkey
 * digits the remote has entered/erased and relay progress to user
 * space so it can update the displayed passkey prompt.
 */
5545 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5546 struct sk_buff *skb)
5548 struct hci_ev_keypress_notify *ev = data;
5549 struct hci_conn *conn;
5551 bt_dev_dbg(hdev, "")
5553 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5558 case HCI_KEYPRESS_STARTED:
5559 conn->passkey_entered = 0;
5562 case HCI_KEYPRESS_ENTERED:
5563 conn->passkey_entered++;
5566 case HCI_KEYPRESS_ERASED:
5567 conn->passkey_entered--;
5570 case HCI_KEYPRESS_CLEARED:
5571 conn->passkey_entered = 0;
5574 case HCI_KEYPRESS_COMPLETED:
5578 if (hci_dev_test_flag(hdev, HCI_MGMT))
5579 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5580 conn->dst_type, conn->passkey_notify,
5581 conn->passkey_entered);
/* Handle the HCI Simple Pairing Complete event: reset the remote auth
 * requirement to unknown for the next pairing, report pairing failure
 * to user space when we were not the initiator (the initiator path is
 * covered by the auth-complete event), and drop the pairing reference.
 */
5584 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5585 struct sk_buff *skb)
5587 struct hci_ev_simple_pair_complete *ev = data;
5588 struct hci_conn *conn;
5590 bt_dev_dbg(hdev, "");
5594 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5595 if (!conn || !hci_conn_ssp_enabled(conn))
5598 /* Reset the authentication requirement to unknown */
5599 conn->remote_auth = 0xff;
5601 /* To avoid duplicate auth_failed events to user space we check
5602 * the HCI_CONN_AUTH_PEND flag which will be set if we
5603 * initiated the authentication. A traditional auth_complete
5604 * event gets always produced as initiator and is also mapped to
5605 * the mgmt_auth_failed event */
5606 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5607 mgmt_auth_failed(conn, ev->status);
5609 hci_conn_drop(conn);
5612 hci_dev_unlock(hdev);
/* Handle the HCI Remote Host Supported Features Notification event:
 * cache the host feature page (page 1) on the connection and refresh
 * the SSP-mode hint in the inquiry cache entry.
 */
5615 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5616 struct sk_buff *skb)
5618 struct hci_ev_remote_host_features *ev = data;
5619 struct inquiry_entry *ie;
5620 struct hci_conn *conn;
5622 bt_dev_dbg(hdev, "");
5626 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5628 memcpy(conn->features[1], ev->features, 8);
5630 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5632 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5634 hci_dev_unlock(hdev);
/* Handle the HCI Remote OOB Data Request event: reply with stored OOB
 * hash/randomizer values for the peer — the extended (P-192 + P-256)
 * form when Secure Connections is enabled (zeroing P-192 in SC-only
 * mode), the plain P-192 form otherwise — or send a negative reply
 * when no OOB data is stored.
 */
5637 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5638 struct sk_buff *skb)
5640 struct hci_ev_remote_oob_data_request *ev = edata;
5641 struct oob_data *data;
5643 bt_dev_dbg(hdev, "");
5647 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5650 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5652 struct hci_cp_remote_oob_data_neg_reply cp;
5654 bacpy(&cp.bdaddr, &ev->bdaddr);
5655 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5660 if (bredr_sc_enabled(hdev)) {
5661 struct hci_cp_remote_oob_ext_data_reply cp;
5663 bacpy(&cp.bdaddr, &ev->bdaddr);
/* SC-only mode must not expose the weaker P-192 values. */
5664 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5665 memset(cp.hash192, 0, sizeof(cp.hash192));
5666 memset(cp.rand192, 0, sizeof(cp.rand192));
5668 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5669 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5671 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5672 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5674 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5677 struct hci_cp_remote_oob_data_reply cp;
5679 bacpy(&cp.bdaddr, &ev->bdaddr);
5680 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5681 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5683 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5688 hci_dev_unlock(hdev);
/* AMP (high-speed) event handlers; compiled only with CONFIG_BT_HS. */
5691 #if IS_ENABLED(CONFIG_BT_HS)
/* Handle the HCI Channel Selected event: look up the physical-link
 * connection and trigger reading of the final local AMP assoc data.
 */
5692 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5693 struct sk_buff *skb)
5695 struct hci_ev_channel_selected *ev = data;
5696 struct hci_conn *hcon;
5698 bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5700 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5704 amp_read_loc_assoc_final_data(hdev, hcon);
/* Handle the HCI Physical Link Complete event (AMP): mark the physical
 * link connected, copy the peer address from the underlying BR/EDR
 * connection, register debugfs/sysfs entries and confirm the physical
 * link to the AMP manager.
 */
5707 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
5708 struct sk_buff *skb)
5710 struct hci_ev_phy_link_complete *ev = data;
5711 struct hci_conn *hcon, *bredr_hcon;
5713 bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
5718 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* The AMP link piggybacks on an existing BR/EDR L2CAP connection. */
5730 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5732 hcon->state = BT_CONNECTED;
5733 bacpy(&hcon->dst, &bredr_hcon->dst);
5735 hci_conn_hold(hcon);
5736 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5737 hci_conn_drop(hcon);
5739 hci_debugfs_create_conn(hcon);
5740 hci_conn_add_sysfs(hcon);
5742 amp_physical_cfm(bredr_hcon, hcon);
5745 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event: create an hci_chan for the logical
 * link on the physical AMP connection and, if an L2CAP channel is
 * waiting on the AMP manager, confirm the logical link to L2CAP using
 * the controller's block MTU.
 *
 * NOTE(review): excerpt has elided lines (lookup/allocation failure
 * paths, locking) - verify against the complete source.
 */
5748 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
5749 struct sk_buff *skb)
5751 struct hci_ev_logical_link_complete *ev = data;
5752 struct hci_conn *hcon;
5753 struct hci_chan *hchan;
5754 struct amp_mgr *mgr;
5756 bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5757 le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
5759 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5763 /* Create AMP hchan */
5764 hchan = hci_chan_create(hcon);
5768 hchan->handle = le16_to_cpu(ev->handle);
5771 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5773 mgr = hcon->amp_mgr;
5774 if (mgr && mgr->bredr_chan) {
5775 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5777 l2cap_chan_lock(bredr_chan);
/* AMP data is block-based; expose the controller block MTU to L2CAP. */
5779 bredr_chan->conn->mtu = hdev->block_mtu;
5780 l2cap_logical_cfm(bredr_chan, hchan, 0);
5781 hci_conn_hold(hcon);
5783 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnect Logical Link Complete event: find the AMP hchan by its
 * logical-link handle and tear it down with the reported reason.
 */
5787 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5788 struct sk_buff *skb)
5790 struct hci_ev_disconn_logical_link_complete *ev = data;
5791 struct hci_chan *hchan;
5793 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5794 le16_to_cpu(ev->handle), ev->status);
5801 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
/* Only AMP channels are handled here. */
5802 if (!hchan || !hchan->amp)
5805 amp_destroy_logical_link(hchan, ev->reason);
5808 hci_dev_unlock(hdev);
/* AMP Disconnect Physical Link Complete event: close the AMP connection
 * and notify upper layers via hci_disconn_cfm().
 */
5811 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5812 struct sk_buff *skb)
5814 struct hci_ev_disconn_phy_link_complete *ev = data;
5815 struct hci_conn *hcon;
5817 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5824 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* Guard on type: the phy handle must belong to an AMP link. */
5825 if (hcon && hcon->type == AMP_LINK) {
5826 hcon->state = BT_CLOSED;
5827 hci_disconn_cfm(hcon, ev->reason);
5831 hci_dev_unlock(hdev);
/* Fill in the initiator/responder address pair of an LE connection from
 * the connection-complete parameters, preferring the controller-reported
 * Local RPA over hdev->rpa when one is set.
 *
 * NOTE(review): the excerpt elides the role check that selects between
 * the two branches below (central: we initiated; peripheral: peer
 * initiated) - confirm against the complete source.
 */
5835 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5836 u8 bdaddr_type, bdaddr_t *local_rpa)
5839 conn->dst_type = bdaddr_type;
5840 conn->resp_addr_type = bdaddr_type;
5841 bacpy(&conn->resp_addr, bdaddr);
5843 /* Check if the controller has set a Local RPA then it must be
5844 * used instead of hdev->rpa.
5846 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5847 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5848 bacpy(&conn->init_addr, local_rpa);
5849 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5850 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5851 bacpy(&conn->init_addr, &conn->hdev->rpa);
/* No RPA in use: fall back to the controller's identity address. */
5853 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5854 &conn->init_addr_type);
5857 conn->resp_addr_type = conn->hdev->adv_addr_type;
5858 /* Check if the controller has set a Local RPA then it must be
5859 * used instead of hdev->rpa.
5861 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5862 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5863 bacpy(&conn->resp_addr, local_rpa);
5864 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5865 /* In case of ext adv, resp_addr will be updated in
5866 * Adv Terminated event.
5868 if (!ext_adv_capable(conn->hdev))
5869 bacpy(&conn->resp_addr,
5870 &conn->hdev->random_addr);
5872 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5875 conn->init_addr_type = bdaddr_type;
5876 bacpy(&conn->init_addr, bdaddr);
5878 /* For incoming connections, set the default minimum
5879 * and maximum connection interval. They will be used
5880 * to check if the parameters are in range and if not
5881 * trigger the connection update procedure.
5883 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5884 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete: find or create the hci_conn for @bdaddr, resolve addresses
 * (RPA -> identity via IRK), assign the connection handle, reject
 * blocked/aborted connections, and either start the remote-features
 * exchange (central, or peripheral if the controller supports
 * peripheral-initiated exchange) or go straight to BT_CONNECTED.
 * Finally clears any pending auto-connect params for the peer.
 *
 * NOTE(review): this excerpt elides many lines (goto labels/unlock
 * paths, the role check before the accept-list comment, several
 * condition lines) - treat control flow here as indicative only and
 * verify against the complete source.
 */
5888 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5889 bdaddr_t *bdaddr, u8 bdaddr_type,
5890 bdaddr_t *local_rpa, u8 role, u16 handle,
5891 u16 interval, u16 latency,
5892 u16 supervision_timeout)
5894 struct hci_conn_params *params;
5895 struct hci_conn *conn;
5896 struct smp_irk *irk;
5901 /* All controllers implicitly stop advertising in the event of a
5902 * connection, so ensure that the state bit is cleared.
5904 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5906 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5908 /* In case of error status and there is no connection pending
5909 * just unlock as there is nothing to cleanup.
/* No pre-existing hci_conn: allocate one without a handle yet. */
5914 conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
5916 bt_dev_err(hdev, "no memory for new connection");
5920 conn->dst_type = bdaddr_type;
5922 /* If we didn't have a hci_conn object previously
5923 * but we're in central role this must be something
5924 * initiated using an accept list. Since accept list based
5925 * connections are not "first class citizens" we don't
5926 * have full tracking of them. Therefore, we go ahead
5927 * with a "best effort" approach of determining the
5928 * initiator address based on the HCI_PRIVACY flag.
5931 conn->resp_addr_type = bdaddr_type;
5932 bacpy(&conn->resp_addr, bdaddr);
5933 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5934 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5935 bacpy(&conn->init_addr, &hdev->rpa);
5937 hci_copy_identity_address(hdev,
5939 &conn->init_addr_type);
/* Connection arrived before the create-connection timeout fired. */
5943 cancel_delayed_work(&conn->le_conn_timeout);
5946 /* The HCI_LE_Connection_Complete event is only sent once per connection.
5947 * Processing it more than once per connection can corrupt kernel memory.
5949 * As the connection handle is set here for the first time, it indicates
5950 * whether the connection is already set up.
5952 if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5953 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5957 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5959 /* Lookup the identity address from the stored connection
5960 * address and address type.
5962 * When establishing connections to an identity address, the
5963 * connection procedure will store the resolvable random
5964 * address first. Now if it can be converted back into the
5965 * identity address, start using the identity address from
5968 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5970 bacpy(&conn->dst, &irk->bdaddr);
5971 conn->dst_type = irk->addr_type;
5974 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5976 /* All connection failure handling is taken care of by the
5977 * hci_conn_failed function which is triggered by the HCI
5978 * request completion callbacks used for connecting.
5980 if (status || hci_conn_set_handle(conn, handle))
5983 /* Drop the connection if it has been aborted */
5984 if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5985 hci_conn_drop(conn);
5989 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5990 addr_type = BDADDR_LE_PUBLIC;
5992 addr_type = BDADDR_LE_RANDOM;
5994 /* Drop the connection if the device is blocked */
5995 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5996 hci_conn_drop(conn);
6000 mgmt_device_connected(hdev, conn, NULL, 0);
6002 conn->sec_level = BT_SECURITY_LOW;
6003 conn->state = BT_CONFIG;
6005 /* Store current advertising instance as connection advertising instance
6006 * when software rotation is in use so it can be re-enabled when
6009 if (!ext_adv_capable(hdev))
6010 conn->adv_instance = hdev->cur_adv_instance;
6012 conn->le_conn_interval = interval;
6013 conn->le_conn_latency = latency;
6014 conn->le_supv_timeout = supervision_timeout;
6016 hci_debugfs_create_conn(conn);
6017 hci_conn_add_sysfs(conn);
6019 /* The remote features procedure is defined for central
6020 * role only. So only in case of an initiated connection
6021 * request the remote features.
6023 * If the local controller supports peripheral-initiated features
6024 * exchange, then requesting the remote features in peripheral
6025 * role is possible. Otherwise just transition into the
6026 * connected state without requesting the remote features.
6029 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
6030 struct hci_cp_le_read_remote_features cp;
6032 cp.handle = __cpu_to_le16(conn->handle);
6034 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
6037 hci_conn_hold(conn);
/* No feature exchange possible: go straight to connected. */
6039 conn->state = BT_CONNECTED;
6040 hci_connect_cfm(conn, status);
/* The auto-connect attempt that triggered this connection is done;
 * drop the params' ownership of the conn reference.
 */
6043 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
6046 hci_pend_le_list_del_init(params);
6048 hci_conn_drop(params->conn);
6049 hci_conn_put(params->conn);
6050 params->conn = NULL;
6055 hci_update_passive_scan(hdev);
6056 hci_dev_unlock(hdev);
/* LE Connection Complete event: thin wrapper that forwards the legacy
 * event parameters (no Local RPA field -> NULL) to le_conn_complete_evt().
 */
6059 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6060 struct sk_buff *skb)
6062 struct hci_ev_le_conn_complete *ev = data;
6064 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6066 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6067 NULL, ev->role, le16_to_cpu(ev->handle),
6068 le16_to_cpu(ev->interval),
6069 le16_to_cpu(ev->latency),
6070 le16_to_cpu(ev->supervision_timeout));
/* LE Enhanced Connection Complete event: same as the legacy variant but
 * also carries the controller-generated Local RPA, which is passed on so
 * address bookkeeping can prefer it over hdev->rpa.
 */
6073 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6074 struct sk_buff *skb)
6076 struct hci_ev_le_enh_conn_complete *ev = data;
6078 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6080 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6081 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6082 le16_to_cpu(ev->interval),
6083 le16_to_cpu(ev->latency),
6084 le16_to_cpu(ev->supervision_timeout));
/* LE Advertising Set Terminated event: clean up the advertising instance
 * that ended and, when the set terminated because a connection was
 * created, record the adv instance on the connection and fix up its
 * responder address for random-address advertising.
 *
 * NOTE(review): excerpt elides several lines (status!=0 handling between
 * the find and remove calls, loop body, goto/unlock paths) - verify
 * against the complete source.
 */
6087 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
6088 struct sk_buff *skb)
6090 struct hci_evt_le_ext_adv_set_term *ev = data;
6091 struct hci_conn *conn;
6092 struct adv_info *adv, *n;
6094 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6096 /* The Bluetooth Core 5.3 specification clearly states that this event
6097 * shall not be sent when the Host disables the advertising set. So in
6098 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
6100 * When the Host disables an advertising set, all cleanup is done via
6101 * its command callback and not needed to be duplicated here.
6103 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
6104 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
6110 adv = hci_find_adv_instance(hdev, ev->handle);
6116 /* Remove advertising as it has been terminated */
6117 hci_remove_adv_instance(hdev, ev->handle);
6118 mgmt_advertising_removed(NULL, hdev, ev->handle);
/* If any other instance is still enabled, keep HCI_LE_ADV set. */
6120 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
6125 /* We are no longer advertising, clear HCI_LE_ADV */
6126 hci_dev_clear_flag(hdev, HCI_LE_ADV);
6131 adv->enabled = false;
6133 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
6135 /* Store handle in the connection so the correct advertising
6136 * instance can be re-enabled when disconnected.
6138 conn->adv_instance = ev->handle;
/* Only patch resp_addr when advertising used a random address and the
 * connection doesn't already have one set.
 */
6140 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
6141 bacmp(&conn->resp_addr, BDADDR_ANY))
6145 bacpy(&conn->resp_addr, &hdev->random_addr);
6150 bacpy(&conn->resp_addr, &adv->random_addr);
6154 hci_dev_unlock(hdev);
/* LE Connection Update Complete event: refresh the cached connection
 * parameters (interval/latency/supervision timeout) on the hci_conn.
 */
6157 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6158 struct sk_buff *skb)
6160 struct hci_ev_le_conn_update_complete *ev = data;
6161 struct hci_conn *conn;
6163 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6170 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6172 conn->le_conn_interval = le16_to_cpu(ev->interval);
6173 conn->le_conn_latency = le16_to_cpu(ev->latency);
6174 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6177 hci_dev_unlock(hdev);
/* This function requires the caller holds hdev->lock.
 *
 * Decide, based on an incoming advertising report, whether we should
 * initiate a connection to the advertiser.  Returns the hci_conn created
 * by hci_connect_le() on success, or NULL/no action when the report is
 * not connectable, the device is blocked, the controller cannot take
 * another central connection, or no matching pend_le_conns entry exists.
 *
 * NOTE(review): excerpt elides lines (early returns, the suspended
 * check, default switch arms) - verify against the complete source.
 */
6181 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
6183 u8 addr_type, bool addr_resolved,
6184 u8 adv_type, u8 phy, u8 sec_phy)
6186 struct hci_conn *conn;
6187 struct hci_conn_params *params;
6189 /* If the event is not connectable don't proceed further */
6190 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
6193 /* Ignore if the device is blocked or hdev is suspended */
6194 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
6198 /* Most controller will fail if we try to create new connections
6199 * while we have an existing one in peripheral role.
6201 if (hdev->conn_hash.le_num_peripheral > 0 &&
6202 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
6203 !(hdev->le_states[3] & 0x10)))
6206 /* If we're not connectable only connect devices that we have in
6207 * our pend_le_conns list.
6209 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
6214 if (!params->explicit_connect) {
6215 switch (params->auto_connect) {
6216 case HCI_AUTO_CONN_DIRECT:
6217 /* Only devices advertising with ADV_DIRECT_IND are
6218 * triggering a connection attempt. This is allowing
6219 * incoming connections from peripheral devices.
6221 if (adv_type != LE_ADV_DIRECT_IND)
6224 case HCI_AUTO_CONN_ALWAYS:
6225 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
6226 * are triggering a connection attempt. This means
6227 * that incoming connections from peripheral device are
6228 * accepted and also outgoing connections to peripheral
6229 * devices are established when found.
6237 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
6238 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
6239 HCI_ROLE_MASTER, phy, sec_phy);
6240 if (!IS_ERR(conn)) {
6241 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6242 * by higher layer that tried to connect, if no then
6243 * store the pointer since we don't really have any
6244 * other owner of the object besides the params that
6245 * triggered it. This way we can abort the connection if
6246 * the parameters get removed and keep the reference
6247 * count consistent once the connection is established.
6250 if (!params->explicit_connect)
6251 params->conn = hci_conn_get(conn);
6256 switch (PTR_ERR(conn)) {
6258 /* If hci_connect() returns -EBUSY it means there is already
6259 * an LE connection attempt going on. Since controllers don't
6260 * support more than one connection attempt at the time, we
6261 * don't consider this an error case.
6265 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core LE advertising-report processing shared by the legacy, extended
 * and direct advertising report handlers.  Validates the report type and
 * length, resolves RPAs to identity addresses, triggers pending
 * auto-connections via check_pending_le_conn(), and forwards device
 * found events to mgmt - merging ADV_IND/ADV_SCAN_IND reports with their
 * following SCAN_RSP when the controller splits them.
 *
 * NOTE(review): excerpt elides many lines (several returns, loop body
 * closing, flags initialisation) - verify exact control flow against
 * the complete source.
 */
6272 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6273 u8 bdaddr_type, bdaddr_t *direct_addr,
6274 u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
6275 u8 *data, u8 len, bool ext_adv, bool ctl_time,
6278 struct discovery_state *d = &hdev->discovery;
6279 struct smp_irk *irk;
6280 struct hci_conn *conn;
6281 bool match, bdaddr_resolved;
/* Only the standard advertising PDU types are accepted. */
6287 case LE_ADV_DIRECT_IND:
6288 case LE_ADV_SCAN_IND:
6289 case LE_ADV_NONCONN_IND:
6290 case LE_ADV_SCAN_RSP:
6293 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6294 "type: 0x%02x", type);
6298 if (len > max_adv_len(hdev)) {
6299 bt_dev_err_ratelimited(hdev,
6300 "adv larger than maximum supported");
6304 /* Find the end of the data in case the report contains padded zero
6305 * bytes at the end causing an invalid length value.
6307 * When data is NULL, len is 0 so there is no need for extra ptr
6308 * check as 'ptr < data + 0' is already false in such case.
6310 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6311 if (ptr + 1 + *ptr > data + len)
6315 /* Adjust for actual length. This handles the case when remote
6316 * device is advertising with incorrect data length.
6320 /* If the direct address is present, then this report is from
6321 * a LE Direct Advertising Report event. In that case it is
6322 * important to see if the address is matching the local
6323 * controller address.
6325 if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6326 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6329 /* Only resolvable random addresses are valid for these
6330 * kind of reports and others can be ignored.
6332 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6335 /* If the controller is not using resolvable random
6336 * addresses, then this report can be ignored.
6338 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6341 /* If the local IRK of the controller does not match
6342 * with the resolvable random address provided, then
6343 * this report can be ignored.
6345 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6349 /* Check if we need to convert to identity address */
6350 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6352 bdaddr = &irk->bdaddr;
6353 bdaddr_type = irk->addr_type;
6356 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6358 /* Check if we have been requested to connect to this device.
6360 * direct_addr is set only for directed advertising reports (it is NULL
6361 * for advertising reports) and is already verified to be RPA above.
6363 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6364 type, phy, sec_phy);
6365 if (!ext_adv && conn && type == LE_ADV_IND &&
6366 len <= max_adv_len(hdev)) {
6367 /* Store report for later inclusion by
6368 * mgmt_device_connected
6370 memcpy(conn->le_adv_data, data, len);
6371 conn->le_adv_data_len = len;
6374 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6375 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6379 /* All scan results should be sent up for Mesh systems */
6380 if (hci_dev_test_flag(hdev, HCI_MESH)) {
6381 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6382 rssi, flags, data, len, NULL, 0, instant);
6386 /* Passive scanning shouldn't trigger any device found events,
6387 * except for devices marked as CONN_REPORT for which we do send
6388 * device found events, or advertisement monitoring requested.
6390 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6391 if (type == LE_ADV_DIRECT_IND)
6394 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6395 bdaddr, bdaddr_type) &&
6396 idr_is_empty(&hdev->adv_monitors_idr))
6399 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6400 rssi, flags, data, len, NULL, 0, 0);
6404 /* When receiving a scan response, then there is no way to
6405 * know if the remote device is connectable or not. However
6406 * since scan responses are merged with a previously seen
6407 * advertising report, the flags field from that report
6410 * In the unlikely case that a controller just sends a scan
6411 * response event that doesn't match the pending report, then
6412 * it is marked as a standalone SCAN_RSP.
6414 if (type == LE_ADV_SCAN_RSP)
6415 flags = MGMT_DEV_FOUND_SCAN_RSP;
6417 /* If there's nothing pending either store the data from this
6418 * event or send an immediate device found event if the data
6419 * should not be stored for later.
6421 if (!ext_adv && !has_pending_adv_report(hdev)) {
6422 /* If the report will trigger a SCAN_REQ store it for
6425 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6426 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6427 rssi, flags, data, len);
6431 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6432 rssi, flags, data, len, NULL, 0, 0);
6436 /* Check if the pending report is for the same device as the new one */
6437 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6438 bdaddr_type == d->last_adv_addr_type);
6440 /* If the pending data doesn't match this report or this isn't a
6441 * scan response (e.g. we got a duplicate ADV_IND) then force
6442 * sending of the pending data.
6444 if (type != LE_ADV_SCAN_RSP || !match) {
6445 /* Send out whatever is in the cache, but skip duplicates */
6447 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6448 d->last_adv_addr_type, NULL,
6449 d->last_adv_rssi, d->last_adv_flags,
6451 d->last_adv_data_len, NULL, 0, 0);
6453 /* If the new report will trigger a SCAN_REQ store it for
6456 if (!ext_adv && (type == LE_ADV_IND ||
6457 type == LE_ADV_SCAN_IND)) {
6458 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6459 rssi, flags, data, len);
6463 /* The advertising reports cannot be merged, so clear
6464 * the pending report and send out a device found event.
6466 clear_pending_adv_report(hdev);
6467 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6468 rssi, flags, data, len, NULL, 0, 0);
6472 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6473 * the new event is a SCAN_RSP. We can therefore proceed with
6474 * sending a merged device found event.
6476 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6477 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6478 d->last_adv_data, d->last_adv_data_len, data, len, 0);
6479 clear_pending_adv_report(hdev);
/* LE Advertising Report event: iterate the variable-length report list,
 * bounds-checking each entry with hci_le_ev_skb_pull() (header, then the
 * data plus the trailing RSSI byte) before feeding it to
 * process_adv_report().
 *
 * NOTE(review): excerpt elides the per-report loop header and some
 * pull-length arguments - verify against the complete source.
 */
6482 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6483 struct sk_buff *skb)
6485 struct hci_ev_le_advertising_report *ev = data;
6486 u64 instant = jiffies;
6494 struct hci_ev_le_advertising_info *info;
6497 info = hci_le_ev_skb_pull(hdev, skb,
6498 HCI_EV_LE_ADVERTISING_REPORT,
/* Second pull covers the report payload (data + RSSI byte). */
6503 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6507 if (info->length <= max_adv_len(hdev)) {
/* RSSI is transmitted as the byte immediately after the adv data. */
6508 rssi = info->data[info->length];
6509 process_adv_report(hdev, info->type, &info->bdaddr,
6510 info->bdaddr_type, NULL, 0,
6511 HCI_ADV_PHY_1M, 0, rssi,
6512 info->data, info->length, false,
6515 bt_dev_err(hdev, "Dropping invalid advertising data");
6519 hci_dev_unlock(hdev);
/* Map an extended advertising event type bitfield to the corresponding
 * legacy advertising PDU type, or LE_ADV_INVALID if no sensible mapping
 * exists.  Legacy-PDU events carry an explicit encoding; non-legacy
 * events are classified from their CONN/SCAN/DIRECT property bits.
 */
6522 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6524 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6526 case LE_LEGACY_ADV_IND:
6528 case LE_LEGACY_ADV_DIRECT_IND:
6529 return LE_ADV_DIRECT_IND;
6530 case LE_LEGACY_ADV_SCAN_IND:
6531 return LE_ADV_SCAN_IND;
6532 case LE_LEGACY_NONCONN_IND:
6533 return LE_ADV_NONCONN_IND;
6534 case LE_LEGACY_SCAN_RSP_ADV:
6535 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6536 return LE_ADV_SCAN_RSP;
/* Non-legacy PDU: derive the closest legacy type from property bits. */
6542 if (evt_type & LE_EXT_ADV_CONN_IND) {
6543 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6544 return LE_ADV_DIRECT_IND;
6549 if (evt_type & LE_EXT_ADV_SCAN_RSP)
6550 return LE_ADV_SCAN_RSP;
6552 if (evt_type & LE_EXT_ADV_SCAN_IND)
6553 return LE_ADV_SCAN_IND;
6555 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6556 evt_type & LE_EXT_ADV_DIRECT_IND)
6557 return LE_ADV_NONCONN_IND;
6560 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6563 return LE_ADV_INVALID;
/* LE Extended Advertising Report event: bounds-check each report, map
 * the extended event type to a legacy type, and hand valid reports to
 * process_adv_report() with PHY information and the ext_adv flag set
 * for non-legacy PDUs.
 */
6566 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6567 struct sk_buff *skb)
6569 struct hci_ev_le_ext_adv_report *ev = data;
6570 u64 instant = jiffies;
6578 struct hci_ev_le_ext_adv_info *info;
6582 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
/* Second pull validates the variable-length report data. */
6587 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6591 evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6592 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6593 if (legacy_evt_type != LE_ADV_INVALID) {
6594 process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6595 info->bdaddr_type, NULL, 0,
6597 info->secondary_phy,
6598 info->rssi, info->data, info->length,
6599 !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6604 hci_dev_unlock(hdev);
/* Request termination of a periodic advertising sync by handle.
 * NOTE(review): excerpt elides the line assigning the handle into cp -
 * verify against the complete source.
 */
6607 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6609 struct hci_cp_le_pa_term_sync cp;
6611 memset(&cp, 0, sizeof(cp));
6614 return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
/* LE Periodic Advertising Sync Established event: ask the ISO layer
 * whether the sync should be accepted (terminating it otherwise) and,
 * on failure status, create a placeholder connection flagged
 * HCI_CONN_PA_SYNC_FAILED so the ISO layer gets notified of the error.
 *
 * NOTE(review): excerpt elides lines (status checks between the DEFER
 * test and the failure path) - verify against the complete source.
 */
6617 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6618 struct sk_buff *skb)
6620 struct hci_ev_le_pa_sync_established *ev = data;
6621 int mask = hdev->link_mode;
6623 struct hci_conn *pa_sync;
6625 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6629 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6631 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6632 if (!(mask & HCI_LM_ACCEPT)) {
6633 hci_le_pa_term_sync(hdev, ev->handle);
6637 if (!(flags & HCI_PROTO_DEFER))
6641 /* Add connection to indicate the failed PA sync event */
6642 pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
6648 set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
6650 /* Notify iso layer */
6651 hci_connect_cfm(pa_sync, ev->status);
6655 hci_dev_unlock(hdev);
/* LE Periodic Advertising Report event: if no ISO listener accepts the
 * sync (HCI_LM_ACCEPT not set), terminate the periodic sync.
 */
6658 static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6659 struct sk_buff *skb)
6661 struct hci_ev_le_per_adv_report *ev = data;
6662 int mask = hdev->link_mode;
6665 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6669 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6670 if (!(mask & HCI_LM_ACCEPT))
6671 hci_le_pa_term_sync(hdev, ev->sync_handle);
6673 hci_dev_unlock(hdev);
/* LE Read Remote Features Complete event: store the remote LE feature
 * bits and, if the connection was still in BT_CONFIG, complete the
 * connection setup (tolerating the "unsupported remote feature" error
 * for incoming connections when we support peripheral-initiated
 * feature exchange).
 */
6676 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6677 struct sk_buff *skb)
6679 struct hci_ev_le_remote_feat_complete *ev = data;
6680 struct hci_conn *conn;
6682 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6686 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6689 memcpy(conn->features[0], ev->features, 8);
6691 if (conn->state == BT_CONFIG) {
6694 /* If the local controller supports peripheral-initiated
6695 * features exchange, but the remote controller does
6696 * not, then it is possible that the error code 0x1a
6697 * for unsupported remote feature gets returned.
6699 * In this specific case, allow the connection to
6700 * transition into connected state and mark it as
6703 if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
6704 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6707 status = ev->status;
6709 conn->state = BT_CONNECTED;
6710 hci_connect_cfm(conn, status);
/* Drop the reference taken when the feature read was issued. */
6711 hci_conn_drop(conn);
6715 hci_dev_unlock(hdev);
/* LE Long Term Key Request event: look up the LTK for the connection's
 * peer and reply with it if EDiv/Rand validation passes, otherwise send
 * a negative reply.  Short Term Keys (SMP_STK) are single-use and are
 * deleted after being handed to the controller.
 *
 * NOTE(review): excerpt elides lines (goto not_found branches and
 * labels) - verify error paths against the complete source.
 */
6718 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6719 struct sk_buff *skb)
6721 struct hci_ev_le_ltk_req *ev = data;
6722 struct hci_cp_le_ltk_reply cp;
6723 struct hci_cp_le_ltk_neg_reply neg;
6724 struct hci_conn *conn;
6725 struct smp_ltk *ltk;
6727 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6731 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6735 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6739 if (smp_ltk_is_sc(ltk)) {
6740 /* With SC both EDiv and Rand are set to zero */
6741 if (ev->ediv || ev->rand)
6744 /* For non-SC keys check that EDiv and Rand match */
6745 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Zero-pad the reply beyond the negotiated key size. */
6749 memcpy(cp.ltk, ltk->val, ltk->enc_size);
6750 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6751 cp.handle = cpu_to_le16(conn->handle);
6753 conn->pending_sec_level = smp_ltk_sec_level(ltk);
6755 conn->enc_key_size = ltk->enc_size;
6757 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6759 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6760 * temporary key used to encrypt a connection following
6761 * pairing. It is used during the Encrypted Session Setup to
6762 * distribute the keys. Later, security can be re-established
6763 * using a distributed LTK.
6765 if (ltk->type == SMP_STK) {
6766 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6767 list_del_rcu(&ltk->list);
6768 kfree_rcu(ltk, rcu);
6770 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6773 hci_dev_unlock(hdev);
/* Negative reply path: no usable key for this request. */
6778 neg.handle = ev->handle;
6779 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6780 hci_dev_unlock(hdev);
/* Send a negative reply to an LE Remote Connection Parameter Request.
 * NOTE(review): excerpt elides the reason parameter/assignment lines -
 * verify against the complete source.
 */
6783 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6786 struct hci_cp_le_conn_param_req_neg_reply cp;
6788 cp.handle = cpu_to_le16(handle);
6791 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* LE Remote Connection Parameter Request event: validate the requested
 * parameters, reject unknown/invalid requests with a negative reply,
 * store the new parameters (and notify mgmt) when acting as central,
 * and finally accept the request by echoing the parameters back.
 */
6795 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6796 struct sk_buff *skb)
6798 struct hci_ev_le_remote_conn_param_req *ev = data;
6799 struct hci_cp_le_conn_param_req_reply cp;
6800 struct hci_conn *hcon;
6801 u16 handle, min, max, latency, timeout;
6803 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6805 handle = le16_to_cpu(ev->handle);
6806 min = le16_to_cpu(ev->interval_min);
6807 max = le16_to_cpu(ev->interval_max);
6808 latency = le16_to_cpu(ev->latency);
6809 timeout = le16_to_cpu(ev->timeout);
6811 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6812 if (!hcon || hcon->state != BT_CONNECTED)
6813 return send_conn_param_neg_reply(hdev, handle,
6814 HCI_ERROR_UNKNOWN_CONN_ID);
/* Refuse intervals above the limit configured for this connection. */
6816 if (max > hcon->le_conn_max_interval)
6817 return send_conn_param_neg_reply(hdev, handle,
6818 HCI_ERROR_INVALID_LL_PARAMS);
6820 if (hci_check_conn_params(min, max, latency, timeout))
6821 return send_conn_param_neg_reply(hdev, handle,
6822 HCI_ERROR_INVALID_LL_PARAMS);
/* As central, persist the accepted parameters for future connections
 * and inform userspace via mgmt.
 */
6824 if (hcon->role == HCI_ROLE_MASTER) {
6825 struct hci_conn_params *params;
6830 params = hci_conn_params_lookup(hdev, &hcon->dst,
6833 params->conn_min_interval = min;
6834 params->conn_max_interval = max;
6835 params->conn_latency = latency;
6836 params->supervision_timeout = timeout;
6842 hci_dev_unlock(hdev);
6844 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6845 store_hint, min, max, latency, timeout);
/* Accept: echo the requested parameters back to the controller. */
6848 cp.handle = ev->handle;
6849 cp.interval_min = ev->interval_min;
6850 cp.interval_max = ev->interval_max;
6851 cp.latency = ev->latency;
6852 cp.timeout = ev->timeout;
6856 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* LE Direct Advertising Report event: bounds-check the flexible report
 * array, then feed each entry (which carries the direct/target address)
 * to process_adv_report().  Direct reports carry no advertising data.
 */
6859 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6860 struct sk_buff *skb)
6862 struct hci_ev_le_direct_adv_report *ev = data;
6863 u64 instant = jiffies;
6866 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6867 flex_array_size(ev, info, ev->num)))
6875 for (i = 0; i < ev->num; i++) {
6876 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6878 process_adv_report(hdev, info->type, &info->bdaddr,
6879 info->bdaddr_type, &info->direct_addr,
6880 info->direct_addr_type, HCI_ADV_PHY_1M, 0,
6881 info->rssi, NULL, 0, false, false, instant);
6884 hci_dev_unlock(hdev);
/* LE PHY Update Complete event: cache the negotiated TX/RX PHYs on the
 * connection.
 */
6887 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6888 struct sk_buff *skb)
6890 struct hci_ev_le_phy_update_complete *ev = data;
6891 struct hci_conn *conn;
6893 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6900 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6904 conn->le_tx_phy = ev->tx_phy;
6905 conn->le_rx_phy = ev->rx_phy;
6908 hci_dev_unlock(hdev);
/* LE CIS Established event: on success, translate the controller's CIS
 * parameters into the connection's bt_iso_qos (direction depends on our
 * role: the "c_*" fields describe central-to-peripheral, "p_*" the
 * reverse), transition to BT_CONNECTED and set up the ISO data path;
 * on failure, close the connection and notify via hci_connect_cfm().
 * If another CIS creation was pending, kick it afterwards.
 *
 * NOTE(review): excerpt elides lines (status check before the success
 * path, break statements, goto/unlock labels) - verify against the
 * complete source.
 */
6911 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6912 struct sk_buff *skb)
6914 struct hci_evt_le_cis_established *ev = data;
6915 struct hci_conn *conn;
6916 struct bt_iso_qos *qos;
6917 bool pending = false;
6918 u16 handle = __le16_to_cpu(ev->handle);
6920 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6924 conn = hci_conn_hash_lookup_handle(hdev, handle);
6927 "Unable to find connection with handle 0x%4.4x",
6932 if (conn->type != ISO_LINK) {
6934 "Invalid connection link type handle 0x%4.4x",
6939 qos = &conn->iso_qos;
6941 pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6943 /* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
6944 qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
6945 qos->ucast.out.interval = qos->ucast.in.interval;
6947 switch (conn->role) {
6948 case HCI_ROLE_SLAVE:
6949 /* Convert Transport Latency (us) to Latency (msec) */
6950 qos->ucast.in.latency =
6951 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6953 qos->ucast.out.latency =
6954 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6956 qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
6957 qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
6958 qos->ucast.in.phy = ev->c_phy;
6959 qos->ucast.out.phy = ev->p_phy;
6961 case HCI_ROLE_MASTER:
6962 /* Convert Transport Latency (us) to Latency (msec) */
6963 qos->ucast.out.latency =
6964 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6966 qos->ucast.in.latency =
6967 DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6969 qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
6970 qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
6971 qos->ucast.out.phy = ev->c_phy;
6972 qos->ucast.in.phy = ev->p_phy;
/* Success path: make the CIS usable. */
6977 conn->state = BT_CONNECTED;
6978 hci_debugfs_create_conn(conn);
6979 hci_conn_add_sysfs(conn);
6980 hci_iso_setup_path(conn);
/* Failure path: close and notify the ISO layer with the error. */
6984 conn->state = BT_CLOSED;
6985 hci_connect_cfm(conn, ev->status);
6990 hci_le_create_cis_pending(hdev);
6992 hci_dev_unlock(hdev);
6995 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6997 struct hci_cp_le_reject_cis cp;
6999 memset(&cp, 0, sizeof(cp));
7001 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
7002 hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
7005 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
7007 struct hci_cp_le_accept_cis cp;
7009 memset(&cp, 0, sizeof(cp));
7011 hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
/* LE CIS Request event (subevent 0x1a): a remote Central is asking to
 * establish a CIS over an existing ACL.  Consult the ISO protocol layer
 * and accept, reject, or defer (BT_CONNECT2) the request.
 */
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);
	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);
	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	/* Ask the ISO layer whether anyone will take this CIS */
	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
	/* Reuse an existing hci_conn for this handle if there is one,
	 * otherwise allocate a new peripheral-role ISO connection.
	 */
	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
		/* NOTE(review): reject here presumably covers the failed
		 * hci_conn_add() case — confirm against surrounding branch.
		 */
		hci_le_reject_cis(hdev, ev->cis_handle);
	cis->iso_qos.ucast.cig = ev->cig_id;
	cis->iso_qos.ucast.cis = ev->cis_id;
	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
		/* Deferred: park in BT_CONNECT2 until the socket owner
		 * decides, and let the ISO layer know about the request.
		 */
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	hci_dev_unlock(hdev);
7065 static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
7067 u8 handle = PTR_UINT(data);
7069 return hci_le_terminate_big_sync(hdev, handle,
7070 HCI_ERROR_LOCAL_HOST_TERM);
/* LE Create BIG Complete event (subevent 0x1b): hand out the controller's
 * BIS connection handles to every bound BIS of this BIG and bring them up,
 * or propagate the failure status to each of them.
 */
static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
	struct hci_evt_le_create_big_complete *ev = data;
	struct hci_conn *conn;
	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
	/* Validate the variable-length trailing array of BIS handles */
	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
				flex_array_size(ev, bis_handle, ev->num_bis)))
	/* Connect all BISes that are bound to the BIG */
	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		/* Skip anything that is not an unbound-address ISO link
		 * belonging to this BIG.
		 */
		if (bacmp(&conn->dst, BDADDR_ANY) ||
		    conn->type != ISO_LINK ||
		    conn->iso_qos.bcast.big != ev->handle)
		/* NOTE(review): i counts handles consumed from the event;
		 * its declaration is outside the lines shown here.
		 */
		if (hci_conn_set_handle(conn,
					__le16_to_cpu(ev->bis_handle[i++])))
		conn->state = BT_CONNECTED;
		set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		/* Failure status: inform the ISO layer per connection */
		hci_connect_cfm(conn, ev->status);
	if (!ev->status && !i)
		/* If no BISes have been connected for the BIG,
		 * terminate. This is in case all bound connections
		 * have been closed before the BIG creation
		 * completed.
		 */
		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
				   UINT_PTR(ev->handle), NULL);
	hci_dev_unlock(hdev);
/* LE BIG Sync Established event (subevent 0x1d): create/update one hci_conn
 * per synchronized BIS, record the broadcast QoS, and on failure notify each
 * BIS connection after they have all been added.
 */
static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
	struct hci_evt_le_big_sync_estabilished *ev = data;
	struct hci_conn *bis;
	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
	/* Validate the variable-length trailing array of BIS handles */
	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
				flex_array_size(ev, bis, ev->num_bis)))
	for (i = 0; i < ev->num_bis; i++) {
		u16 handle = le16_to_cpu(ev->bis[i]);
		/* Reuse an existing connection for this handle, else add a
		 * peripheral-role ISO connection for the BIS.
		 */
		bis = hci_conn_hash_lookup_handle(hdev, handle);
			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
					   HCI_ROLE_SLAVE, handle);
		/* NOTE(review): 0x42 is the spec error code "Unknown
		 * Advertising Identifier"; presumably PA sync is only
		 * considered established for other statuses — confirm
		 * against the Core Specification.
		 */
		if (ev->status != 0x42)
			/* Mark PA sync as established */
			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
		bis->iso_qos.bcast.big = ev->handle;
		/* ev->latency is a 24-bit little-endian field; widen it to
		 * 32 bits before converting.
		 */
		memset(&interval, 0, sizeof(interval));
		memcpy(&interval, ev->latency, sizeof(ev->latency));
		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
		set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
		hci_iso_setup_path(bis);
	/* In case BIG sync failed, notify each failed connection to
	 * the user after all hci connections have been added
	 */
	for (i = 0; i < ev->num_bis; i++) {
		u16 handle = le16_to_cpu(ev->bis[i]);
		bis = hci_conn_hash_lookup_handle(hdev, handle);
		set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
		hci_connect_cfm(bis, ev->status);
	hci_dev_unlock(hdev);
/* LE BIGInfo Advertising Report event (subevent 0x22): a periodic-adv train
 * we are synced to carries BIG information.  If the ISO layer accepts, make
 * sure a PA-sync hci_conn exists and notify the ISO and MGMT layers.
 */
static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
					   struct sk_buff *skb)
	struct hci_evt_le_big_info_adv_report *ev = data;
	int mask = hdev->link_mode;
	struct hci_conn *pa_sync;
	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
	/* Ask the ISO layer; if nobody accepts, drop the PA sync entirely */
	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_pa_term_sync(hdev, ev->sync_handle);
	if (!(flags & HCI_PROTO_DEFER))
	/* Look up an existing PA-sync connection for this sync handle */
	pa_sync = hci_conn_hash_lookup_pa_sync_handle
		   le16_to_cpu(ev->sync_handle));
	/* Add connection to indicate the PA sync event */
	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
	/* Notify iso layer */
	hci_connect_cfm(pa_sync, 0x00);
	/* Notify MGMT layer */
	mgmt_device_connected(hdev, pa_sync, NULL, 0);
	hci_dev_unlock(hdev);
/* Build an hci_le_ev_table entry for a variable-length subevent: @_func is
 * dispatched after length validation against [@_min_len, @_max_len].
 */
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
	.min_len = _min_len, \
	.max_len = _max_len, \

/* Fixed-length subevent: min and max length are the same */
#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

/* Subevent whose only payload is a single status byte */
#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
/* Entries in this table shall have their position according to the subevent
 * opcode they handle, so use of the macros above is recommended since they
 * initialize each entry at its proper index using designated initializers;
 * that way events without a callback function can be omitted.
 */
/* Dispatch table for LE meta subevents, indexed by subevent opcode.
 * Consulted by hci_le_meta_evt() for length validation and dispatch.
 */
static const struct hci_le_ev {
	/* Handler invoked with the validated, already-pulled event data */
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_estabilished_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
		     hci_le_per_adv_report_evt,
		     sizeof(struct hci_ev_le_per_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_estabilished),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
/* Dispatcher for HCI LE Meta events (0x3e): match a pending LE command that
 * was waiting for this subevent, then validate the subevent length against
 * hci_le_ev_table and hand the payload to the registered handler.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;
	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
	/* Only match event if command OGF is for LE (OGF 0x08) */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
	    hci_skb_event(hdev->req_skb) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->req_skb);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
	subev = &hci_le_ev_table[ev->subevent];
	/* Too short to contain the subevent's fixed part: drop it */
	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
	/* Just warn if the length is over max_len size it still be
	 * possible to partially parse the event so leave to callback to
	 * decide if that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);
	/* Pull the validated fixed part; handler sees the remainder in skb */
	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	subev->func(hdev, data, skb);
/* Check whether @skb (a pristine clone of the received event) is the
 * Command Complete matching @opcode/@event, pulling the headers off so the
 * caller can hand the remaining parameters to req_complete_skb.  Returns
 * false when no extra parameters can be retrieved.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
	/* The event received is not the one the request waited for */
	if (hdr->evt != event)
	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
	/* Completion for a different command than the pending request */
	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
/* Record why the controller woke the host: on the first event seen while
 * suspended, store a wake reason plus the bdaddr/type of the device that
 * caused it (connection events and LE advertising reports).
 */
static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_info *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
	/* Default to remote wake. Values for wake_reason are documented in the
	 * Bluez mgmt api docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		/* First byte after the meta header is the report count */
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;
		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
			/* All three report layouts start right after the
			 * count byte; only the matching view is read below.
			 */
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
	/* Any other event while suspended is an unexpected wake source */
	hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	hci_dev_unlock(hdev);
/* Build an hci_ev_table entry for a variable-length event: @_func is
 * dispatched after length validation against [@_min_len, @_max_len].
 */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
	.min_len = _min_len, \
	.max_len = _max_len, \

/* Fixed-length event: min and max length are the same */
#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

/* Event whose only payload is a single status byte */
#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Entry whose handler additionally reports request completion state
 * (func_req variant) for command-tracking events.
 */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \

/* Fixed-length variant of HCI_EV_REQ_VL */
#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)
/* Entries in this table shall have their position according to the event opcode
 * they handle, so use of the macros above is recommended since they initialize
 * each entry at its proper index using designated initializers; that way
 * events without a callback function need not be entered at all.
 */
/* Dispatch table for HCI events, indexed by event opcode.  Consulted by
 * hci_event_func() for length validation and dispatch.
 */
static const struct hci_ev {
	/* Plain handler for events not tied to command completion */
	void (*func)(struct hci_dev *hdev, void *data,
		     struct sk_buff *skb);
	/* Handler that also reports request-completion state back to the
	 * caller (used for Command Complete/Status and LE Meta).
	 */
	void (*func_req)(struct hci_dev *hdev, void *data,
			 struct sk_buff *skb, u16 *opcode, u8 *status,
			 hci_req_complete_t *req_complete,
			 hci_req_complete_skb_t *req_complete_skb);
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
	       sizeof(struct hci_ev_phy_link_complete)),
	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
	       sizeof(struct hci_ev_channel_selected)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
	       hci_disconn_loglink_complete_evt,
	       sizeof(struct hci_ev_disconn_logical_link_complete)),
	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
	       sizeof(struct hci_ev_logical_link_complete)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
	       hci_disconn_phylink_complete_evt,
	       sizeof(struct hci_ev_disconn_phy_link_complete)),
	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
	       sizeof(struct hci_ev_num_comp_blocks)),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
/* Validate the length of @skb against hci_ev_table[@event] and dispatch it
 * to the registered handler, preferring the request-aware func_req form so
 * Command Complete/Status state can be propagated to the caller.
 */
static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
	const struct hci_ev *ev = &hci_ev_table[event];
	/* Too short to contain the event's fixed part: drop it */
	if (skb->len < ev->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, ev->min_len);
	/* Just warn if the length is over max_len size it still be
	 * possible to partially parse the event so leave to callback to
	 * decide if that is acceptable.
	 */
	if (skb->len > ev->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, ev->max_len);
	/* Pull the validated fixed part; handler sees the remainder in skb */
	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
	ev->func_req(hdev, data, skb, opcode, status, req_complete,
	ev->func(hdev, data, skb);
/* Top-level entry point for a received HCI event packet: record it for
 * diagnostics, match it against any pending request, dispatch it through
 * hci_event_func(), and finally run the request-completion callbacks.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;
	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
	/* Keep a copy of the most recent event for debugging/diagnostics */
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
	bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
	/* Only match event if command OGF is not for LE (LE commands are
	 * matched per-subevent in hci_le_meta_evt() instead).
	 */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
	    hci_skb_event(hdev->req_skb) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
				     status, &req_complete, &req_complete_skb);
	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);
	skb_pull(skb, HCI_EVENT_HDR_SIZE);
	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);
	bt_dev_dbg(hdev, "event 0x%2.2x", event);
	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
	req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Hand the untouched clone to the skb-based completion; if
		 * it does not match the pending command, just drop it.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
		req_complete_skb(hdev, status, opcode, orig_skb);
	kfree_skb(orig_skb);
	hdev->stat.evt_rx++;