2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
28 #include <linux/crypto.h>
29 #include <crypto/algapi.h>
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/mgmt.h>
35 #include "hci_request.h"
36 #include "hci_debugfs.h"
/* 16-byte all-zero link key pattern (two concatenated 8-byte halves). */
42 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
43 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Convert whole seconds to jiffies for delayed-work scheduling. */
45 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
47 /* Handle HCI Event packets */
/* Command Complete handler for HCI_Inquiry_Cancel: clear the in-progress
 * inquiry state, wake anyone waiting on HCI_INQUIRY, and move discovery to
 * STOPPED unless an LE active scan is still running.
 */
49 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
52 __u8 status = *((__u8 *) skb->data);
54 BT_DBG("%s status 0x%2.2x", hdev->name, status);
56 /* It is possible that we receive Inquiry Complete event right
57 * before we receive Inquiry Cancel Command Complete event, in
58 * which case the latter event should have status of Command
59 * Disallowed (0x0c). This should not be treated as error, since
60 * we actually achieve what Inquiry Cancel wants to achieve,
61 * which is to end the last Inquiry session.
63 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
64 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
73 clear_bit(HCI_INQUIRY, &hdev->flags);
74 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
75 wake_up_bit(&hdev->flags, HCI_INQUIRY);
78 /* Set discovery state to stopped if we're not doing LE active
81 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
82 hdev->le_scan_type != LE_SCAN_ACTIVE)
83 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Kick off any connection attempts that were queued behind the inquiry. */
86 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Periodic_Inquiry_Mode: mark periodic inquiry on. */
89 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
91 __u8 status = *((__u8 *) skb->data);
93 BT_DBG("%s status 0x%2.2x", hdev->name, status);
98 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete for HCI_Exit_Periodic_Inquiry_Mode: clear the flag and
 * resume any pending connection attempts.
 */
101 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
103 __u8 status = *((__u8 *) skb->data);
105 BT_DBG("%s status 0x%2.2x", hdev->name, status);
110 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
112 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Remote_Name_Request_Cancel: debug trace only. */
115 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
118 BT_DBG("%s", hdev->name);
/* Command Complete for HCI_Role_Discovery: record the reported role
 * (central/peripheral) on the connection matching the returned handle.
 */
121 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
123 struct hci_rp_role_discovery *rp = (void *) skb->data;
124 struct hci_conn *conn;
126 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
133 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
135 conn->role = rp->role;
137 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Link_Policy_Settings: cache the policy on
 * the connection identified by the returned handle.
 */
140 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
142 struct hci_rp_read_link_policy *rp = (void *) skb->data;
143 struct hci_conn *conn;
145 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
154 conn->link_policy = __le16_to_cpu(rp->policy);
156 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Link_Policy_Settings: the response only
 * carries the handle, so the policy value is read back from the sent
 * command parameters (offset 2 past the handle).
 */
159 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
161 struct hci_rp_write_link_policy *rp = (void *) skb->data;
162 struct hci_conn *conn;
165 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
170 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
176 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
178 conn->link_policy = get_unaligned_le16(sent + 2);
180 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Default_Link_Policy: cache it on hdev. */
183 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
186 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
188 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
193 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete for HCI_Write_Default_Link_Policy: value comes from the
 * echoed sent-command parameters, not from the event.
 */
196 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
199 __u8 status = *((__u8 *) skb->data);
202 BT_DBG("%s status 0x%2.2x", hdev->name, status);
207 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
211 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete for HCI_Reset: drop all volatile controller state so the
 * stack's view matches the freshly reset controller (flags, discovery
 * state, advertising/scan response data, LE lists).
 */
214 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
216 __u8 status = *((__u8 *) skb->data);
218 BT_DBG("%s status 0x%2.2x", hdev->name, status);
220 clear_bit(HCI_RESET, &hdev->flags);
225 /* Reset all non-persistent flags */
226 hci_dev_clear_volatile_flags(hdev);
228 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
230 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
231 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
233 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
234 hdev->adv_data_len = 0;
236 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
237 hdev->scan_rsp_data_len = 0;
239 hdev->le_scan_type = LE_SCAN_PASSIVE;
241 hdev->ssp_debug_mode = 0;
243 hci_bdaddr_list_clear(&hdev->le_accept_list);
244 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete for HCI_Read_Stored_Link_Key: only cache max/num key
 * counts when the original request asked for all keys (read_all == 0x01).
 */
247 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
250 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
251 struct hci_cp_read_stored_link_key *sent;
253 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
255 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
259 if (!rp->status && sent->read_all == 0x01) {
260 hdev->stored_max_keys = rp->max_keys;
261 hdev->stored_num_keys = rp->num_keys;
/* Command Complete for HCI_Delete_Stored_Link_Key: decrement the cached key
 * count, clamping at zero to avoid underflow.
 */
265 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
268 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
270 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
275 if (rp->num_keys <= hdev->stored_num_keys)
276 hdev->stored_num_keys -= rp->num_keys;
278 hdev->stored_num_keys = 0;
/* Command Complete for HCI_Write_Local_Name: notify mgmt and cache the name
 * that was sent (the event itself carries only a status byte).
 */
281 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
283 __u8 status = *((__u8 *) skb->data);
286 BT_DBG("%s status 0x%2.2x", hdev->name, status);
288 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
294 if (hci_dev_test_flag(hdev, HCI_MGMT))
295 mgmt_set_local_name_complete(hdev, sent, status);
297 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
299 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Name: cache the controller name, but
 * only during initial SETUP/CONFIG so a user-set name is not clobbered.
 */
302 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
304 struct hci_rp_read_local_name *rp = (void *) skb->data;
306 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
311 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
312 hci_dev_test_flag(hdev, HCI_CONFIG))
313 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete for HCI_Write_Authentication_Enable: mirror the sent
 * parameter into the HCI_AUTH flag and notify mgmt.
 */
316 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
318 __u8 status = *((__u8 *) skb->data);
321 BT_DBG("%s status 0x%2.2x", hdev->name, status);
323 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
330 __u8 param = *((__u8 *) sent);
332 if (param == AUTH_ENABLED)
333 set_bit(HCI_AUTH, &hdev->flags);
335 clear_bit(HCI_AUTH, &hdev->flags);
338 if (hci_dev_test_flag(hdev, HCI_MGMT))
339 mgmt_auth_enable_complete(hdev, status);
341 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Encryption_Mode: mirror the sent parameter
 * into the HCI_ENCRYPT flag.
 */
344 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
346 __u8 status = *((__u8 *) skb->data);
350 BT_DBG("%s status 0x%2.2x", hdev->name, status);
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
359 param = *((__u8 *) sent);
362 set_bit(HCI_ENCRYPT, &hdev->flags);
364 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete for HCI_Write_Scan_Enable: update ISCAN/PSCAN flags from
 * the sent scan-enable bitmask; discoverable timeout is reset as a side
 * effect when scanning is being turned off.
 */
367 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
369 __u8 status = *((__u8 *) skb->data);
373 BT_DBG("%s status 0x%2.2x", hdev->name, status);
375 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
379 param = *((__u8 *) sent);
384 hdev->discov_timeout = 0;
388 if (param & SCAN_INQUIRY)
389 set_bit(HCI_ISCAN, &hdev->flags);
391 clear_bit(HCI_ISCAN, &hdev->flags);
393 if (param & SCAN_PAGE)
394 set_bit(HCI_PSCAN, &hdev->flags);
396 clear_bit(HCI_PSCAN, &hdev->flags);
399 hci_dev_unlock(hdev);
/* Command Complete for HCI_Set_Event_Filter: track whether any filter is
 * currently configured (cleared again on HCI_FLT_CLEAR_ALL).
 */
402 static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
404 __u8 status = *((__u8 *)skb->data);
405 struct hci_cp_set_event_filter *cp;
408 BT_DBG("%s status 0x%2.2x", hdev->name, status);
413 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
417 cp = (struct hci_cp_set_event_filter *)sent;
419 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
420 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
422 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
/* Command Complete for HCI_Read_Class_of_Device: cache the 3-byte CoD. */
425 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
427 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
429 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
434 memcpy(hdev->dev_class, rp->dev_class, 3);
436 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
437 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete for HCI_Write_Class_of_Device: cache the CoD that was
 * sent and notify mgmt of completion.
 */
440 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
442 __u8 status = *((__u8 *) skb->data);
445 BT_DBG("%s status 0x%2.2x", hdev->name, status);
447 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
454 memcpy(hdev->dev_class, sent, 3);
456 if (hci_dev_test_flag(hdev, HCI_MGMT))
457 mgmt_set_class_of_dev_complete(hdev, sent, status);
459 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Voice_Setting: cache the value and notify
 * the driver only when it actually changed.
 */
462 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
464 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
467 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
472 setting = __le16_to_cpu(rp->voice_setting);
474 if (hdev->voice_setting == setting)
477 hdev->voice_setting = setting;
479 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
482 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Write_Voice_Setting: value comes from the echoed
 * sent-command parameters; driver is notified only on change.
 */
485 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
488 __u8 status = *((__u8 *) skb->data);
492 BT_DBG("%s status 0x%2.2x", hdev->name, status);
497 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
501 setting = get_unaligned_le16(sent);
503 if (hdev->voice_setting == setting)
506 hdev->voice_setting = setting;
508 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
511 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Read_Number_of_Supported_IAC: cache the count. */
514 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
517 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
519 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
524 hdev->num_iac = rp->num_iac;
526 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Command Complete for HCI_Write_Simple_Pairing_Mode: mirror the sent mode
 * into the LMP host-SSP feature bit and the HCI_SSP_ENABLED flag, and
 * notify mgmt.
 */
529 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
531 __u8 status = *((__u8 *) skb->data);
532 struct hci_cp_write_ssp_mode *sent;
534 BT_DBG("%s status 0x%2.2x", hdev->name, status);
536 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
544 hdev->features[1][0] |= LMP_HOST_SSP;
546 hdev->features[1][0] &= ~LMP_HOST_SSP;
549 if (hci_dev_test_flag(hdev, HCI_MGMT))
550 mgmt_ssp_enable_complete(hdev, sent->mode, status);
553 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
555 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
558 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Secure_Connections_Host_Support: mirror the
 * sent support value into the LMP host-SC feature bit; the HCI_SC_ENABLED
 * flag is only toggled here when mgmt is not managing the device.
 */
561 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
563 u8 status = *((u8 *) skb->data);
564 struct hci_cp_write_sc_support *sent;
566 BT_DBG("%s status 0x%2.2x", hdev->name, status);
568 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
576 hdev->features[1][0] |= LMP_HOST_SC;
578 hdev->features[1][0] &= ~LMP_HOST_SC;
581 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
583 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
585 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
588 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Version_Information: cache version
 * data, but only during SETUP/CONFIG.
 */
591 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
593 struct hci_rp_read_local_version *rp = (void *) skb->data;
595 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
600 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
601 hci_dev_test_flag(hdev, HCI_CONFIG)) {
602 hdev->hci_ver = rp->hci_ver;
603 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
604 hdev->lmp_ver = rp->lmp_ver;
605 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
606 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete for HCI_Read_Local_Supported_Commands: cache the command
 * bitmap, but only during SETUP/CONFIG.
 */
610 static void hci_cc_read_local_commands(struct hci_dev *hdev,
613 struct hci_rp_read_local_commands *rp = (void *) skb->data;
615 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
620 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
621 hci_dev_test_flag(hdev, HCI_CONFIG))
622 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete for HCI_Read_Authenticated_Payload_Timeout: store the
 * timeout on the connection matching the returned handle.
 */
625 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
628 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
629 struct hci_conn *conn;
631 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
638 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
640 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
642 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Authenticated_Payload_Timeout: the timeout
 * value is read back from the sent command (offset 2 past the handle).
 */
645 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
648 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
649 struct hci_conn *conn;
652 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
657 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
663 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
665 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
667 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Supported_Features: cache the LMP
 * feature page 0 and derive the supported ACL/SCO/eSCO packet types from
 * the individual feature bits.
 */
670 static void hci_cc_read_local_features(struct hci_dev *hdev,
673 struct hci_rp_read_local_features *rp = (void *) skb->data;
675 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
680 memcpy(hdev->features, rp->features, 8);
682 /* Adjust default settings according to features
683 * supported by device. */
685 if (hdev->features[0][0] & LMP_3SLOT)
686 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
688 if (hdev->features[0][0] & LMP_5SLOT)
689 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
691 if (hdev->features[0][1] & LMP_HV2) {
692 hdev->pkt_type |= (HCI_HV2);
693 hdev->esco_type |= (ESCO_HV2);
696 if (hdev->features[0][1] & LMP_HV3) {
697 hdev->pkt_type |= (HCI_HV3);
698 hdev->esco_type |= (ESCO_HV3);
701 if (lmp_esco_capable(hdev))
702 hdev->esco_type |= (ESCO_EV3);
704 if (hdev->features[0][4] & LMP_EV4)
705 hdev->esco_type |= (ESCO_EV4);
707 if (hdev->features[0][4] & LMP_EV5)
708 hdev->esco_type |= (ESCO_EV5);
710 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
711 hdev->esco_type |= (ESCO_2EV3);
713 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
714 hdev->esco_type |= (ESCO_3EV3);
716 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
717 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete for HCI_Read_Local_Extended_Features: track the highest
 * available feature page and cache the returned page's bits.
 */
720 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
723 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
725 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
730 if (hdev->max_page < rp->max_page)
731 hdev->max_page = rp->max_page;
733 if (rp->page < HCI_MAX_PAGES)
734 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete for HCI_Read_Flow_Control_Mode: cache the mode. */
737 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
740 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
742 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
747 hdev->flow_ctl_mode = rp->mode;
/* Command Complete for HCI_Read_Buffer_Size: cache ACL/SCO MTUs and packet
 * counts and initialize the available-packet counters from them; a quirk
 * hook exists for controllers that report broken buffer sizes.
 */
750 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
752 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
754 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
759 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
760 hdev->sco_mtu = rp->sco_mtu;
761 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
762 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
764 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
769 hdev->acl_cnt = hdev->acl_pkts;
770 hdev->sco_cnt = hdev->sco_pkts;
772 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
773 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete for HCI_Read_BD_ADDR: cache the public address during
 * init, and remember the factory address separately during SETUP.
 */
776 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
778 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
780 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
785 if (test_bit(HCI_INIT, &hdev->flags))
786 bacpy(&hdev->bdaddr, &rp->bdaddr);
788 if (hci_dev_test_flag(hdev, HCI_SETUP))
789 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete for HCI_Read_Local_Pairing_Options: cache the options
 * and maximum encryption key size, but only during SETUP/CONFIG.
 */
792 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
795 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
797 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
802 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
803 hci_dev_test_flag(hdev, HCI_CONFIG)) {
804 hdev->pairing_opts = rp->pairing_opts;
805 hdev->max_enc_key_size = rp->max_key_size;
/* Command Complete for HCI_Read_Page_Scan_Activity: cache interval/window
 * only while the device is in HCI_INIT.
 */
809 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
812 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
814 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
819 if (test_bit(HCI_INIT, &hdev->flags)) {
820 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
821 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete for HCI_Write_Page_Scan_Activity: cache the values that
 * were sent in the command.
 */
825 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
828 u8 status = *((u8 *) skb->data);
829 struct hci_cp_write_page_scan_activity *sent;
831 BT_DBG("%s status 0x%2.2x", hdev->name, status);
836 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
840 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
841 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete for HCI_Read_Page_Scan_Type: cache only during init. */
844 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
847 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
849 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
854 if (test_bit(HCI_INIT, &hdev->flags))
855 hdev->page_scan_type = rp->type;
/* Command Complete for HCI_Write_Page_Scan_Type: cache the sent type. */
858 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
861 u8 status = *((u8 *) skb->data);
864 BT_DBG("%s status 0x%2.2x", hdev->name, status);
869 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
871 hdev->page_scan_type = *type;
/* Command Complete for HCI_Read_Data_Block_Size (block-based flow control):
 * cache block MTU/length/count and seed the free-block counter.
 */
874 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
877 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
879 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
884 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
885 hdev->block_len = __le16_to_cpu(rp->block_len);
886 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
888 hdev->block_cnt = hdev->num_blocks;
890 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
891 hdev->block_cnt, hdev->block_len);
/* Command Complete for HCI_Read_Clock: which == 0x00 means the local clock
 * (stored on hdev); otherwise store clock and accuracy on the connection
 * matching the returned handle. Response length is validated first.
 */
894 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
896 struct hci_rp_read_clock *rp = (void *) skb->data;
897 struct hci_cp_read_clock *cp;
898 struct hci_conn *conn;
900 BT_DBG("%s", hdev->name);
902 if (skb->len < sizeof(*rp))
910 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
914 if (cp->which == 0x00) {
915 hdev->clock = le32_to_cpu(rp->clock);
919 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
921 conn->clock = le32_to_cpu(rp->clock);
922 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
926 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_AMP_Info: cache all AMP controller
 * capability fields on hdev.
 */
929 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
932 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
934 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
939 hdev->amp_status = rp->amp_status;
940 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
941 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
942 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
943 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
944 hdev->amp_type = rp->amp_type;
945 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
946 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
947 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
948 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete for HCI_Read_Inquiry_Response_TX_Power: cache it. */
951 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
954 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
956 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
961 hdev->inq_tx_power = rp->tx_power;
/* Command Complete for HCI_Read_Default_Erroneous_Data_Reporting: cache. */
964 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
967 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
969 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
974 hdev->err_data_reporting = rp->err_data_reporting;
/* Command Complete for HCI_Write_Default_Erroneous_Data_Reporting: cache the
 * value from the echoed sent-command parameters.
 */
977 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
980 __u8 status = *((__u8 *)skb->data);
981 struct hci_cp_write_def_err_data_reporting *cp;
983 BT_DBG("%s status 0x%2.2x", hdev->name, status);
988 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
992 hdev->err_data_reporting = cp->err_data_reporting;
/* Command Complete for HCI_PIN_Code_Request_Reply: notify mgmt, then record
 * the PIN length on the matching ACL connection (looked up by the bdaddr
 * from the sent command).
 */
995 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
997 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
998 struct hci_cp_pin_code_reply *cp;
999 struct hci_conn *conn;
1001 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1005 if (hci_dev_test_flag(hdev, HCI_MGMT))
1006 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1011 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1015 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1017 conn->pin_length = cp->pin_len;
1020 hci_dev_unlock(hdev);
/* Command Complete for HCI_PIN_Code_Request_Negative_Reply: notify mgmt. */
1023 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1025 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1027 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1031 if (hci_dev_test_flag(hdev, HCI_MGMT))
1032 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1035 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_Buffer_Size: cache LE MTU and packet
 * count and seed the free LE packet counter.
 */
1038 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1039 struct sk_buff *skb)
1041 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1043 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1048 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1049 hdev->le_pkts = rp->le_max_pkt;
1051 hdev->le_cnt = hdev->le_pkts;
1053 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete for HCI_LE_Read_Local_Supported_Features: cache bits. */
1056 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1057 struct sk_buff *skb)
1059 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1061 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1066 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete for HCI_LE_Read_Advertising_Channel_TX_Power: cache. */
1069 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1070 struct sk_buff *skb)
1072 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1074 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1079 hdev->adv_tx_power = rp->tx_power;
/* Command Complete for HCI_User_Confirmation_Request_Reply: notify mgmt. */
1082 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1084 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1086 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1090 if (hci_dev_test_flag(hdev, HCI_MGMT))
1091 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1094 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Confirmation_Request_Negative_Reply:
 * notify mgmt of the result.
 */
1097 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1098 struct sk_buff *skb)
1100 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1102 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1106 if (hci_dev_test_flag(hdev, HCI_MGMT))
1107 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1108 ACL_LINK, 0, rp->status);
1110 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Passkey_Request_Reply: notify mgmt. */
1113 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1115 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1117 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1121 if (hci_dev_test_flag(hdev, HCI_MGMT))
1122 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1125 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Passkey_Request_Negative_Reply: notify
 * mgmt of the result.
 */
1128 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1129 struct sk_buff *skb)
1131 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1133 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1137 if (hci_dev_test_flag(hdev, HCI_MGMT))
1138 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1139 ACL_LINK, 0, rp->status);
1141 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_OOB_Data: debug trace only here. */
1144 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1145 struct sk_buff *skb)
1147 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1149 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for HCI_Read_Local_OOB_Extended_Data: debug trace only. */
1152 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1153 struct sk_buff *skb)
1155 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1157 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for HCI_LE_Set_Random_Address: cache the new random
 * address; if it equals the current RPA, clear the expired flag and
 * re-arm the RPA expiry timer.
 */
1160 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1162 __u8 status = *((__u8 *) skb->data);
1165 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1170 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1176 bacpy(&hdev->random_addr, sent);
1178 if (!bacmp(&hdev->rpa, sent)) {
1179 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1180 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1181 secs_to_jiffies(hdev->rpa_timeout));
1184 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Default_PHY: cache the TX/RX PHY
 * preferences that were sent.
 */
1187 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1189 __u8 status = *((__u8 *) skb->data);
1190 struct hci_cp_le_set_default_phy *cp;
1192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1197 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1203 hdev->le_tx_def_phys = cp->tx_phys;
1204 hdev->le_rx_def_phys = cp->rx_phys;
1206 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Advertising_Set_Random_Address: store the
 * address on the matching adv instance; instance 0x00 is deliberately
 * skipped because it uses HCI_LE_Set_Random_Address instead. If the
 * address is the current RPA, re-arm that instance's RPA expiry work.
 */
1209 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1210 struct sk_buff *skb)
1212 __u8 status = *((__u8 *) skb->data);
1213 struct hci_cp_le_set_adv_set_rand_addr *cp;
1214 struct adv_info *adv;
1219 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1220 /* Update only in case the adv instance since handle 0x00 shall be using
1221 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1222 * non-extended adverting.
1224 if (!cp || !cp->handle)
1229 adv = hci_find_adv_instance(hdev, cp->handle)
1231 bacpy(&adv->random_addr, &cp->bdaddr);
1232 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1233 adv->rpa_expired = false;
1234 queue_delayed_work(hdev->workqueue,
1235 &adv->rpa_expired_cb,
1236 secs_to_jiffies(hdev->rpa_timeout));
1240 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_Transmit_Power: cache min/max TX power. */
1243 static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
1244 struct sk_buff *skb)
1246 struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
1248 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1253 hdev->min_le_tx_power = rp->min_le_tx_power;
1254 hdev->max_le_tx_power = rp->max_le_tx_power;
/* Command Complete for HCI_LE_Set_Advertising_Enable: toggle HCI_LE_ADV and,
 * when advertising is enabled for a pending LE connection as peripheral,
 * arm a connection timeout so a failed connect does not hang.
 */
1257 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1259 __u8 *sent, status = *((__u8 *) skb->data);
1261 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1266 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1272 /* If we're doing connection initiation as peripheral. Set a
1273 * timeout in case something goes wrong.
1276 struct hci_conn *conn;
1278 hci_dev_set_flag(hdev, HCI_LE_ADV);
1280 conn = hci_lookup_le_connect(hdev);
1282 queue_delayed_work(hdev->workqueue,
1283 &conn->le_conn_timeout,
1284 conn->conn_timeout);
1286 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1289 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Extended_Advertising_Enable: same role as
 * hci_cc_le_set_adv_enable but per advertising set. On enable, mark the
 * targeted instance enabled and arm the LE connect timeout if one is
 * pending; on disable, clear instance state and only drop HCI_LE_ADV once
 * no instance remains enabled (num_of_sets == 0 means "all sets").
 */
1292 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1293 struct sk_buff *skb)
1295 struct hci_cp_le_set_ext_adv_enable *cp;
1296 struct hci_cp_ext_adv_set *set;
1297 __u8 status = *((__u8 *) skb->data);
1298 struct adv_info *adv = NULL, *n;
1300 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1305 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1309 set = (void *)cp->data;
1313 if (cp->num_of_sets)
1314 adv = hci_find_adv_instance(hdev, set->handle);
1317 struct hci_conn *conn;
1319 hci_dev_set_flag(hdev, HCI_LE_ADV);
1322 adv->enabled = true;
1324 conn = hci_lookup_le_connect(hdev);
1326 queue_delayed_work(hdev->workqueue,
1327 &conn->le_conn_timeout,
1328 conn->conn_timeout);
1330 if (cp->num_of_sets) {
1332 adv->enabled = false;
1334 /* If just one instance was disabled check if there are
1335 * any other instance enabled before clearing HCI_LE_ADV
1337 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1343 /* All instances shall be considered disabled */
1344 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1346 adv->enabled = false;
1349 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1353 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Scan_Parameters: cache the scan type
 * (active/passive) from the sent command.
 */
1356 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1358 struct hci_cp_le_set_scan_param *cp;
1359 __u8 status = *((__u8 *) skb->data);
1361 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1366 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1372 hdev->le_scan_type = cp->type;
1374 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Extended_Scan_Parameters: the scan type
 * is taken from the first per-PHY parameter block of the sent command.
 */
1377 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1378 struct sk_buff *skb)
1380 struct hci_cp_le_set_ext_scan_params *cp;
1381 __u8 status = *((__u8 *) skb->data);
1382 struct hci_cp_le_scan_phy_params *phy_param;
1384 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1389 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1393 phy_param = (void *)cp->data;
1397 hdev->le_scan_type = phy_param->type;
1399 hci_dev_unlock(hdev);
/* True if a deferred advertising report is buffered in discovery state
 * (last_adv_addr differs from BDADDR_ANY).
 */
1402 static bool has_pending_adv_report(struct hci_dev *hdev)
1404 struct discovery_state *d = &hdev->discovery;
1406 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop any buffered advertising report (reset address and data length). */
1409 static void clear_pending_adv_report(struct hci_dev *hdev)
1411 struct discovery_state *d = &hdev->discovery;
1413 bacpy(&d->last_adv_addr, BDADDR_ANY);
1414 d->last_adv_data_len = 0;
/* Buffer a single advertising report in discovery state so it can be merged
 * with a following scan response; oversized data (> HCI_MAX_AD_LENGTH) is
 * rejected.
 */
1417 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1418 u8 bdaddr_type, s8 rssi, u32 flags,
1421 struct discovery_state *d = &hdev->discovery;
1423 if (len > HCI_MAX_AD_LENGTH)
1426 bacpy(&d->last_adv_addr, bdaddr);
1427 d->last_adv_addr_type = bdaddr_type;
1428 d->last_adv_rssi = rssi;
1429 d->last_adv_flags = flags;
1430 memcpy(d->last_adv_data, data, len);
1431 d->last_adv_data_len = len;
/* Common completion logic for both legacy and extended LE scan enable:
 * update HCI_LE_SCAN, flush any buffered adv report on disable, cancel the
 * scan-disable timer, and settle discovery/advertising state.
 */
1434 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1439 case LE_SCAN_ENABLE:
1440 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1441 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1442 clear_pending_adv_report(hdev);
1445 case LE_SCAN_DISABLE:
1446 /* We do this here instead of when setting DISCOVERY_STOPPED
1447 * since the latter would potentially require waiting for
1448 * inquiry to stop too.
1450 if (has_pending_adv_report(hdev)) {
1451 struct discovery_state *d = &hdev->discovery;
1453 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1454 d->last_adv_addr_type, NULL,
1455 d->last_adv_rssi, d->last_adv_flags,
1457 d->last_adv_data_len, NULL, 0);
1460 /* Cancel this timer so that we don't try to disable scanning
1461 * when it's already disabled.
1463 cancel_delayed_work(&hdev->le_scan_disable);
1465 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1467 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1468 * interrupted scanning due to a connect request. Mark
1469 * therefore discovery as stopped. If this was not
1470 * because of a connect request advertising might have
1471 * been disabled because of active scanning, so
1472 * re-enable it again if necessary.
1474 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1475 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1476 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1477 hdev->discovery.state == DISCOVERY_FINDING)
1478 hci_req_reenable_advertising(hdev);
1483 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1488 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Scan_Enable: delegate to the shared
 * scan-enable completion handler with the sent enable value.
 */
1491 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1492 struct sk_buff *skb)
1494 struct hci_cp_le_set_scan_enable *cp;
1495 __u8 status = *((__u8 *) skb->data);
1497 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1502 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1506 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete for HCI_LE_Set_Extended_Scan_Enable: delegate to the
 * shared scan-enable completion handler with the sent enable value.
 */
1509 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1510 struct sk_buff *skb)
1512 struct hci_cp_le_set_ext_scan_enable *cp;
1513 __u8 status = *((__u8 *) skb->data);
1515 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1520 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1524 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete for LE Read Number of Supported Advertising Sets.
 * Caches the controller's advertising-set capacity in hdev.
 */
1527 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1528 struct sk_buff *skb)
1530 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1532 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1538 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete for LE Read Accept List Size. Caches how many entries
 * the controller's accept (white) list can hold.
 */
1541 static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
1542 struct sk_buff *skb)
1544 struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
1546 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1551 hdev->le_accept_list_size = rp->size;
/* Command Complete for LE Clear Accept List. Drops the host's shadow
 * copy of the accept list so it stays in sync with the controller.
 */
1554 static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
1555 struct sk_buff *skb)
1557 __u8 status = *((__u8 *) skb->data);
1559 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1565 hci_bdaddr_list_clear(&hdev->le_accept_list);
1566 hci_dev_unlock(hdev);
/* Command Complete for LE Add Device To Accept List. Replays the address
 * from the command we sent into the host's shadow accept list.
 */
1569 static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
1570 struct sk_buff *skb)
1572 struct hci_cp_le_add_to_accept_list *sent;
1573 __u8 status = *((__u8 *) skb->data);
1575 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1580 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1585 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1587 hci_dev_unlock(hdev);
/* Command Complete for LE Remove Device From Accept List. Removes the
 * address from the host's shadow accept list to match the controller.
 */
1590 static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
1591 struct sk_buff *skb)
1593 struct hci_cp_le_del_from_accept_list *sent;
1594 __u8 status = *((__u8 *) skb->data);
1596 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1601 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1606 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1608 hci_dev_unlock(hdev);
/* Command Complete for LE Read Supported States. Caches the 8-byte LE
 * state/role combination bitmap reported by the controller.
 */
1611 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1612 struct sk_buff *skb)
1614 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1616 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1621 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete for LE Read Suggested Default Data Length. Caches the
 * controller's default TX octet count and TX time (both little-endian on
 * the wire, converted to host order here).
 */
1624 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1625 struct sk_buff *skb)
1627 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1629 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1634 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1635 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Command Complete for LE Write Suggested Default Data Length. On
 * completion, mirrors the values we asked the controller to use back
 * into the cached hdev defaults.
 */
1638 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1639 struct sk_buff *skb)
1641 struct hci_cp_le_write_def_data_len *sent;
1642 __u8 status = *((__u8 *) skb->data);
1644 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1649 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1653 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1654 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete for LE Add Device To Resolving List. Adds the peer
 * address plus its IRK(s) to the host's shadow resolving list.
 */
1657 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1658 struct sk_buff *skb)
1660 struct hci_cp_le_add_to_resolv_list *sent;
1661 __u8 status = *((__u8 *) skb->data);
1663 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1668 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1673 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1674 sent->bdaddr_type, sent->peer_irk,
1676 hci_dev_unlock(hdev);
/* Command Complete for LE Remove Device From Resolving List. Drops the
 * matching entry from the host's shadow resolving list.
 */
1679 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1680 struct sk_buff *skb)
1682 struct hci_cp_le_del_from_resolv_list *sent;
1683 __u8 status = *((__u8 *) skb->data);
1685 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1690 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1695 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1697 hci_dev_unlock(hdev);
/* Command Complete for LE Clear Resolving List. Empties the host's
 * shadow resolving list to stay in sync with the controller.
 */
1700 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1701 struct sk_buff *skb)
1703 __u8 status = *((__u8 *) skb->data);
1705 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1711 hci_bdaddr_list_clear(&hdev->le_resolv_list);
1712 hci_dev_unlock(hdev);
/* Command Complete for LE Read Resolving List Size. Caches the number of
 * entries the controller's resolving list can hold.
 */
1715 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1716 struct sk_buff *skb)
1718 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1720 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1725 hdev->le_resolv_list_size = rp->size;
/* Command Complete for LE Set Address Resolution Enable. Sets or clears
 * HCI_LL_RPA_RESOLUTION according to the enable byte we sent, tracking
 * whether the controller is resolving RPAs in the link layer.
 */
1728 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1729 struct sk_buff *skb)
1731 __u8 *sent, status = *((__u8 *) skb->data);
1733 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1738 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1745 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1747 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1749 hci_dev_unlock(hdev);
/* Command Complete for LE Read Maximum Data Length. Caches the
 * controller's maximum supported TX/RX payload sizes and times.
 */
1752 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1753 struct sk_buff *skb)
1755 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1757 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1762 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1763 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1764 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1765 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete for Write LE Host Supported. Updates the cached host
 * feature bits (page 1) and the HCI_LE_ENABLED / HCI_ADVERTISING flags
 * based on the le/simul values we wrote to the controller.
 */
1768 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1769 struct sk_buff *skb)
1771 struct hci_cp_write_le_host_supported *sent;
1772 __u8 status = *((__u8 *) skb->data);
1774 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1779 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
/* LE enabled on the host side */
1786 hdev->features[1][0] |= LMP_HOST_LE;
1787 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
/* LE disabled: advertising cannot remain active either */
1789 hdev->features[1][0] &= ~LMP_HOST_LE;
1790 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1791 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
/* Simultaneous LE and BR/EDR host support bit */
1795 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1797 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1799 hci_dev_unlock(hdev);
/* Command Complete for (legacy) LE Set Advertising Parameters. Records
 * the own-address type that advertising will use.
 */
1802 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1804 struct hci_cp_le_set_adv_param *cp;
1805 u8 status = *((u8 *) skb->data);
1807 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1812 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1817 hdev->adv_addr_type = cp->own_address_type;
1818 hci_dev_unlock(hdev);
/* Command Complete for LE Set Extended Advertising Parameters. Records
 * the own-address type and the TX power selected by the controller, both
 * for instance 0 (stored directly in hdev) and for the matching
 * advertising instance, then refreshes the advertising data since the
 * TX power level may be included in it.
 */
1821 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1823 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1824 struct hci_cp_le_set_ext_adv_params *cp;
1825 struct adv_info *adv_instance;
1827 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1832 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1837 hdev->adv_addr_type = cp->own_addr_type;
1839 /* Store in hdev for instance 0 */
1840 hdev->adv_tx_power = rp->tx_power;
1842 adv_instance = hci_find_adv_instance(hdev, cp->handle);
1844 adv_instance->tx_power = rp->tx_power;
1846 /* Update adv data as tx power is known now */
1847 hci_req_update_adv_data(hdev, cp->handle);
1849 hci_dev_unlock(hdev);
/* Command Complete for Read RSSI. Stores the reported RSSI on the
 * connection identified by the returned handle.
 */
1852 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1854 struct hci_rp_read_rssi *rp = (void *) skb->data;
1855 struct hci_conn *conn;
1857 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1864 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1866 conn->rssi = rp->rssi;
1868 hci_dev_unlock(hdev);
/* Command Complete for Read Transmit Power Level. Depending on the type
 * byte of the command we sent (current vs. maximum level), stores the
 * reported power on the matching connection.
 */
1871 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1873 struct hci_cp_read_tx_power *sent;
1874 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1875 struct hci_conn *conn;
1877 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1882 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1888 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
/* sent->type selects current vs. maximum TX power level */
1892 switch (sent->type) {
1894 conn->tx_power = rp->tx_power;
1897 conn->max_tx_power = rp->tx_power;
1902 hci_dev_unlock(hdev);
/* Command Complete for Write SSP Debug Mode. Caches the debug-mode byte
 * we wrote so the host knows whether SSP debug keys are in use.
 */
1905 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1907 u8 status = *((u8 *) skb->data);
1910 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1915 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1917 hdev->ssp_debug_mode = *mode;
/* Command Status for HCI Inquiry. On success marks inquiry as active;
 * also gives pending connection attempts a chance to proceed.
 */
1920 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1922 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1925 hci_conn_check_pending(hdev);
1929 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status for HCI Create Connection. On failure, closes the
 * pending connection object (retrying once on "Command Disallowed",
 * status 0x0c); on success, ensures a connection object exists for the
 * target address.
 */
1932 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1934 struct hci_cp_create_conn *cp;
1935 struct hci_conn *conn;
1937 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1939 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1945 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1947 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
/* 0x0c (Command Disallowed) is retried up to twice before giving up */
1950 if (conn && conn->state == BT_CONNECT) {
1951 if (status != 0x0c || conn->attempt > 2) {
1952 conn->state = BT_CLOSED;
1953 hci_connect_cfm(conn, status);
1956 conn->state = BT_CONNECT2;
/* No existing conn object for a successful request: create one now */
1960 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1963 bt_dev_err(hdev, "no memory for new connection");
1967 hci_dev_unlock(hdev);
/* Command Status for HCI Add SCO Connection. On failure, looks up the
 * ACL link whose handle the command referenced and closes the attached
 * SCO connection, notifying its upper layer.
 */
1970 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1972 struct hci_cp_add_sco *cp;
1973 struct hci_conn *acl, *sco;
1976 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1981 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1985 handle = __le16_to_cpu(cp->handle);
1987 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1991 acl = hci_conn_hash_lookup_handle(hdev, handle);
1995 sco->state = BT_CLOSED;
1997 hci_connect_cfm(sco, status);
2002 hci_dev_unlock(hdev);
/* Command Status for HCI Authentication Requested. On failure during
 * connection setup (BT_CONFIG), reports the failure to the upper layer
 * and drops the reference taken for the authentication attempt.
 */
2005 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2007 struct hci_cp_auth_requested *cp;
2008 struct hci_conn *conn;
2010 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2015 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2021 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2023 if (conn->state == BT_CONFIG) {
2024 hci_connect_cfm(conn, status);
2025 hci_conn_drop(conn);
2029 hci_dev_unlock(hdev);
/* Command Status for HCI Set Connection Encryption. Like
 * hci_cs_auth_requested(): on failure during BT_CONFIG, signals the
 * failure upward and drops the connection reference.
 */
2032 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2034 struct hci_cp_set_conn_encrypt *cp;
2035 struct hci_conn *conn;
2037 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2042 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2048 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2050 if (conn->state == BT_CONFIG) {
2051 hci_connect_cfm(conn, status);
2052 hci_conn_drop(conn);
2056 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection in BT_CONFIG state needs
 * authentication before it can be reported as connected. Only outgoing
 * connections qualify; SDP-level security never requires it.
 */
2059 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2060 struct hci_conn *conn)
2062 if (conn->state != BT_CONFIG || !conn->out)
2065 if (conn->pending_sec_level == BT_SECURITY_SDP)
2068 /* Only request authentication for SSP connections or non-SSP
2069 * devices with sec_level MEDIUM or HIGH or if MITM protection
2072 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2073 conn->pending_sec_level != BT_SECURITY_FIPS &&
2074 conn->pending_sec_level != BT_SECURITY_HIGH &&
2075 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue an HCI Remote Name Request for an inquiry-cache entry, reusing
 * the page-scan parameters and clock offset learned during inquiry.
 * Returns the result of hci_send_cmd() (0 on success).
 */
2081 static int hci_resolve_name(struct hci_dev *hdev,
2082 struct inquiry_entry *e)
2084 struct hci_cp_remote_name_req cp;
2086 memset(&cp, 0, sizeof(cp));
2088 bacpy(&cp.bdaddr, &e->data.bdaddr);
2089 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2090 cp.pscan_mode = e->data.pscan_mode;
2091 cp.clock_offset = e->data.clock_offset;
2093 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next inquiry-cache entry still
 * needing a name. Returns true when a request was successfully sent
 * (and the entry marked NAME_PENDING), false when nothing is left to
 * resolve or the request could not be sent.
 */
2096 static bool hci_resolve_next_name(struct hci_dev *hdev)
2098 struct discovery_state *discov = &hdev->discovery;
2099 struct inquiry_entry *e;
2101 if (list_empty(&discov->resolve))
2104 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2108 if (hci_resolve_name(hdev, e) == 0) {
2109 e->name_state = NAME_PENDING;
/* Handle the result of a remote-name lookup during discovery: update
 * mgmt connected state if needed, record the resolved (or failed) name
 * in the inquiry cache, and either continue with the next pending name
 * or mark discovery as stopped.
 */
2116 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2117 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2119 struct discovery_state *discov = &hdev->discovery;
2120 struct inquiry_entry *e;
2122 /* Update the mgmt connected state if necessary. Be careful with
2123 * conn objects that exist but are not (yet) connected however.
2124 * Only those in BT_CONFIG or BT_CONNECTED states can be
2125 * considered connected.
2128 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2129 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2130 mgmt_device_connected(hdev, conn, name, name_len)
2132 if (discov->state == DISCOVERY_STOPPED)
2135 if (discov->state == DISCOVERY_STOPPING)
2136 goto discov_complete;
2138 if (discov->state != DISCOVERY_RESOLVING)
2141 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2142 /* If the device was not found in a list of found devices names of which
2143 * are pending. there is no need to continue resolving a next name as it
2144 * will be done upon receiving another Remote Name Request Complete
/* Name resolved: report it via mgmt and mark the entry as known */
2151 e->name_state = NAME_KNOWN;
2152 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2153 e->data.rssi, name, name_len);
2155 e->name_state = NAME_NOT_KNOWN;
/* Keep resolving while there are more NAME_NEEDED entries */
2158 if (hci_resolve_next_name(hdev))
2162 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status for HCI Remote Name Request. On failure, lets the
 * pending-name machinery finish with an empty name; otherwise, if the
 * connection needs outgoing authentication, starts it now instead of
 * waiting for the name to come back.
 */
2165 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2167 struct hci_cp_remote_name_req *cp;
2168 struct hci_conn *conn;
2170 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2172 /* If successful wait for the name req complete event before
2173 * checking for the need to do authentication */
2177 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2183 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2185 if (hci_dev_test_flag(hdev, HCI_MGMT))
2186 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2191 if (!hci_outgoing_auth_needed(hdev, conn))
/* Start authentication ourselves if none is pending yet */
2194 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2195 struct hci_cp_auth_requested auth_cp;
2197 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2199 auth_cp.handle = __cpu_to_le16(conn->handle);
2200 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2201 sizeof(auth_cp), &auth_cp);
2205 hci_dev_unlock(hdev);
/* Command Status for HCI Read Remote Supported Features. On failure
 * during BT_CONFIG, reports the failure upward and drops the reference.
 */
2208 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2210 struct hci_cp_read_remote_features *cp;
2211 struct hci_conn *conn;
2213 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2218 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2224 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2226 if (conn->state == BT_CONFIG) {
2227 hci_connect_cfm(conn, status);
2228 hci_conn_drop(conn);
2232 hci_dev_unlock(hdev);
/* Command Status for HCI Read Remote Extended Features. Same failure
 * handling as hci_cs_read_remote_features().
 */
2235 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2237 struct hci_cp_read_remote_ext_features *cp;
2238 struct hci_conn *conn;
2240 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2245 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2251 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2253 if (conn->state == BT_CONFIG) {
2254 hci_connect_cfm(conn, status);
2255 hci_conn_drop(conn);
2259 hci_dev_unlock(hdev);
/* Command Status for HCI Setup Synchronous Connection. On failure,
 * finds the ACL link the command referenced and closes the attached
 * SCO/eSCO connection, notifying its upper layer.
 */
2262 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2264 struct hci_cp_setup_sync_conn *cp;
2265 struct hci_conn *acl, *sco;
2268 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2273 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2277 handle = __le16_to_cpu(cp->handle);
2279 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2283 acl = hci_conn_hash_lookup_handle(hdev, handle);
2287 sco->state = BT_CLOSED;
2289 hci_connect_cfm(sco, status);
2294 hci_dev_unlock(hdev);
/* Command Status for HCI Sniff Mode. On failure, clears the pending
 * mode-change flag and, if a SCO setup was waiting on the mode change,
 * lets it proceed now.
 */
2297 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2299 struct hci_cp_sniff_mode *cp;
2300 struct hci_conn *conn;
2302 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2307 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2313 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2315 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2317 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2318 hci_sco_setup(conn, status);
2321 hci_dev_unlock(hdev);
/* Command Status for HCI Exit Sniff Mode. Mirror image of
 * hci_cs_sniff_mode(): clear the pending mode change and run any
 * deferred SCO setup.
 */
2324 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2326 struct hci_cp_exit_sniff_mode *cp;
2327 struct hci_conn *conn;
2329 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2334 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2340 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2342 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2344 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2345 hci_sco_setup(conn, status);
2348 hci_dev_unlock(hdev);
/* Command Status for HCI Disconnect. On failure, reports the failed
 * disconnect to mgmt and — since the upper layer will not retry —
 * performs basic cleanup, re-enabling advertising for LE links if it
 * was suppressed by the connection.
 */
2351 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2353 struct hci_cp_disconnect *cp;
2354 struct hci_conn *conn;
2359 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2365 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2367 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2368 conn->dst_type, status);
2370 if (conn->type == LE_LINK) {
2371 hdev->cur_adv_instance = conn->adv_instance;
2372 hci_req_reenable_advertising(hdev);
2375 /* If the disconnection failed for any reason, the upper layer
2376 * does not retry to disconnect in current implementation.
2377 * Hence, we need to do some basic cleanup here and re-enable
2378 * advertising if necessary.
2383 hci_dev_unlock(hdev);
/* Shared bookkeeping for a successfully issued LE (Extended) Create
 * Connection command: record the initiator/responder addresses needed
 * by SMP and, for direct (non accept-list) connections, arm the LE
 * connection timeout.
 */
2386 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2387 u8 peer_addr_type, u8 own_address_type,
2390 struct hci_conn *conn;
2392 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2397 /* When using controller based address resolution, then the new
2398 * address types 0x02 and 0x03 are used. These types need to be
2399 * converted back into either public address or random address type
2401 if (use_ll_privacy(hdev) &&
2402 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2403 switch (own_address_type) {
2404 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2405 own_address_type = ADDR_LE_DEV_PUBLIC;
2407 case ADDR_LE_DEV_RANDOM_RESOLVED:
2408 own_address_type = ADDR_LE_DEV_RANDOM;
2413 /* Store the initiator and responder address information which
2414 * is needed for SMP. These values will not change during the
2415 * lifetime of the connection.
2417 conn->init_addr_type = own_address_type;
2418 if (own_address_type == ADDR_LE_DEV_RANDOM)
2419 bacpy(&conn->init_addr, &hdev->random_addr);
2421 bacpy(&conn->init_addr, &hdev->bdaddr);
2423 conn->resp_addr_type = peer_addr_type;
2424 bacpy(&conn->resp_addr, peer_addr);
2426 /* We don't want the connection attempt to stick around
2427 * indefinitely since LE doesn't have a page timeout concept
2428 * like BR/EDR. Set a timer for any connection that doesn't use
2429 * the accept list for connecting.
2431 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2432 queue_delayed_work(conn->hdev->workqueue,
2433 &conn->le_conn_timeout,
2434 conn->conn_timeout);
/* Command Status for (legacy) LE Create Connection. On success,
 * delegates address bookkeeping and timeout arming to
 * cs_le_create_conn(); failures are handled elsewhere.
 */
2437 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2439 struct hci_cp_le_create_conn *cp;
2441 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2443 /* All connection failure handling is taken care of by the
2444 * hci_le_conn_failed function which is triggered by the HCI
2445 * request completion callbacks used for connecting.
2450 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2456 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2457 cp->own_address_type, cp->filter_policy);
2459 hci_dev_unlock(hdev);
/* Command Status for LE Extended Create Connection. Identical flow to
 * hci_cs_le_create_conn(), using the extended command's parameter
 * layout (own_addr_type vs. own_address_type).
 */
2462 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2464 struct hci_cp_le_ext_create_conn *cp;
2466 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2468 /* All connection failure handling is taken care of by the
2469 * hci_le_conn_failed function which is triggered by the HCI
2470 * request completion callbacks used for connecting.
2475 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2481 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2482 cp->own_addr_type, cp->filter_policy);
2484 hci_dev_unlock(hdev);
/* Command Status for LE Read Remote Features. On failure during
 * BT_CONFIG, reports the failure upward and drops the reference —
 * same pattern as the BR/EDR remote-feature handlers above.
 */
2487 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2489 struct hci_cp_le_read_remote_features *cp;
2490 struct hci_conn *conn;
2492 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2497 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2503 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2505 if (conn->state == BT_CONFIG) {
2506 hci_connect_cfm(conn, status);
2507 hci_conn_drop(conn);
2511 hci_dev_unlock(hdev);
/* Command Status for LE Start Encryption. If the command failed while
 * the link is still BT_CONNECTED, encryption cannot be established, so
 * the link is disconnected with an authentication-failure reason.
 */
2514 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2516 struct hci_cp_le_start_enc *cp;
2517 struct hci_conn *conn;
2519 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2526 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2530 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2534 if (conn->state != BT_CONNECTED)
2537 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2538 hci_conn_drop(conn);
2541 hci_dev_unlock(hdev);
/* Command Status for HCI Switch Role. On failure, clears the pending
 * role-switch flag on the matching ACL connection.
 */
2544 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2546 struct hci_cp_switch_role *cp;
2547 struct hci_conn *conn;
2549 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2554 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2560 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2562 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2564 hci_dev_unlock(hdev);
/* Inquiry Complete event handler. Clears the HCI_INQUIRY flag (waking
 * any waiters), then advances the mgmt discovery state machine: either
 * move on to name resolution of discovered devices or mark discovery
 * as stopped.
 */
2567 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2569 __u8 status = *((__u8 *) skb->data);
2570 struct discovery_state *discov = &hdev->discovery;
2571 struct inquiry_entry *e;
2573 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2575 hci_conn_check_pending(hdev);
2577 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2580 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2581 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2583 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2588 if (discov->state != DISCOVERY_FINDING)
2591 if (list_empty(&discov->resolve)) {
2592 /* When BR/EDR inquiry is active and no LE scanning is in
2593 * progress, then change discovery state to indicate completion.
2595 * When running LE scanning and BR/EDR inquiry simultaneously
2596 * and the LE scan already finished, then change the discovery
2597 * state to indicate completion.
2599 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2600 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2601 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Devices are waiting for name resolution: start with the first */
2605 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2606 if (e && hci_resolve_name(hdev, e) == 0) {
2607 e->name_state = NAME_PENDING;
2608 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2610 /* When BR/EDR inquiry is active and no LE scanning is in
2611 * progress, then change discovery state to indicate completion.
2613 * When running LE scanning and BR/EDR inquiry simultaneously
2614 * and the LE scan already finished, then change the discovery
2615 * state to indicate completion.
2617 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2618 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2619 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2623 hci_dev_unlock(hdev);
/* Inquiry Result event handler. Validates the response count against
 * the skb length, then feeds each result into the inquiry cache and
 * reports it to mgmt (RSSI is unavailable in this event format).
 */
2626 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2628 struct inquiry_data data;
2629 struct inquiry_info *info = (void *) (skb->data + 1);
2630 int num_rsp = *((__u8 *) skb->data);
2632 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Guard against a truncated or malformed event payload */
2634 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2637 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2642 for (; num_rsp; num_rsp--, info++) {
2645 bacpy(&data.bdaddr, &info->bdaddr);
2646 data.pscan_rep_mode = info->pscan_rep_mode;
2647 data.pscan_period_mode = info->pscan_period_mode;
2648 data.pscan_mode = info->pscan_mode;
2649 memcpy(data.dev_class, info->dev_class, 3);
2650 data.clock_offset = info->clock_offset;
2651 data.rssi = HCI_RSSI_INVALID;
2652 data.ssp_mode = 0x00;
2654 flags = hci_inquiry_cache_update(hdev, &data, false);
2656 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2657 info->dev_class, HCI_RSSI_INVALID,
2658 flags, NULL, 0, NULL, 0);
2661 hci_dev_unlock(hdev);
/* Connection Complete event handler. Finds (or, for auto-connected
 * accept-list devices, creates) the connection object, then on success
 * finishes link setup: state transition, sysfs/debugfs registration,
 * auth/encrypt flags, remote feature read, and packet-type fixup for
 * pre-2.0 controllers. On failure, reports the failed connect.
 */
2664 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2666 struct hci_ev_conn_complete *ev = (void *) skb->data;
2667 struct hci_conn *conn;
2669 BT_DBG("%s", hdev->name);
2673 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2675 /* Connection may not exist if auto-connected. Check the bredr
2676 * allowlist to see if this device is allowed to auto connect.
2677 * If link is an ACL type, create a connection class
2680 * Auto-connect will only occur if the event filter is
2681 * programmed with a given address. Right now, event filter is
2682 * only used during suspend.
2684 if (ev->link_type == ACL_LINK &&
2685 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2688 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2691 bt_dev_err(hdev, "no memory for new conn");
/* An eSCO attempt may complete as a plain SCO link; retarget the
 * existing ESCO conn object in that case.
 */
2695 if (ev->link_type != SCO_LINK)
2698 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2703 conn->type = SCO_LINK;
2708 conn->handle = __le16_to_cpu(ev->handle);
2710 if (conn->type == ACL_LINK) {
2711 conn->state = BT_CONFIG;
2712 hci_conn_hold(conn);
/* Legacy pairing (no SSP, no stored key) gets the longer
 * pairing timeout for the disconnect timer.
 */
2714 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2715 !hci_find_link_key(hdev, &ev->bdaddr))
2716 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2718 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2720 conn->state = BT_CONNECTED;
2722 hci_debugfs_create_conn(conn);
2723 hci_conn_add_sysfs(conn);
2725 if (test_bit(HCI_AUTH, &hdev->flags))
2726 set_bit(HCI_CONN_AUTH, &conn->flags);
2728 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2729 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2731 /* Get remote features */
2732 if (conn->type == ACL_LINK) {
2733 struct hci_cp_read_remote_features cp;
2734 cp.handle = ev->handle;
2735 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2738 hci_req_update_scan(hdev);
2741 /* Set packet type for incoming connection */
2742 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2743 struct hci_cp_change_conn_ptype cp;
2744 cp.handle = ev->handle;
2745 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2746 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Non-zero status: the connection attempt failed */
2750 conn->state = BT_CLOSED;
2751 if (conn->type == ACL_LINK)
2752 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2753 conn->dst_type, ev->status);
2756 if (conn->type == ACL_LINK)
2757 hci_sco_setup(conn, ev->status);
2760 hci_connect_cfm(conn, ev->status);
2762 } else if (ev->link_type == SCO_LINK) {
2763 switch (conn->setting & SCO_AIRMODE_MASK) {
2764 case SCO_AIRMODE_CVSD:
/* Let the driver configure its audio path for CVSD */
2766 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2770 hci_connect_cfm(conn, ev->status);
2774 hci_dev_unlock(hdev);
2776 hci_conn_check_pending(hdev);
/* Send a Reject Connection Request for the given address with reason
 * "unacceptable BD_ADDR".
 */
2779 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2781 struct hci_cp_reject_conn_req cp;
2783 bacpy(&cp.bdaddr, bdaddr);
2784 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2785 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event handler. Applies the acceptance policy
 * (own-address check, L2CAP/protocol mask, reject list, connectable /
 * accept-list rules under mgmt), then either accepts the ACL or sync
 * connection with appropriate parameters, or defers the decision to
 * the upper layer.
 */
2788 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2790 struct hci_ev_conn_request *ev = (void *) skb->data;
2791 int mask = hdev->link_mode;
2792 struct inquiry_entry *ie;
2793 struct hci_conn *conn;
2796 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2799 /* Reject incoming connection from device with same BD ADDR against
2802 if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
2803 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
2805 hci_reject_conn(hdev, &ev->bdaddr);
/* Let registered protocols veto or defer the incoming connection */
2809 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2812 if (!(mask & HCI_LM_ACCEPT)) {
2813 hci_reject_conn(hdev, &ev->bdaddr);
2819 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2821 hci_reject_conn(hdev, &ev->bdaddr);
2825 /* Require HCI_CONNECTABLE or an accept list entry to accept the
2826 * connection. These features are only touched through mgmt so
2827 * only do the checks if HCI_MGMT is set.
2829 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2830 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2831 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2833 hci_reject_conn(hdev, &ev->bdaddr);
2837 /* Connection accepted */
/* Remember the device class learned from the request */
2839 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2841 memcpy(ie->data.dev_class, ev->dev_class, 3);
2843 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2846 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2849 bt_dev_err(hdev, "no memory for new connection");
2854 memcpy(conn->dev_class, ev->dev_class, 3);
2856 hci_dev_unlock(hdev);
2858 if (ev->link_type == ACL_LINK ||
2859 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2860 struct hci_cp_accept_conn_req cp;
2861 conn->state = BT_CONNECT;
2863 bacpy(&cp.bdaddr, &ev->bdaddr);
2865 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2866 cp.role = 0x00; /* Become central */
2868 cp.role = 0x01; /* Remain peripheral */
2870 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2871 } else if (!(flags & HCI_PROTO_DEFER)) {
2872 struct hci_cp_accept_sync_conn_req cp;
2873 conn->state = BT_CONNECT;
2875 bacpy(&cp.bdaddr, &ev->bdaddr);
2876 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* 8 kHz * 16-bit = 0x1f40 (8000) bytes/sec voice bandwidth */
2878 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2879 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2880 cp.max_latency = cpu_to_le16(0xffff);
2881 cp.content_format = cpu_to_le16(hdev->voice_setting);
2882 cp.retrans_effort = 0xff;
2884 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
/* Protocol asked to defer: let the upper layer decide later */
2887 conn->state = BT_CONNECT2;
2888 hci_connect_cfm(conn, 0);
2893 hci_dev_unlock(hdev);
/* Map an HCI disconnect reason code to the corresponding mgmt
 * disconnect reason reported to userspace.
 */
2896 static u8 hci_to_mgmt_reason(u8 err)
2899 case HCI_ERROR_CONNECTION_TIMEOUT:
2900 return MGMT_DEV_DISCONN_TIMEOUT;
2901 case HCI_ERROR_REMOTE_USER_TERM:
2902 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2903 case HCI_ERROR_REMOTE_POWER_OFF:
2904 return MGMT_DEV_DISCONN_REMOTE;
2905 case HCI_ERROR_LOCAL_HOST_TERM:
2906 return MGMT_DEV_DISCONN_LOCAL_HOST;
2908 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event handler. Closes the connection object,
 * reports the disconnect (or failed disconnect) to mgmt, performs
 * ACL-specific cleanup (link key removal, scan update, auto-connect
 * re-arming), notifies upper layers, wakes any suspend waiter, and
 * re-enables LE advertising that the connection may have suppressed.
 */
2912 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2914 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2916 struct hci_conn_params *params;
2917 struct hci_conn *conn;
2918 bool mgmt_connected;
2920 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2924 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Non-zero status: the disconnect itself failed */
2929 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2930 conn->dst_type, ev->status);
2934 conn->state = BT_CLOSED;
2936 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2938 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2939 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2941 reason = hci_to_mgmt_reason(ev->reason);
2943 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2944 reason, mgmt_connected);
2946 if (conn->type == ACL_LINK) {
2947 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2948 hci_remove_link_key(hdev, &conn->dst);
2950 hci_req_update_scan(hdev);
/* Re-arm auto-connection for devices configured to reconnect */
2953 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2955 switch (params->auto_connect) {
2956 case HCI_AUTO_CONN_LINK_LOSS:
/* Link-loss policy only reconnects after a connection timeout */
2957 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2961 case HCI_AUTO_CONN_DIRECT:
2962 case HCI_AUTO_CONN_ALWAYS:
2963 list_del_init(&params->action);
2964 list_add(&params->action, &hdev->pend_le_conns);
2965 hci_update_background_scan(hdev);
2973 hci_disconn_cfm(conn, ev->reason);
2975 /* The suspend notifier is waiting for all devices to disconnect so
2976 * clear the bit from pending tasks and inform the wait queue.
2978 if (list_empty(&hdev->conn_hash.list) &&
2979 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
2980 wake_up(&hdev->suspend_wait_q);
2983 /* Re-enable advertising if necessary, since it might
2984 * have been disabled by the connection. From the
2985 * HCI_LE_Set_Advertise_Enable command description in
2986 * the core specification (v4.0):
2987 * "The Controller shall continue advertising until the Host
2988 * issues an LE_Set_Advertise_Enable command with
2989 * Advertising_Enable set to 0x00 (Advertising is disabled)
2990 * or until a connection is created or until the Advertising
2991 * is timed out due to Directed Advertising."
2993 if (conn->type == LE_LINK) {
2994 hdev->cur_adv_instance = conn->adv_instance;
2995 hci_req_reenable_advertising(hdev);
3001 hci_dev_unlock(hdev);
/* Handle HCI_Authentication_Complete: update the connection's auth state,
 * notify mgmt on failure, and kick off link encryption where appropriate.
 * NOTE(review): this listing is line-sampled; braces/else-paths between the
 * numbered lines are missing from view — verify against the full file.
 */
3004 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3006 struct hci_ev_auth_complete *ev = (void *) skb->data;
3007 struct hci_conn *conn;
3009 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3013 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Any previous auth-failure marker is stale once a new result arrives. */
3018 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
/* Legacy (non-SSP) devices cannot be re-authenticated on a live link. */
3020 if (!hci_conn_ssp_enabled(conn) &&
3021 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3022 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
/* Success path: record authenticated state and promote security level. */
3024 set_bit(HCI_CONN_AUTH, &conn->flags);
3025 conn->sec_level = conn->pending_sec_level;
/* Missing-key errors are flagged specially so upper layers can re-pair. */
3028 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3029 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3031 mgmt_auth_failed(conn, ev->status);
3034 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3035 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3037 if (conn->state == BT_CONFIG) {
/* SSP links go straight to encryption after successful authentication. */
3038 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3039 struct hci_cp_set_conn_encrypt cp;
3040 cp.handle = ev->handle;
3042 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3045 conn->state = BT_CONNECTED;
3046 hci_connect_cfm(conn, ev->status);
3047 hci_conn_drop(conn);
3050 hci_auth_cfm(conn, ev->status);
3052 hci_conn_hold(conn);
3053 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3054 hci_conn_drop(conn);
/* If encryption was already requested while auth was pending, start it now. */
3057 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3059 struct hci_cp_set_conn_encrypt cp;
3060 cp.handle = ev->handle;
3062 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3065 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3066 hci_encrypt_cfm(conn, ev->status);
3071 hci_dev_unlock(hdev);
/* Handle HCI_Remote_Name_Request_Complete: deliver the (possibly absent)
 * remote name to mgmt and, if needed, initiate outgoing authentication.
 * NOTE(review): listing is line-sampled; some control-flow lines are missing.
 */
3074 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3076 struct hci_ev_remote_name *ev = (void *) skb->data;
3077 struct hci_conn *conn;
3079 BT_DBG("%s", hdev->name);
3081 hci_conn_check_pending(hdev);
3085 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Name resolution is only reported upward when mgmt is in control. */
3087 if (!hci_dev_test_flag(hdev, HCI_MGMT))
/* On success pass the bounded name; on failure pass NULL/0. */
3090 if (ev->status == 0)
3091 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3092 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3094 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3100 if (!hci_outgoing_auth_needed(hdev, conn))
/* Request authentication once per connection; AUTH_PEND guards reentry. */
3103 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3104 struct hci_cp_auth_requested cp;
3106 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3108 cp.handle = __cpu_to_le16(conn->handle);
3109 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3113 hci_dev_unlock(hdev);
/* Completion callback for HCI_Read_Encryption_Key_Size: record the key size
 * on the connection (0 on failure, forcing the weakest assumption) and then
 * run the encryption-change confirmation.
 * NOTE(review): listing is line-sampled; early-return paths are not visible.
 */
3116 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3117 u16 opcode, struct sk_buff *skb)
3119 const struct hci_rp_read_enc_key_size *rp;
3120 struct hci_conn *conn;
3123 BT_DBG("%s status 0x%02x", hdev->name, status);
/* Guard against a truncated or missing response before touching rp. */
3125 if (!skb || skb->len < sizeof(*rp)) {
3126 bt_dev_err(hdev, "invalid read key size response");
3130 rp = (void *)skb->data;
3131 handle = le16_to_cpu(rp->handle);
3135 conn = hci_conn_hash_lookup_handle(hdev, handle);
3139 /* While unexpected, the read_enc_key_size command may fail. The most
3140 * secure approach is to then assume the key size is 0 to force a
/* (original comment continues off-view; 0 forces an encryption re-check) */
3144 bt_dev_err(hdev, "failed to read key size for handle %u",
3146 conn->enc_key_size = 0;
3148 conn->enc_key_size = rp->key_size;
/* Status 0: the encryption change itself succeeded regardless of key read. */
3151 hci_encrypt_cfm(conn, 0);
3154 hci_dev_unlock(hdev);
/* Handle HCI_Encryption_Change: update auth/encrypt/FIPS/AES-CCM flags,
 * enforce link-mode security policy, optionally read the encryption key
 * size (BR/EDR), and set the authenticated payload timeout for LE-ping
 * capable links.
 * NOTE(review): listing is line-sampled; else-branches/gotos are off-view.
 */
3157 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3159 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3160 struct hci_conn *conn;
3162 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3166 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3172 /* Encryption implies authentication */
3173 set_bit(HCI_CONN_AUTH, &conn->flags);
3174 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3175 conn->sec_level = conn->pending_sec_level;
3177 /* P-256 authentication key implies FIPS */
3178 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3179 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 is AES-CCM on BR/EDR; LE links always use AES-CCM. */
3181 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3182 conn->type == LE_LINK)
3183 set_bit(HCI_CONN_AES_CCM, &conn->flags);
/* Encryption-off path: clear both encryption-related flags. */
3185 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3186 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3190 /* We should disregard the current RPA and generate a new one
3191 * whenever the encryption procedure fails.
3193 if (ev->status && conn->type == LE_LINK) {
3194 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3195 hci_adv_instances_set_rpa_expired(hdev, true);
3198 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3200 /* Check link security requirements are met */
3201 if (!hci_conn_check_link_mode(conn))
3202 ev->status = HCI_ERROR_AUTH_FAILURE;
/* A failed change on an established link is fatal: notify and disconnect. */
3204 if (ev->status && conn->state == BT_CONNECTED) {
3205 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3206 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3208 /* Notify upper layers so they can cleanup before
/* (original comment continues off-view) */
3211 hci_encrypt_cfm(conn, ev->status);
3212 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3213 hci_conn_drop(conn);
3217 /* Try reading the encryption key size for encrypted ACL links */
3218 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3219 struct hci_cp_read_enc_key_size cp;
3220 struct hci_request req;
3222 /* Only send HCI_Read_Encryption_Key_Size if the
3223 * controller really supports it. If it doesn't, assume
3224 * the default size (16).
/* commands[20] bit 0x10 advertises Read_Encryption_Key_Size support. */
3226 if (!(hdev->commands[20] & 0x10)) {
3227 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3231 hci_req_init(&req, hdev);
3233 cp.handle = cpu_to_le16(conn->handle);
3234 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
/* On send failure fall back to the default key size rather than stall. */
3236 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3237 bt_dev_err(hdev, "sending read key size failed");
3238 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3245 /* Set the default Authenticated Payload Timeout after
3246 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3247 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3248 * sent when the link is active and Encryption is enabled, the conn
3249 * type can be either LE or ACL and controller must support LMP Ping.
3250 * Ensure for AES-CCM encryption as well.
3252 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3253 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3254 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3255 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3256 struct hci_cp_write_auth_payload_to cp;
3258 cp.handle = cpu_to_le16(conn->handle);
3259 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3260 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3265 hci_encrypt_cfm(conn, ev->status);
3268 hci_dev_unlock(hdev);
/* Handle HCI_Change_Connection_Link_Key_Complete: mark the link secure on
 * success, clear the pending-auth flag, and confirm to upper layers.
 * NOTE(review): listing is line-sampled; the success/lookup guards are
 * off-view.
 */
3271 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3272 struct sk_buff *skb)
3274 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3275 struct hci_conn *conn;
3277 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3281 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3284 set_bit(HCI_CONN_SECURE, &conn->flags);
3286 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3288 hci_key_change_cfm(conn, ev->status);
3291 hci_dev_unlock(hdev);
/* Handle HCI_Read_Remote_Supported_Features_Complete: cache page-0 features
 * and continue connection setup (extended features read, remote name
 * request, mgmt notification, or completing the connection).
 * NOTE(review): listing is line-sampled; some guards/braces are off-view.
 */
3294 static void hci_remote_features_evt(struct hci_dev *hdev,
3295 struct sk_buff *skb)
3297 struct hci_ev_remote_features *ev = (void *) skb->data;
3298 struct hci_conn *conn;
3300 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3304 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Cache feature page 0 (8 bytes) on the connection. */
3309 memcpy(conn->features[0], ev->features, 8);
3311 if (conn->state != BT_CONFIG)
/* If both sides support extended features, fetch page 1 next. */
3314 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3315 lmp_ext_feat_capable(conn)) {
3316 struct hci_cp_read_remote_ext_features cp;
3317 cp.handle = ev->handle;
3319 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* Otherwise request the remote name before reporting the connection. */
3324 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3325 struct hci_cp_remote_name_req cp;
3326 memset(&cp, 0, sizeof(cp));
3327 bacpy(&cp.bdaddr, &conn->dst);
3328 cp.pscan_rep_mode = 0x02;
3329 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3330 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3331 mgmt_device_connected(hdev, conn, NULL, 0);
/* No outgoing authentication needed: the connection is complete. */
3333 if (!hci_outgoing_auth_needed(hdev, conn)) {
3334 conn->state = BT_CONNECTED;
3335 hci_connect_cfm(conn, ev->status);
3336 hci_conn_drop(conn);
3340 hci_dev_unlock(hdev);
/* Common bookkeeping for Command Complete/Status events: cancel the command
 * timeout and refresh the command-credit counter / ncmd watchdog.
 * NOTE(review): listing is line-sampled; the ncmd branch structure is
 * partially off-view — confirm against the full file.
 */
3343 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3345 cancel_delayed_work(&hdev->cmd_timer);
/* During HCI_RESET the credit accounting is handled elsewhere. */
3347 if (!test_bit(HCI_RESET, &hdev->flags)) {
3349 cancel_delayed_work(&hdev->ncmd_timer);
3350 atomic_set(&hdev->cmd_cnt, 1);
/* ncmd == 0 path (off-view): arm the watchdog awaiting new credits. */
3352 schedule_delayed_work(&hdev->ncmd_timer,
/* Handle HCI_Command_Complete: extract opcode/status, dispatch to the
 * per-opcode hci_cc_* completion handler, update command credits, and
 * resolve any pending hci_request waiting on this opcode.
 * Outputs: *opcode, *status, and the request-completion callbacks are
 * returned to the caller (the main event demultiplexer).
 * NOTE(review): listing is line-sampled; `switch (*opcode) {`, `break;`
 * lines, and the default case body are off-view.
 */
3358 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3359 u16 *opcode, u8 *status,
3360 hci_req_complete_t *req_complete,
3361 hci_req_complete_skb_t *req_complete_skb)
3363 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3365 *opcode = __le16_to_cpu(ev->opcode);
/* Status is the first byte of the return parameters, right after the
 * event header. */
3366 *status = skb->data[sizeof(*ev)];
/* Advance skb so hci_cc_* handlers see only the return parameters. */
3368 skb_pull(skb, sizeof(*ev));
3371 case HCI_OP_INQUIRY_CANCEL:
3372 hci_cc_inquiry_cancel(hdev, skb, status);
3375 case HCI_OP_PERIODIC_INQ:
3376 hci_cc_periodic_inq(hdev, skb);
3379 case HCI_OP_EXIT_PERIODIC_INQ:
3380 hci_cc_exit_periodic_inq(hdev, skb);
3383 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3384 hci_cc_remote_name_req_cancel(hdev, skb);
3387 case HCI_OP_ROLE_DISCOVERY:
3388 hci_cc_role_discovery(hdev, skb);
3391 case HCI_OP_READ_LINK_POLICY:
3392 hci_cc_read_link_policy(hdev, skb);
3395 case HCI_OP_WRITE_LINK_POLICY:
3396 hci_cc_write_link_policy(hdev, skb);
3399 case HCI_OP_READ_DEF_LINK_POLICY:
3400 hci_cc_read_def_link_policy(hdev, skb);
3403 case HCI_OP_WRITE_DEF_LINK_POLICY:
3404 hci_cc_write_def_link_policy(hdev, skb);
3408 hci_cc_reset(hdev, skb);
3411 case HCI_OP_READ_STORED_LINK_KEY:
3412 hci_cc_read_stored_link_key(hdev, skb);
3415 case HCI_OP_DELETE_STORED_LINK_KEY:
3416 hci_cc_delete_stored_link_key(hdev, skb);
3419 case HCI_OP_WRITE_LOCAL_NAME:
3420 hci_cc_write_local_name(hdev, skb);
3423 case HCI_OP_READ_LOCAL_NAME:
3424 hci_cc_read_local_name(hdev, skb);
3427 case HCI_OP_WRITE_AUTH_ENABLE:
3428 hci_cc_write_auth_enable(hdev, skb);
3431 case HCI_OP_WRITE_ENCRYPT_MODE:
3432 hci_cc_write_encrypt_mode(hdev, skb);
3435 case HCI_OP_WRITE_SCAN_ENABLE:
3436 hci_cc_write_scan_enable(hdev, skb);
3439 case HCI_OP_SET_EVENT_FLT:
3440 hci_cc_set_event_filter(hdev, skb);
3443 case HCI_OP_READ_CLASS_OF_DEV:
3444 hci_cc_read_class_of_dev(hdev, skb);
3447 case HCI_OP_WRITE_CLASS_OF_DEV:
3448 hci_cc_write_class_of_dev(hdev, skb);
3451 case HCI_OP_READ_VOICE_SETTING:
3452 hci_cc_read_voice_setting(hdev, skb);
3455 case HCI_OP_WRITE_VOICE_SETTING:
3456 hci_cc_write_voice_setting(hdev, skb);
3459 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3460 hci_cc_read_num_supported_iac(hdev, skb);
3463 case HCI_OP_WRITE_SSP_MODE:
3464 hci_cc_write_ssp_mode(hdev, skb);
3467 case HCI_OP_WRITE_SC_SUPPORT:
3468 hci_cc_write_sc_support(hdev, skb);
3471 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3472 hci_cc_read_auth_payload_timeout(hdev, skb);
3475 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3476 hci_cc_write_auth_payload_timeout(hdev, skb);
3479 case HCI_OP_READ_LOCAL_VERSION:
3480 hci_cc_read_local_version(hdev, skb);
3483 case HCI_OP_READ_LOCAL_COMMANDS:
3484 hci_cc_read_local_commands(hdev, skb);
3487 case HCI_OP_READ_LOCAL_FEATURES:
3488 hci_cc_read_local_features(hdev, skb);
3491 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3492 hci_cc_read_local_ext_features(hdev, skb);
3495 case HCI_OP_READ_BUFFER_SIZE:
3496 hci_cc_read_buffer_size(hdev, skb);
3499 case HCI_OP_READ_BD_ADDR:
3500 hci_cc_read_bd_addr(hdev, skb);
3503 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3504 hci_cc_read_local_pairing_opts(hdev, skb);
3507 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3508 hci_cc_read_page_scan_activity(hdev, skb);
3511 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3512 hci_cc_write_page_scan_activity(hdev, skb);
3515 case HCI_OP_READ_PAGE_SCAN_TYPE:
3516 hci_cc_read_page_scan_type(hdev, skb);
3519 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3520 hci_cc_write_page_scan_type(hdev, skb);
3523 case HCI_OP_READ_DATA_BLOCK_SIZE:
3524 hci_cc_read_data_block_size(hdev, skb);
3527 case HCI_OP_READ_FLOW_CONTROL_MODE:
3528 hci_cc_read_flow_control_mode(hdev, skb);
3531 case HCI_OP_READ_LOCAL_AMP_INFO:
3532 hci_cc_read_local_amp_info(hdev, skb);
3535 case HCI_OP_READ_CLOCK:
3536 hci_cc_read_clock(hdev, skb);
3539 case HCI_OP_READ_INQ_RSP_TX_POWER:
3540 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3543 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3544 hci_cc_read_def_err_data_reporting(hdev, skb);
3547 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3548 hci_cc_write_def_err_data_reporting(hdev, skb);
3551 case HCI_OP_PIN_CODE_REPLY:
3552 hci_cc_pin_code_reply(hdev, skb);
3555 case HCI_OP_PIN_CODE_NEG_REPLY:
3556 hci_cc_pin_code_neg_reply(hdev, skb);
3559 case HCI_OP_READ_LOCAL_OOB_DATA:
3560 hci_cc_read_local_oob_data(hdev, skb);
3563 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3564 hci_cc_read_local_oob_ext_data(hdev, skb);
3567 case HCI_OP_LE_READ_BUFFER_SIZE:
3568 hci_cc_le_read_buffer_size(hdev, skb);
3571 case HCI_OP_LE_READ_LOCAL_FEATURES:
3572 hci_cc_le_read_local_features(hdev, skb);
3575 case HCI_OP_LE_READ_ADV_TX_POWER:
3576 hci_cc_le_read_adv_tx_power(hdev, skb);
3579 case HCI_OP_USER_CONFIRM_REPLY:
3580 hci_cc_user_confirm_reply(hdev, skb);
3583 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3584 hci_cc_user_confirm_neg_reply(hdev, skb);
3587 case HCI_OP_USER_PASSKEY_REPLY:
3588 hci_cc_user_passkey_reply(hdev, skb);
3591 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3592 hci_cc_user_passkey_neg_reply(hdev, skb);
3595 case HCI_OP_LE_SET_RANDOM_ADDR:
3596 hci_cc_le_set_random_addr(hdev, skb);
3599 case HCI_OP_LE_SET_ADV_ENABLE:
3600 hci_cc_le_set_adv_enable(hdev, skb);
3603 case HCI_OP_LE_SET_SCAN_PARAM:
3604 hci_cc_le_set_scan_param(hdev, skb);
3607 case HCI_OP_LE_SET_SCAN_ENABLE:
3608 hci_cc_le_set_scan_enable(hdev, skb);
3611 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3612 hci_cc_le_read_accept_list_size(hdev, skb);
3615 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3616 hci_cc_le_clear_accept_list(hdev, skb);
3619 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3620 hci_cc_le_add_to_accept_list(hdev, skb);
3623 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3624 hci_cc_le_del_from_accept_list(hdev, skb);
3627 case HCI_OP_LE_READ_SUPPORTED_STATES:
3628 hci_cc_le_read_supported_states(hdev, skb);
3631 case HCI_OP_LE_READ_DEF_DATA_LEN:
3632 hci_cc_le_read_def_data_len(hdev, skb);
3635 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3636 hci_cc_le_write_def_data_len(hdev, skb);
3639 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3640 hci_cc_le_add_to_resolv_list(hdev, skb);
3643 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3644 hci_cc_le_del_from_resolv_list(hdev, skb);
3647 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3648 hci_cc_le_clear_resolv_list(hdev, skb);
3651 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3652 hci_cc_le_read_resolv_list_size(hdev, skb);
3655 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3656 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3659 case HCI_OP_LE_READ_MAX_DATA_LEN:
3660 hci_cc_le_read_max_data_len(hdev, skb);
3663 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3664 hci_cc_write_le_host_supported(hdev, skb);
3667 case HCI_OP_LE_SET_ADV_PARAM:
3668 hci_cc_set_adv_param(hdev, skb);
3671 case HCI_OP_READ_RSSI:
3672 hci_cc_read_rssi(hdev, skb);
3675 case HCI_OP_READ_TX_POWER:
3676 hci_cc_read_tx_power(hdev, skb);
3679 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3680 hci_cc_write_ssp_debug_mode(hdev, skb);
3683 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3684 hci_cc_le_set_ext_scan_param(hdev, skb);
3687 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3688 hci_cc_le_set_ext_scan_enable(hdev, skb);
3691 case HCI_OP_LE_SET_DEFAULT_PHY:
3692 hci_cc_le_set_default_phy(hdev, skb);
3695 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3696 hci_cc_le_read_num_adv_sets(hdev, skb);
3699 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3700 hci_cc_set_ext_adv_param(hdev, skb);
3703 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3704 hci_cc_le_set_ext_adv_enable(hdev, skb);
3707 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3708 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3711 case HCI_OP_LE_READ_TRANSMIT_POWER:
3712 hci_cc_le_read_transmit_power(hdev, skb);
/* Default case: unhandled opcode, only logged. */
3716 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3720 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
/* Resolve any hci_request blocked on this opcode. */
3722 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3725 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3727 "unexpected event for opcode 0x%4.4x", *opcode);
/* If we have credits and queued commands, restart the TX machinery. */
3731 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3732 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle HCI_Command_Status: dispatch to the per-opcode hci_cs_* handler,
 * update command credits, and complete pending requests on failure (or on
 * success for commands with no dedicated completion event).
 * NOTE(review): listing is line-sampled; `switch (*opcode) {`, `break;`
 * lines, and part of the completion condition are off-view.
 */
3735 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3736 u16 *opcode, u8 *status,
3737 hci_req_complete_t *req_complete,
3738 hci_req_complete_skb_t *req_complete_skb)
3740 struct hci_ev_cmd_status *ev = (void *) skb->data;
3742 skb_pull(skb, sizeof(*ev));
3744 *opcode = __le16_to_cpu(ev->opcode);
3745 *status = ev->status;
3748 case HCI_OP_INQUIRY:
3749 hci_cs_inquiry(hdev, ev->status);
3752 case HCI_OP_CREATE_CONN:
3753 hci_cs_create_conn(hdev, ev->status);
3756 case HCI_OP_DISCONNECT:
3757 hci_cs_disconnect(hdev, ev->status);
3760 case HCI_OP_ADD_SCO:
3761 hci_cs_add_sco(hdev, ev->status);
3764 case HCI_OP_AUTH_REQUESTED:
3765 hci_cs_auth_requested(hdev, ev->status);
3768 case HCI_OP_SET_CONN_ENCRYPT:
3769 hci_cs_set_conn_encrypt(hdev, ev->status);
3772 case HCI_OP_REMOTE_NAME_REQ:
3773 hci_cs_remote_name_req(hdev, ev->status);
3776 case HCI_OP_READ_REMOTE_FEATURES:
3777 hci_cs_read_remote_features(hdev, ev->status);
3780 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3781 hci_cs_read_remote_ext_features(hdev, ev->status);
3784 case HCI_OP_SETUP_SYNC_CONN:
3785 hci_cs_setup_sync_conn(hdev, ev->status);
3788 case HCI_OP_SNIFF_MODE:
3789 hci_cs_sniff_mode(hdev, ev->status);
3792 case HCI_OP_EXIT_SNIFF_MODE:
3793 hci_cs_exit_sniff_mode(hdev, ev->status);
3796 case HCI_OP_SWITCH_ROLE:
3797 hci_cs_switch_role(hdev, ev->status);
3800 case HCI_OP_LE_CREATE_CONN:
3801 hci_cs_le_create_conn(hdev, ev->status);
3804 case HCI_OP_LE_READ_REMOTE_FEATURES:
3805 hci_cs_le_read_remote_features(hdev, ev->status);
3808 case HCI_OP_LE_START_ENC:
3809 hci_cs_le_start_enc(hdev, ev->status);
3812 case HCI_OP_LE_EXT_CREATE_CONN:
3813 hci_cs_le_ext_create_conn(hdev, ev->status);
/* Default case: unhandled opcode, only logged. */
3817 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3821 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3823 /* Indicate request completion if the command failed. Also, if
3824 * we're not waiting for a special event and we get a success
3825 * command status we should try to flag the request as completed
3826 * (since for this kind of commands there will not be a command
3830 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3831 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3834 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3836 "unexpected event for opcode 0x%4.4x", *opcode);
/* If we have credits and queued commands, restart the TX machinery. */
3840 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3841 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle HCI_Hardware_Error: record the controller's error code and
 * schedule an asynchronous error reset on the request workqueue.
 */
3844 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3846 struct hci_ev_hardware_error *ev = (void *) skb->data;
3848 hdev->hw_error_code = ev->code;
3850 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle HCI_Role_Change: update the connection's central/peripheral role
 * (on success) and confirm the role switch to upper layers.
 * NOTE(review): listing is line-sampled; the status/lookup guards are
 * off-view.
 */
3853 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3855 struct hci_ev_role_change *ev = (void *) skb->data;
3856 struct hci_conn *conn;
3858 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3862 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3865 conn->role = ev->role;
3867 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3869 hci_role_switch_cfm(conn, ev->status, ev->role);
3872 hci_dev_unlock(hdev);
/* Handle HCI_Number_Of_Completed_Packets (packet-based flow control):
 * return transmit credits per connection handle, clamped to the
 * controller-advertised maxima, then reschedule the TX work.
 * NOTE(review): listing is line-sampled; case labels (ACL/LE/SCO) and
 * `break;` lines are off-view.
 */
3875 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3877 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only valid in packet-based flow-control mode. */
3880 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3881 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate the advertised handle count against the actual skb length. */
3885 if (skb->len < sizeof(*ev) ||
3886 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3887 BT_DBG("%s bad parameters", hdev->name);
3891 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3893 for (i = 0; i < ev->num_hndl; i++) {
3894 struct hci_comp_pkts_info *info = &ev->handles[i];
3895 struct hci_conn *conn;
3896 __u16 handle, count;
3898 handle = __le16_to_cpu(info->handle);
3899 count = __le16_to_cpu(info->count);
3901 conn = hci_conn_hash_lookup_handle(hdev, handle);
3905 conn->sent -= count;
3907 switch (conn->type) {
/* ACL link: credit the ACL pool, never above acl_pkts. */
3909 hdev->acl_cnt += count;
3910 if (hdev->acl_cnt > hdev->acl_pkts)
3911 hdev->acl_cnt = hdev->acl_pkts;
/* LE link: use the dedicated LE pool if present, else share ACL. */
3915 if (hdev->le_pkts) {
3916 hdev->le_cnt += count;
3917 if (hdev->le_cnt > hdev->le_pkts)
3918 hdev->le_cnt = hdev->le_pkts;
3920 hdev->acl_cnt += count;
3921 if (hdev->acl_cnt > hdev->acl_pkts)
3922 hdev->acl_cnt = hdev->acl_pkts;
/* SCO/eSCO link: credit the SCO pool. */
3927 hdev->sco_cnt += count;
3928 if (hdev->sco_cnt > hdev->sco_pkts)
3929 hdev->sco_cnt = hdev->sco_pkts;
3933 bt_dev_err(hdev, "unknown type %d conn %p",
3939 queue_work(hdev->workqueue, &hdev->tx_work);
/* Look up a connection by handle, honoring the device type: a primary
 * controller resolves handles directly, while an AMP controller resolves
 * them through its hci_chan table.
 * NOTE(review): listing is line-sampled; case labels and the chan->conn
 * return path are off-view.
 */
3942 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3945 struct hci_chan *chan;
3947 switch (hdev->dev_type) {
3949 return hci_conn_hash_lookup_handle(hdev, handle);
3951 chan = hci_chan_lookup_handle(hdev, handle);
3956 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* Handle HCI_Number_Of_Completed_Data_Blocks (block-based flow control):
 * return block credits per handle, clamped to num_blocks, then reschedule
 * the TX work. Counterpart of hci_num_comp_pkts_evt for AMP controllers.
 * NOTE(review): listing is line-sampled; case labels and `break;` lines
 * are off-view.
 */
3963 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3965 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* This event is only valid in block-based flow-control mode. */
3968 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3969 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate the advertised handle count against the actual skb length. */
3973 if (skb->len < sizeof(*ev) ||
3974 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3975 BT_DBG("%s bad parameters", hdev->name);
3979 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3982 for (i = 0; i < ev->num_hndl; i++) {
3983 struct hci_comp_blocks_info *info = &ev->handles[i];
3984 struct hci_conn *conn = NULL;
3985 __u16 handle, block_count;
3987 handle = __le16_to_cpu(info->handle);
3988 block_count = __le16_to_cpu(info->blocks);
3990 conn = __hci_conn_lookup_handle(hdev, handle);
3994 conn->sent -= block_count;
3996 switch (conn->type) {
/* Credit the shared block pool, never above num_blocks. */
3999 hdev->block_cnt += block_count;
4000 if (hdev->block_cnt > hdev->num_blocks)
4001 hdev->block_cnt = hdev->num_blocks;
4005 bt_dev_err(hdev, "unknown type %d conn %p",
4011 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle HCI_Mode_Change (active/sniff): record the new mode, keep the
 * power-save flag in sync, and run any deferred SCO setup.
 * NOTE(review): listing is line-sampled; the status/lookup guards are
 * off-view.
 */
4014 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4016 struct hci_ev_mode_change *ev = (void *) skb->data;
4017 struct hci_conn *conn;
4019 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4023 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4025 conn->mode = ev->mode;
/* Only an unsolicited mode change updates the power-save flag. */
4027 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4029 if (conn->mode == HCI_CM_ACTIVE)
4030 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4032 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
/* SCO setup may have been parked waiting for the mode change. */
4035 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4036 hci_sco_setup(conn, ev->status);
4039 hci_dev_unlock(hdev);
/* Handle HCI_PIN_Code_Request: reject the request when not bondable and
 * we did not initiate authentication, otherwise forward it to mgmt (user
 * space) with a secure-PIN hint.
 * NOTE(review): listing is line-sampled; some guards are off-view.
 */
4042 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4044 struct hci_ev_pin_code_req *ev = (void *) skb->data;
4045 struct hci_conn *conn;
4047 BT_DBG("%s", hdev->name);
4051 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Extend the disconnect timeout while pairing is in progress. */
4055 if (conn->state == BT_CONNECTED) {
4056 hci_conn_hold(conn);
4057 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4058 hci_conn_drop(conn);
/* Not bondable and remote-initiated: refuse the PIN request outright. */
4061 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4062 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4063 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4064 sizeof(ev->bdaddr), &ev->bdaddr);
4065 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
/* High security requires a 16-digit (secure) PIN. */
4068 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4073 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4077 hci_dev_unlock(hdev);
/* Store a link key's type and PIN length on the connection and derive the
 * pending security level the key can satisfy.
 * NOTE(review): listing is line-sampled; the `switch (key_type) {` line,
 * `break;` lines, and the pin_len condition at 4093-4095 are off-view.
 */
4080 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
/* A changed-combination key keeps the previously stored type (body
 * off-view). */
4082 if (key_type == HCI_LK_CHANGED_COMBINATION)
4085 conn->pin_length = pin_len;
4086 conn->key_type = key_type;
4089 case HCI_LK_LOCAL_UNIT:
4090 case HCI_LK_REMOTE_UNIT:
4091 case HCI_LK_DEBUG_COMBINATION:
/* Legacy combination key: HIGH only with a long PIN, else MEDIUM. */
4093 case HCI_LK_COMBINATION:
4095 conn->pending_sec_level = BT_SECURITY_HIGH;
4097 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4099 case HCI_LK_UNAUTH_COMBINATION_P192:
4100 case HCI_LK_UNAUTH_COMBINATION_P256:
4101 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4103 case HCI_LK_AUTH_COMBINATION_P192:
4104 conn->pending_sec_level = BT_SECURITY_HIGH;
/* Authenticated P-256 key reaches FIPS level. */
4106 case HCI_LK_AUTH_COMBINATION_P256:
4107 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle HCI_Link_Key_Request: look up a stored key for the peer, reject
 * keys too weak for the pending security level, and reply with the key or
 * a negative reply.
 * NOTE(review): listing is line-sampled; goto labels and some braces are
 * off-view.
 */
4112 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4114 struct hci_ev_link_key_req *ev = (void *) skb->data;
4115 struct hci_cp_link_key_reply cp;
4116 struct hci_conn *conn;
4117 struct link_key *key;
4119 BT_DBG("%s", hdev->name);
4121 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4126 key = hci_find_link_key(hdev, &ev->bdaddr);
4128 BT_DBG("%s link key not found for %pMR", hdev->name,
4133 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4136 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4138 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Reject unauthenticated keys when authentication was requested. */
4140 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4141 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4142 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4143 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* Short-PIN combination keys are too weak for HIGH/FIPS security. */
4147 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4148 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4149 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4150 BT_DBG("%s ignoring key unauthenticated for high security",
4155 conn_set_key(conn, key->type, key->pin_len);
4158 bacpy(&cp.bdaddr, &ev->bdaddr);
4159 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4161 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4163 hci_dev_unlock(hdev);
/* not-found path: reply negatively with just the bdaddr (6 bytes). */
4168 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4169 hci_dev_unlock(hdev);
/* Handle HCI_Link_Key_Notification: reject all-zero keys (CVE-2020-26555
 * mitigation), store the new key, notify mgmt, and honor the debug-key and
 * flush-key policies.
 * NOTE(review): listing is line-sampled; goto labels and some guards are
 * off-view.
 */
4172 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4174 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4175 struct hci_conn *conn;
4176 struct link_key *key;
4180 BT_DBG("%s", hdev->name);
4184 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4188 /* Ignore NULL link key against CVE-2020-26555 */
/* crypto_memneq is constant-time; a ZERO_KEY match means a forged key. */
4189 if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4190 bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4192 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4193 hci_conn_drop(conn);
4197 hci_conn_hold(conn);
4198 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4199 hci_conn_drop(conn);
4201 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4202 conn_set_key(conn, ev->key_type, conn->pin_length);
4204 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4207 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4208 ev->key_type, pin_len, &persistent);
4212 /* Update connection information since adding the key will have
4213 * fixed up the type in the case of changed combination keys.
4215 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4216 conn_set_key(conn, key->type, key->pin_len);
4218 mgmt_new_link_key(hdev, key, persistent);
4220 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4221 * is set. If it's not set simply remove the key from the kernel
4222 * list (we've still notified user space about it but with
4223 * store_hint being 0).
4225 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4226 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4227 list_del_rcu(&key->list);
4228 kfree_rcu(key, rcu);
/* Persistent keys survive disconnect; non-persistent ones are flushed. */
4233 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4235 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4238 hci_dev_unlock(hdev);
/* Handle HCI_Read_Clock_Offset_Complete: cache the peer's clock offset in
 * the inquiry cache (speeds up future page/connect attempts).
 */
4241 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4243 struct hci_ev_clock_offset *ev = (void *) skb->data;
4244 struct hci_conn *conn;
4246 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4250 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4251 if (conn && !ev->status) {
4252 struct inquiry_entry *ie;
4254 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4256 ie->data.clock_offset = ev->clock_offset;
4257 ie->timestamp = jiffies;
4261 hci_dev_unlock(hdev);
/* Handle HCI_Connection_Packet_Type_Changed: record the newly negotiated
 * packet-type bitmask on the connection.
 */
4264 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4266 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4267 struct hci_conn *conn;
4269 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4273 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4274 if (conn && !ev->status)
4275 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4277 hci_dev_unlock(hdev);
/* Handle HCI_Page_Scan_Repetition_Mode_Change: refresh the cached page-scan
 * repetition mode for the peer in the inquiry cache.
 */
4280 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4282 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4283 struct inquiry_entry *ie;
4285 BT_DBG("%s", hdev->name);
4289 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4291 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4292 ie->timestamp = jiffies;
4295 hci_dev_unlock(hdev);
/* Handle HCI_Inquiry_Result_with_RSSI: parse one of the two on-the-wire
 * layouts (with or without the legacy pscan_mode byte, distinguished by
 * per-response size), update the inquiry cache, and report each device to
 * mgmt.
 * NOTE(review): listing is line-sampled; early-return guards are off-view.
 */
4298 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4299 struct sk_buff *skb)
4301 struct inquiry_data data;
4302 int num_rsp = *((__u8 *) skb->data);
4304 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Periodic inquiry results are not forwarded (guard body off-view). */
4309 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Layout detection: divide payload (minus count byte) by num_rsp. */
4314 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4315 struct inquiry_info_with_rssi_and_pscan_mode *info;
4316 info = (void *) (skb->data + 1);
/* Bounds check before walking the response array. */
4318 if (skb->len < num_rsp * sizeof(*info) + 1)
4321 for (; num_rsp; num_rsp--, info++) {
4324 bacpy(&data.bdaddr, &info->bdaddr);
4325 data.pscan_rep_mode = info->pscan_rep_mode;
4326 data.pscan_period_mode = info->pscan_period_mode;
4327 data.pscan_mode = info->pscan_mode;
4328 memcpy(data.dev_class, info->dev_class, 3);
4329 data.clock_offset = info->clock_offset;
4330 data.rssi = info->rssi;
4331 data.ssp_mode = 0x00;
4333 flags = hci_inquiry_cache_update(hdev, &data, false);
4335 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4336 info->dev_class, info->rssi,
4337 flags, NULL, 0, NULL, 0);
/* Standard layout: no pscan_mode field, default it to 0. */
4340 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4342 if (skb->len < num_rsp * sizeof(*info) + 1)
4345 for (; num_rsp; num_rsp--, info++) {
4348 bacpy(&data.bdaddr, &info->bdaddr);
4349 data.pscan_rep_mode = info->pscan_rep_mode;
4350 data.pscan_period_mode = info->pscan_period_mode;
4351 data.pscan_mode = 0x00;
4352 memcpy(data.dev_class, info->dev_class, 3);
4353 data.clock_offset = info->clock_offset;
4354 data.rssi = info->rssi;
4355 data.ssp_mode = 0x00;
4357 flags = hci_inquiry_cache_update(hdev, &data, false);
4359 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4360 info->dev_class, info->rssi,
4361 flags, NULL, 0, NULL, 0);
4366 hci_dev_unlock(hdev);
/* Handle HCI_Read_Remote_Extended_Features_Complete: cache the requested
 * feature page, derive remote-host SSP/SC support from page 1, and continue
 * connection setup (name request / mgmt notify / complete).
 * NOTE(review): listing is line-sampled; some guards/braces are off-view.
 */
4369 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4370 struct sk_buff *skb)
4372 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4373 struct hci_conn *conn;
4375 BT_DBG("%s", hdev->name);
4379 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Cache only in-range feature pages (8 bytes each). */
4383 if (ev->page < HCI_MAX_PAGES)
4384 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the remote host's SSP/SC support bits. */
4386 if (!ev->status && ev->page == 0x01) {
4387 struct inquiry_entry *ie;
4389 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4391 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4393 if (ev->features[0] & LMP_HOST_SSP) {
4394 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4396 /* It is mandatory by the Bluetooth specification that
4397 * Extended Inquiry Results are only used when Secure
4398 * Simple Pairing is enabled, but some devices violate
/* (original comment continues off-view) */
4401 * To make these devices work, the internal SSP
4402 * enabled flag needs to be cleared if the remote host
4403 * features do not indicate SSP support */
4404 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4407 if (ev->features[0] & LMP_HOST_SC)
4408 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4411 if (conn->state != BT_CONFIG)
/* Request the remote name before reporting the connection to mgmt. */
4414 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4415 struct hci_cp_remote_name_req cp;
4416 memset(&cp, 0, sizeof(cp));
4417 bacpy(&cp.bdaddr, &conn->dst);
4418 cp.pscan_rep_mode = 0x02;
4419 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4420 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4421 mgmt_device_connected(hdev, conn, NULL, 0);
/* No outgoing authentication needed: the connection is complete. */
4423 if (!hci_outgoing_auth_needed(hdev, conn)) {
4424 conn->state = BT_CONNECTED;
4425 hci_connect_cfm(conn, ev->status);
4426 hci_conn_drop(conn);
4430 hci_dev_unlock(hdev);
/* HCI Synchronous Connection Complete event handler.
 *
 * Validates the link type (only SCO and eSCO are legal for this event
 * per Core 5.3), locates the matching connection object — falling back
 * from a SCO lookup to an eSCO lookup, since eSCO is attempted first —
 * and then, depending on ev->status, either registers the established
 * connection, retries setup with an adjusted packet type for known
 * negotiation failure codes, or closes the connection. Finally the
 * driver is notified of the negotiated air mode and the connection is
 * confirmed to the upper layers.
 */
4433 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4434 struct sk_buff *skb)
4436 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4437 struct hci_conn *conn;
4439 switch (ev->link_type) {
4444 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4445 * for HCI_Synchronous_Connection_Complete is limited to
4446 * either SCO or eSCO
4448 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4452 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4456 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4458 if (ev->link_type == ESCO_LINK)
4461 /* When the link type in the event indicates SCO connection
4462 * and lookup of the connection object fails, then check
4463 * if an eSCO connection object exists.
4465 * The core limits the synchronous connections to either
4466 * SCO or eSCO. The eSCO connection is preferred and tried
4467 * to be setup first and until successfully established,
4468 * the link type will be hinted as eSCO.
4470 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4475 switch (ev->status) {
4477 /* The synchronous connection complete event should only be
4478 * sent once per new connection. Receiving a successful
4479 * complete event when the connection status is already
4480 * BT_CONNECTED means that the device is misbehaving and sent
4481 * multiple complete event packets for the same new connection.
4483 * Registering the device more than once can corrupt kernel
4484 * memory, hence upon detecting this invalid event, we report
4485 * an error and ignore the packet.
4487 if (conn->state == BT_CONNECTED) {
4488 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
/* Success: record handle/type and register the connection */
4492 conn->handle = __le16_to_cpu(ev->handle);
4493 conn->state = BT_CONNECTED;
4494 conn->type = ev->link_type;
4496 hci_debugfs_create_conn(conn);
4497 hci_conn_add_sysfs(conn);
/* Failure codes that typically indicate a parameter-negotiation
 * problem: retry setup with a restricted packet type.
 */
4500 case 0x10: /* Connection Accept Timeout */
4501 case 0x0d: /* Connection Rejected due to Limited Resources */
4502 case 0x11: /* Unsupported Feature or Parameter Value */
4503 case 0x1c: /* SCO interval rejected */
4504 case 0x1a: /* Unsupported Remote Feature */
4505 case 0x1e: /* Invalid LMP Parameters */
4506 case 0x1f: /* Unspecified error */
4507 case 0x20: /* Unsupported LMP Parameter value */
4509 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4510 (hdev->esco_type & EDR_ESCO_MASK);
4511 if (hci_setup_sync(conn, conn->link->handle))
4517 conn->state = BT_CLOSED;
4521 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
/* Let the driver configure its audio path for the air mode */
4523 switch (ev->air_mode) {
4526 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4530 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4534 hci_connect_cfm(conn, ev->status);
4539 hci_dev_unlock(hdev);
/* Compute the used length of an EIR (Extended Inquiry Response)
 * buffer by walking its length-prefixed fields: each field consumes
 * one length byte plus field_len data bytes, hence the +1 steps.
 */
4542 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4546 while (parsed < eir_len) {
4547 u8 field_len = eir[0];
4552 parsed += field_len + 1;
4553 eir += field_len + 1;
/* HCI Extended Inquiry Result event handler.
 *
 * Iterates over the responses in the event (first byte of the packet
 * is the response count), updates the inquiry cache for each remote
 * device and reports it to mgmt together with its EIR data. The
 * ssp_mode is set to 0x01 since extended inquiry results are only
 * produced by SSP-capable devices. Periodic-inquiry results are
 * skipped.
 */
4559 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4560 struct sk_buff *skb)
4562 struct inquiry_data data;
4563 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4564 int num_rsp = *((__u8 *) skb->data);
4567 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Reject truncated packets that cannot hold num_rsp entries */
4569 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4572 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4577 for (; num_rsp; num_rsp--, info++) {
4581 bacpy(&data.bdaddr, &info->bdaddr);
4582 data.pscan_rep_mode = info->pscan_rep_mode;
4583 data.pscan_period_mode = info->pscan_period_mode;
4584 data.pscan_mode = 0x00;
4585 memcpy(data.dev_class, info->dev_class, 3);
4586 data.clock_offset = info->clock_offset;
4587 data.rssi = info->rssi;
4588 data.ssp_mode = 0x01;
/* With mgmt active, check whether the EIR already carries the
 * complete device name so a name request can be skipped.
 */
4590 if (hci_dev_test_flag(hdev, HCI_MGMT))
4591 name_known = eir_get_data(info->data,
4593 EIR_NAME_COMPLETE, NULL);
4597 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4599 eir_len = eir_get_length(info->data, sizeof(info->data));
4601 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4602 info->dev_class, info->rssi,
4603 flags, info->data, eir_len, NULL, 0);
4606 hci_dev_unlock(hdev);
/* HCI Encryption Key Refresh Complete event handler.
 *
 * Only acted upon for LE links (BR/EDR handles this via the
 * auth_complete event). Promotes the pending security level, clears
 * the pending-encryption flag, disconnects on a failed refresh of an
 * established connection, and otherwise confirms the connection or
 * the authentication depending on the current state.
 */
4609 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4610 struct sk_buff *skb)
4612 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4613 struct hci_conn *conn;
4615 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4616 __le16_to_cpu(ev->handle));
4620 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4624 /* For BR/EDR the necessary steps are taken through the
4625 * auth_complete event.
4627 if (conn->type != LE_LINK)
4631 conn->sec_level = conn->pending_sec_level;
4633 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* A failed refresh on a live link is treated as an auth failure */
4635 if (ev->status && conn->state == BT_CONNECTED) {
4636 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4637 hci_conn_drop(conn);
4641 if (conn->state == BT_CONFIG) {
4643 conn->state = BT_CONNECTED;
4645 hci_connect_cfm(conn, ev->status);
4646 hci_conn_drop(conn);
4648 hci_auth_cfm(conn, ev->status);
4650 hci_conn_hold(conn);
4651 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4652 hci_conn_drop(conn);
4656 hci_dev_unlock(hdev);
/* Derive the authentication requirement to send in an IO Capability
 * Reply, combining the remote's requested requirement with the local
 * auth_type. Bit 0 of the requirement encodes MITM protection: it is
 * requested only when both sides have IO capabilities beyond
 * NoInputNoOutput, and dropped otherwise.
 */
4659 static u8 hci_get_auth_req(struct hci_conn *conn)
4661 /* If remote requests no-bonding follow that lead */
4662 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4663 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4664 return conn->remote_auth | (conn->auth_type & 0x01);
4666 /* If both remote and local have enough IO capabilities, require
4669 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4670 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4671 return conn->remote_auth | 0x01;
4673 /* No MITM protection possible so ignore remote requirement */
4674 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Determine the OOB Data Present value for an IO Capability Reply on
 * a BR/EDR connection, based on any stored remote OOB data and the
 * Secure Connections / SC-Only configuration. In SC-Only mode the
 * P-256 (hash256/rand256) values must be non-zero; without SC, the
 * P-192 (hash192/rand192) values are checked instead. crypto_memneq()
 * is used for the comparisons to avoid timing side channels.
 */
4677 static u8 bredr_oob_data_present(struct hci_conn *conn)
4679 struct hci_dev *hdev = conn->hdev;
4680 struct oob_data *data;
4682 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4686 if (bredr_sc_enabled(hdev)) {
4687 /* When Secure Connections is enabled, then just
4688 * return the present value stored with the OOB
4689 * data. The stored value contains the right present
4690 * information. However it can only be trusted when
4691 * not in Secure Connection Only mode.
4693 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4694 return data->present;
4696 /* When Secure Connections Only mode is enabled, then
4697 * the P-256 values are required. If they are not
4698 * available, then do not declare that OOB data is
4701 if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
4702 !crypto_memneq(data->hash256, ZERO_KEY, 16))
4708 /* When Secure Connections is not enabled or actually
4709 * not supported by the hardware, then check that if
4710 * P-192 data values are present.
4712 if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
4713 !crypto_memneq(data->hash192, ZERO_KEY, 16))
/* HCI IO Capability Request event handler.
 *
 * For an SSP-enabled connection, decides whether pairing is allowed
 * (bondable, we initiated it, or the remote requests no bonding) and
 * responds with either an IO Capability Reply — carrying our IO
 * capability, the computed authentication requirement and the OOB
 * data present value — or a negative reply with "pairing not
 * allowed".
 */
4719 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4721 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4722 struct hci_conn *conn;
4724 BT_DBG("%s", hdev->name);
4728 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4729 if (!conn || !hci_conn_ssp_enabled(conn))
/* Keep a reference for the duration of the pairing exchange */
4732 hci_conn_hold(conn);
4734 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4737 /* Allow pairing if we're pairable, the initiators of the
4738 * pairing or if the remote is not requesting bonding.
4740 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4741 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4742 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4743 struct hci_cp_io_capability_reply cp;
4745 bacpy(&cp.bdaddr, &ev->bdaddr);
4746 /* Change the IO capability from KeyboardDisplay
4747 * to DisplayYesNo as it is not supported by BT spec. */
4748 cp.capability = (conn->io_capability == 0x04) ?
4749 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4751 /* If we are initiators, there is no remote information yet */
4752 if (conn->remote_auth == 0xff) {
4753 /* Request MITM protection if our IO caps allow it
4754 * except for the no-bonding case.
4756 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4757 conn->auth_type != HCI_AT_NO_BONDING)
4758 conn->auth_type |= 0x01;
4760 conn->auth_type = hci_get_auth_req(conn);
4763 /* If we're not bondable, force one of the non-bondable
4764 * authentication requirement values.
4766 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4767 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4769 cp.authentication = conn->auth_type;
4770 cp.oob_data = bredr_oob_data_present(conn);
4772 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4775 struct hci_cp_io_capability_neg_reply cp;
4777 bacpy(&cp.bdaddr, &ev->bdaddr);
4778 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4780 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4785 hci_dev_unlock(hdev);
/* HCI IO Capability Response event handler: record the remote
 * device's IO capability and authentication requirement on the
 * connection for use during the rest of the pairing procedure.
 */
4788 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4790 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4791 struct hci_conn *conn;
4793 BT_DBG("%s", hdev->name);
4797 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4801 conn->remote_cap = ev->capability;
4802 conn->remote_auth = ev->authentication;
4805 hci_dev_unlock(hdev);
/* HCI User Confirmation Request event handler.
 *
 * Implements the numeric-comparison decision logic: rejects the
 * request when we need MITM but the remote cannot provide it,
 * auto-accepts (optionally after hdev->auto_accept_delay) when
 * neither side needs MITM protection, and otherwise forwards the
 * request to user space via mgmt, with confirm_hint indicating
 * whether user space should merely authorize (1) or actually compare
 * the passkey (0).
 */
4808 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4809 struct sk_buff *skb)
4811 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4812 int loc_mitm, rem_mitm, confirm_hint = 0;
4813 struct hci_conn *conn;
4815 BT_DBG("%s", hdev->name);
4819 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4822 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement encodes the MITM request */
4826 loc_mitm = (conn->auth_type & 0x01);
4827 rem_mitm = (conn->remote_auth & 0x01);
4829 /* If we require MITM but the remote device can't provide that
4830 * (it has NoInputNoOutput) then reject the confirmation
4831 * request. We check the security level here since it doesn't
4832 * necessarily match conn->auth_type.
4834 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4835 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4836 BT_DBG("Rejecting request: remote device can't provide MITM")
4837 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4838 sizeof(ev->bdaddr), &ev->bdaddr);
4842 /* If no side requires MITM protection; auto-accept */
4843 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4844 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4846 /* If we're not the initiators request authorization to
4847 * proceed from user space (mgmt_user_confirm with
4848 * confirm_hint set to 1). The exception is if neither
4849 * side had MITM or if the local IO capability is
4850 * NoInputNoOutput, in which case we do auto-accept
4852 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4853 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4854 (loc_mitm || rem_mitm)) {
4855 BT_DBG("Confirming auto-accept as acceptor");
4860 /* If there already exists link key in local host, leave the
4861 * decision to user space since the remote device could be
4862 * legitimate or malicious.
4864 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4865 bt_dev_dbg(hdev, "Local host already has link key");
4870 BT_DBG("Auto-accept of user confirmation with %ums delay",
4871 hdev->auto_accept_delay);
/* Delayed auto-accept gives the remote side time to abort */
4873 if (hdev->auto_accept_delay > 0) {
4874 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4875 queue_delayed_work(conn->hdev->workqueue,
4876 &conn->auto_accept_work, delay);
4880 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4881 sizeof(ev->bdaddr), &ev->bdaddr);
4886 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4887 le32_to_cpu(ev->passkey), confirm_hint);
4890 hci_dev_unlock(hdev);
/* HCI User Passkey Request event handler: simply forward the request
 * to user space via mgmt when the mgmt interface is active.
 */
4893 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4894 struct sk_buff *skb)
4896 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4898 BT_DBG("%s", hdev->name);
4900 if (hci_dev_test_flag(hdev, HCI_MGMT))
4901 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* HCI User Passkey Notification event handler: store the passkey to
 * display on the connection, reset the entered-digit counter, and
 * notify user space through mgmt.
 */
4904 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4905 struct sk_buff *skb)
4907 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4908 struct hci_conn *conn;
4910 BT_DBG("%s", hdev->name);
4912 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4916 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4917 conn->passkey_entered = 0;
4919 if (hci_dev_test_flag(hdev, HCI_MGMT))
4920 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4921 conn->dst_type, conn->passkey_notify,
4922 conn->passkey_entered);
/* HCI Keypress Notification event handler: track how many passkey
 * digits the remote device has entered/erased so far and relay the
 * updated count to user space via mgmt so it can render progress.
 */
4925 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4927 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4928 struct hci_conn *conn;
4930 BT_DBG("%s", hdev->name);
4932 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4937 case HCI_KEYPRESS_STARTED:
4938 conn->passkey_entered = 0;
4941 case HCI_KEYPRESS_ENTERED:
4942 conn->passkey_entered++;
4945 case HCI_KEYPRESS_ERASED:
4946 conn->passkey_entered--;
4949 case HCI_KEYPRESS_CLEARED:
4950 conn->passkey_entered = 0;
4953 case HCI_KEYPRESS_COMPLETED:
4957 if (hci_dev_test_flag(hdev, HCI_MGMT))
4958 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4959 conn->dst_type, conn->passkey_notify,
4960 conn->passkey_entered);
/* HCI Simple Pairing Complete event handler: reset the stored remote
 * authentication requirement to "unknown" (0xff), report a pairing
 * failure to mgmt when appropriate, and drop the reference taken when
 * pairing started.
 */
4963 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4964 struct sk_buff *skb)
4966 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4967 struct hci_conn *conn;
4969 BT_DBG("%s", hdev->name);
4973 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4974 if (!conn || !hci_conn_ssp_enabled(conn))
4977 /* Reset the authentication requirement to unknown */
4978 conn->remote_auth = 0xff;
4980 /* To avoid duplicate auth_failed events to user space we check
4981 * the HCI_CONN_AUTH_PEND flag which will be set if we
4982 * initiated the authentication. A traditional auth_complete
4983 * event gets always produced as initiator and is also mapped to
4984 * the mgmt_auth_failed event */
4985 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4986 mgmt_auth_failed(conn, ev->status);
4988 hci_conn_drop(conn);
4991 hci_dev_unlock(hdev);
/* HCI Remote Host Supported Features Notification event handler:
 * cache the remote host features as feature page 1 on any existing
 * connection and record the remote's SSP support in the inquiry
 * cache entry.
 */
4994 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4995 struct sk_buff *skb)
4997 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4998 struct inquiry_entry *ie;
4999 struct hci_conn *conn;
5001 BT_DBG("%s", hdev->name);
5005 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5007 memcpy(conn->features[1], ev->features, 8);
5009 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5011 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5013 hci_dev_unlock(hdev);
/* HCI Remote OOB Data Request event handler.
 *
 * Looks up locally stored OOB data for the remote device and answers
 * with a negative reply when none is found. Otherwise replies with
 * either the extended (P-192 + P-256) variant when Secure Connections
 * is enabled — zeroing the P-192 values in SC-Only mode — or the
 * legacy P-192-only reply.
 */
5016 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
5017 struct sk_buff *skb)
5019 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
5020 struct oob_data *data;
5022 BT_DBG("%s", hdev->name);
5026 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5029 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5031 struct hci_cp_remote_oob_data_neg_reply cp;
5033 bacpy(&cp.bdaddr, &ev->bdaddr);
5034 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5039 if (bredr_sc_enabled(hdev)) {
5040 struct hci_cp_remote_oob_ext_data_reply cp;
5042 bacpy(&cp.bdaddr, &ev->bdaddr);
/* SC-Only: never expose P-192 values, send zeroes instead */
5043 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5044 memset(cp.hash192, 0, sizeof(cp.hash192));
5045 memset(cp.rand192, 0, sizeof(cp.rand192));
5047 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5048 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5050 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5051 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5053 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5056 struct hci_cp_remote_oob_data_reply cp;
5058 bacpy(&cp.bdaddr, &ev->bdaddr);
5059 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5060 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5062 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5067 hci_dev_unlock(hdev);
/* High Speed (AMP) support — compiled only with CONFIG_BT_HS */
5070 #if IS_ENABLED(CONFIG_BT_HS)
/* HCI Channel Selected event handler (AMP): look up the physical
 * link by its handle and kick off reading the final local AMP assoc
 * data for it.
 */
5071 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5073 struct hci_ev_channel_selected *ev = (void *)skb->data;
5074 struct hci_conn *hcon;
5076 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5078 skb_pull(skb, sizeof(*ev));
5080 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5084 amp_read_loc_assoc_final_data(hdev, hcon);
/* HCI Physical Link Complete event handler (AMP): on success, mark
 * the AMP physical link connected, copy the peer address from the
 * underlying BR/EDR connection, register it in debugfs/sysfs, and
 * confirm the physical link to the AMP manager.
 */
5087 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5088 struct sk_buff *skb)
5090 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5091 struct hci_conn *hcon, *bredr_hcon;
5093 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5098 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* The AMP link inherits its destination from the BR/EDR link that
 * the AMP manager is attached to.
 */
5110 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5112 hcon->state = BT_CONNECTED;
5113 bacpy(&hcon->dst, &bredr_hcon->dst);
5115 hci_conn_hold(hcon);
5116 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5117 hci_conn_drop(hcon);
5119 hci_debugfs_create_conn(hcon);
5120 hci_conn_add_sysfs(hcon);
5122 amp_physical_cfm(bredr_hcon, hcon);
5125 hci_dev_unlock(hdev);
/* HCI Logical Link Complete event handler (AMP): create an hci_chan
 * for the new logical link on top of the physical link, record its
 * handle, and — if the AMP manager has a BR/EDR L2CAP channel
 * attached — propagate the logical-link confirmation to L2CAP with
 * the AMP block MTU.
 */
5128 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5130 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5131 struct hci_conn *hcon;
5132 struct hci_chan *hchan;
5133 struct amp_mgr *mgr;
5135 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5136 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5139 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5143 /* Create AMP hchan */
5144 hchan = hci_chan_create(hcon);
5148 hchan->handle = le16_to_cpu(ev->handle);
5151 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5153 mgr = hcon->amp_mgr;
5154 if (mgr && mgr->bredr_chan) {
5155 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5157 l2cap_chan_lock(bredr_chan);
5159 bredr_chan->conn->mtu = hdev->block_mtu;
5160 l2cap_logical_cfm(bredr_chan, hchan, 0);
5161 hci_conn_hold(hcon);
5163 l2cap_chan_unlock(bredr_chan);
/* HCI Disconnection Logical Link Complete event handler (AMP): find
 * the AMP channel for the logical-link handle and tear it down with
 * the reported reason.
 */
5167 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5168 struct sk_buff *skb)
5170 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5171 struct hci_chan *hchan;
5173 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5174 le16_to_cpu(ev->handle), ev->status);
5181 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5182 if (!hchan || !hchan->amp)
5185 amp_destroy_logical_link(hchan, ev->reason);
5188 hci_dev_unlock(hdev);
/* HCI Disconnection Physical Link Complete event handler (AMP): mark
 * the AMP physical link closed and notify upper layers of the
 * disconnect reason.
 */
5191 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5192 struct sk_buff *skb)
5194 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5195 struct hci_conn *hcon;
5197 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5204 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5205 if (hcon && hcon->type == AMP_LINK) {
5206 hcon->state = BT_CLOSED;
5207 hci_disconn_cfm(hcon, ev->reason);
5211 hci_dev_unlock(hdev);
/* Record the initiator and responder addresses of a new LE
 * connection on the hci_conn, based on our role.
 *
 * As responder (peer initiated): the peer address is the initiator,
 * and our responder address is derived from the advertising address
 * type — preferring the controller-reported local RPA when present.
 * As initiator: the peer address is the responder, and our initiator
 * address is the local RPA, hdev->rpa (when HCI_PRIVACY is set) or
 * the identity address.
 */
5215 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5216 u8 bdaddr_type, bdaddr_t *local_rpa)
5219 conn->dst_type = bdaddr_type;
5220 conn->resp_addr_type = bdaddr_type;
5221 bacpy(&conn->resp_addr, bdaddr);
5223 /* Check if the controller has set a Local RPA then it must be
5224 * used instead or hdev->rpa.
5226 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5227 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5228 bacpy(&conn->init_addr, local_rpa);
5229 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5230 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5231 bacpy(&conn->init_addr, &conn->hdev->rpa);
5233 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5234 &conn->init_addr_type);
5237 conn->resp_addr_type = conn->hdev->adv_addr_type;
5238 /* Check if the controller has set a Local RPA then it must be
5239 * used instead or hdev->rpa.
5241 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5242 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5243 bacpy(&conn->resp_addr, local_rpa);
5244 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5245 /* In case of ext adv, resp_addr will be updated in
5246 * Adv Terminated event.
5248 if (!ext_adv_capable(conn->hdev))
5249 bacpy(&conn->resp_addr,
5250 &conn->hdev->random_addr);
5252 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5255 conn->init_addr_type = bdaddr_type;
5256 bacpy(&conn->init_addr, bdaddr);
5258 /* For incoming connections, set the default minimum
5259 * and maximum connection interval. They will be used
5260 * to check if the parameters are in range and if not
5261 * trigger the connection update procedure.
5263 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5264 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common handler for LE Connection Complete and LE Enhanced
 * Connection Complete events.
 *
 * Locates (or creates) the hci_conn for the new LE connection,
 * resolves the peer address back to an identity address via any
 * matching IRK, normalizes controller-resolved address types, drops
 * the connection if the peer is on the reject list, notifies mgmt,
 * stores the negotiated connection parameters, and either requests
 * the remote features (central role, or peripheral when the
 * controller supports peripheral-initiated feature exchange) or goes
 * straight to BT_CONNECTED. Finally any pending auto-connect params
 * for the peer are cleaned up and background scanning is updated.
 */
5268 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5269 bdaddr_t *bdaddr, u8 bdaddr_type,
5270 bdaddr_t *local_rpa, u8 role, u16 handle,
5271 u16 interval, u16 latency,
5272 u16 supervision_timeout)
5274 struct hci_conn_params *params;
5275 struct hci_conn *conn;
5276 struct smp_irk *irk;
5281 /* All controllers implicitly stop advertising in the event of a
5282 * connection, so ensure that the state bit is cleared.
5284 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5286 conn = hci_lookup_le_connect(hdev);
5288 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5290 bt_dev_err(hdev, "no memory for new connection");
5294 conn->dst_type = bdaddr_type;
5296 /* If we didn't have a hci_conn object previously
5297 * but we're in central role this must be something
5298 * initiated using an accept list. Since accept list based
5299 * connections are not "first class citizens" we don't
5300 * have full tracking of them. Therefore, we go ahead
5301 * with a "best effort" approach of determining the
5302 * initiator address based on the HCI_PRIVACY flag.
5305 conn->resp_addr_type = bdaddr_type;
5306 bacpy(&conn->resp_addr, bdaddr);
5307 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5308 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5309 bacpy(&conn->init_addr, &hdev->rpa);
5311 hci_copy_identity_address(hdev,
5313 &conn->init_addr_type);
/* Connection established: the LE connection timeout no longer
 * applies.
 */
5317 cancel_delayed_work(&conn->le_conn_timeout);
5320 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5322 /* Lookup the identity address from the stored connection
5323 * address and address type.
5325 * When establishing connections to an identity address, the
5326 * connection procedure will store the resolvable random
5327 * address first. Now if it can be converted back into the
5328 * identity address, start using the identity address from
5331 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5333 bacpy(&conn->dst, &irk->bdaddr);
5334 conn->dst_type = irk->addr_type;
5337 /* When using controller based address resolution, then the new
5338 * address types 0x02 and 0x03 are used. These types need to be
5339 * converted back into either public address or random address type
5341 if (use_ll_privacy(hdev) &&
5342 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5343 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
5344 switch (conn->dst_type) {
5345 case ADDR_LE_DEV_PUBLIC_RESOLVED:
5346 conn->dst_type = ADDR_LE_DEV_PUBLIC;
5348 case ADDR_LE_DEV_RANDOM_RESOLVED:
5349 conn->dst_type = ADDR_LE_DEV_RANDOM;
5355 hci_le_conn_failed(conn, status);
5359 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5360 addr_type = BDADDR_LE_PUBLIC;
5362 addr_type = BDADDR_LE_RANDOM;
5364 /* Drop the connection if the device is blocked */
5365 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5366 hci_conn_drop(conn);
5370 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5371 mgmt_device_connected(hdev, conn, NULL, 0);
5373 conn->sec_level = BT_SECURITY_LOW;
5374 conn->handle = handle;
5375 conn->state = BT_CONFIG;
5377 /* Store current advertising instance as connection advertising instance
5378 * when sotfware rotation is in use so it can be re-enabled when
5381 if (!ext_adv_capable(hdev))
5382 conn->adv_instance = hdev->cur_adv_instance;
5384 conn->le_conn_interval = interval;
5385 conn->le_conn_latency = latency;
5386 conn->le_supv_timeout = supervision_timeout;
5388 hci_debugfs_create_conn(conn);
5389 hci_conn_add_sysfs(conn);
5391 /* The remote features procedure is defined for central
5392 * role only. So only in case of an initiated connection
5393 * request the remote features.
5395 * If the local controller supports peripheral-initiated features
5396 * exchange, then requesting the remote features in peripheral
5397 * role is possible. Otherwise just transition into the
5398 * connected state without requesting the remote features.
5401 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5402 struct hci_cp_le_read_remote_features cp;
5404 cp.handle = __cpu_to_le16(conn->handle);
5406 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5409 hci_conn_hold(conn);
5411 conn->state = BT_CONNECTED;
5412 hci_connect_cfm(conn, status);
/* Connection is up: release the pending auto-connect entry's
 * ownership of the conn object.
 */
5415 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5418 list_del_init(&params->action);
5420 hci_conn_drop(params->conn);
5421 hci_conn_put(params->conn);
5422 params->conn = NULL;
5427 hci_update_background_scan(hdev);
5428 hci_dev_unlock(hdev);
/* HCI LE Connection Complete event handler: thin wrapper that decodes
 * the event fields and delegates to le_conn_complete_evt() (no local
 * RPA is available in the legacy event, hence NULL).
 */
5431 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5433 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5435 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5437 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5438 NULL, ev->role, le16_to_cpu(ev->handle),
5439 le16_to_cpu(ev->interval),
5440 le16_to_cpu(ev->latency),
5441 le16_to_cpu(ev->supervision_timeout));
/* HCI LE Enhanced Connection Complete event handler: like the legacy
 * variant but passes the controller-reported local RPA through to
 * le_conn_complete_evt(). With controller-based address resolution
 * active, address resolution is disabled afterwards (it cannot be
 * reconfigured while enabled).
 */
5444 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5445 struct sk_buff *skb)
5447 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5449 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5451 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5452 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5453 le16_to_cpu(ev->interval),
5454 le16_to_cpu(ev->latency),
5455 le16_to_cpu(ev->supervision_timeout));
5457 if (use_ll_privacy(hdev) &&
5458 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5459 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5460 hci_req_disable_address_resolution(hdev);
/* HCI LE Advertising Set Terminated event handler.
 *
 * On error, removes the terminated advertising instance and tells
 * mgmt. On success, marks the instance disabled, records the
 * advertising instance on the resulting connection so it can be
 * re-enabled after disconnect, and fixes up conn->resp_addr with the
 * random address actually used (set-specific or the global one) when
 * it was left unset by the connection-complete path.
 */
5463 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5465 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5466 struct hci_conn *conn;
5467 struct adv_info *adv;
5469 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5471 adv = hci_find_adv_instance(hdev, ev->handle);
5477 /* Remove advertising as it has been terminated */
5478 hci_remove_adv_instance(hdev, ev->handle);
5479 mgmt_advertising_removed(NULL, hdev, ev->handle);
5485 adv->enabled = false;
5487 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5489 /* Store handle in the connection so the correct advertising
5490 * instance can be re-enabled when disconnected.
5492 conn->adv_instance = ev->handle;
5494 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5495 bacmp(&conn->resp_addr, BDADDR_ANY))
5499 bacpy(&conn->resp_addr, &hdev->random_addr);
5504 bacpy(&conn->resp_addr, &adv->random_addr);
/* HCI LE Connection Update Complete event handler: store the newly
 * negotiated interval, latency and supervision timeout on the
 * connection object.
 */
5508 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5509 struct sk_buff *skb)
5511 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5512 struct hci_conn *conn;
5514 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5521 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5523 conn->le_conn_interval = le16_to_cpu(ev->interval);
5524 conn->le_conn_latency = le16_to_cpu(ev->latency);
5525 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5528 hci_dev_unlock(hdev);
5531 /* This function requires the caller holds hdev->lock */
/* Decide, based on an incoming advertising report, whether we should
 * initiate an LE connection to the advertiser.
 *
 * Only connectable advertising (ADV_IND / ADV_DIRECT_IND) from
 * non-blocked devices is considered, and only when we have matching
 * pending connection parameters whose auto_connect policy permits it.
 * On a successful hci_connect_le() the conn pointer may be stored in
 * the params so the reference count stays consistent if the params
 * are later removed. Returns the hci_conn (or NULL-equivalent paths
 * when no connection is attempted).
 */
5532 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5534 u8 addr_type, u8 adv_type,
5535 bdaddr_t *direct_rpa)
5537 struct hci_conn *conn;
5538 struct hci_conn_params *params;
5540 /* If the event is not connectable don't proceed further */
5541 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5544 /* Ignore if the device is blocked */
5545 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5548 /* Most controller will fail if we try to create new connections
5549 * while we have an existing one in peripheral role.
5551 if (hdev->conn_hash.le_num_peripheral > 0 &&
5552 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5553 !(hdev->le_states[3] & 0x10)))
5556 /* If we're not connectable only connect devices that we have in
5557 * our pend_le_conns list.
5559 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5564 if (!params->explicit_connect) {
5565 switch (params->auto_connect) {
5566 case HCI_AUTO_CONN_DIRECT:
5567 /* Only devices advertising with ADV_DIRECT_IND are
5568 * triggering a connection attempt. This is allowing
5569 * incoming connections from peripheral devices.
5571 if (adv_type != LE_ADV_DIRECT_IND)
5574 case HCI_AUTO_CONN_ALWAYS:
5575 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5576 * are triggering a connection attempt. This means
5577 * that incoming connections from peripheral device are
5578 * accepted and also outgoing connections to peripheral
5579 * devices are established when found.
5587 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5588 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5590 if (!IS_ERR(conn)) {
5591 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5592 * by higher layer that tried to connect, if no then
5593 * store the pointer since we don't really have any
5594 * other owner of the object besides the params that
5595 * triggered it. This way we can abort the connection if
5596 * the parameters get removed and keep the reference
5597 * count consistent once the connection is established.
5600 if (!params->explicit_connect)
5601 params->conn = hci_conn_get(conn);
5606 switch (PTR_ERR(conn)) {
5608 /* If hci_connect() returns -EBUSY it means there is already
5609 * an LE connection attempt going on. Since controllers don't
5610 * support more than one connection attempt at the time, we
5611 * don't consider this an error case.
5615 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* process_adv_report - handle one LE advertising report.
 *
 * Validates the advertising PDU type/length, resolves RPAs back to
 * identity addresses via the IRK store, kicks any pending LE connection
 * for the advertiser, and forwards device-found events to mgmt.  Scan
 * responses are merged with the previously cached advertising report
 * from the same device when possible.
 *
 * NOTE(review): lines appear to be elided in this chunk; comments below
 * describe only the statements that are visible here.
 */
5622 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5623 u8 bdaddr_type, bdaddr_t *direct_addr,
5624 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5627 struct discovery_state *d = &hdev->discovery;
5628 struct smp_irk *irk;
5629 struct hci_conn *conn;
/* Only the PDU types listed below are handled; anything else is logged
 * (rate-limited, so a hostile advertiser cannot flood the log) and the
 * report is ignored.
 */
5636 case LE_ADV_DIRECT_IND:
5637 case LE_ADV_SCAN_IND:
5638 case LE_ADV_NONCONN_IND:
5639 case LE_ADV_SCAN_RSP:
5642 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5643 "type: 0x%02x", type);
/* Legacy (non-extended) advertising data is capped at 31 bytes. */
5647 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5648 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5652 /* Find the end of the data in case the report contains padded zero
5653 * bytes at the end causing an invalid length value.
5655 * When data is NULL, len is 0 so there is no need for extra ptr
5656 * check as 'ptr < data + 0' is already false in such case.
*/
/* Walk the AD structures (first byte of each is its length); stop at a
 * zero length byte or when an element would overrun the buffer.
 */
5658 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5659 if (ptr + 1 + *ptr > data + len)
5663 /* Adjust for actual length. This handles the case when remote
5664 * device is advertising with incorrect data length.
*/
5668 /* If the direct address is present, then this report is from
5669 * a LE Direct Advertising Report event. In that case it is
5670 * important to see if the address is matching the local
5671 * controller address.
*/
5674 /* Only resolvable random addresses are valid for these
5675 * kind of reports and others can be ignored.
*/
5677 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5680 /* If the controller is not using resolvable random
5681 * addresses, then this report can be ignored.
*/
5683 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5686 /* If the local IRK of the controller does not match
5687 * with the resolvable random address provided, then
5688 * this report can be ignored.
*/
5690 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5694 /* Check if we need to convert to identity address */
5695 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
/* From here on bdaddr/bdaddr_type refer to the identity address when an
 * IRK was found.
 */
5697 bdaddr = &irk->bdaddr;
5698 bdaddr_type = irk->addr_type;
5701 /* Check if we have been requested to connect to this device.
5703 * direct_addr is set only for directed advertising reports (it is NULL
5704 * for advertising reports) and is already verified to be RPA above.
*/
5706 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
/* Cache the adv data on the connection so mgmt_device_connected can
 * include it later (legacy-sized reports only).
 */
5708 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5709 /* Store report for later inclusion by
5710 * mgmt_device_connected
*/
5712 memcpy(conn->le_adv_data, data, len);
5713 conn->le_adv_data_len = len;
5716 /* Passive scanning shouldn't trigger any device found events,
5717 * except for devices marked as CONN_REPORT for which we do send
5718 * device found events, or advertisement monitoring requested.
*/
5720 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5721 if (type == LE_ADV_DIRECT_IND)
5724 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5725 bdaddr, bdaddr_type) &&
5726 idr_is_empty(&hdev->adv_monitors_idr))
5729 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5730 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5733 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5734 rssi, flags, data, len, NULL, 0);
5738 /* When receiving non-connectable or scannable undirected
5739 * advertising reports, this means that the remote device is
5740 * not connectable and then clearly indicate this in the
5741 * device found event.
5743 * When receiving a scan response, then there is no way to
5744 * know if the remote device is connectable or not. However
5745 * since scan responses are merged with a previously seen
5746 * advertising report, the flags field from that report
5749 * In the really unlikely case that a controller get confused
5750 * and just sends a scan response event, then it is marked as
5751 * not connectable as well.
*/
5753 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5754 type == LE_ADV_SCAN_RSP)
5755 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5759 /* If there's nothing pending either store the data from this
5760 * event or send an immediate device found event if the data
5761 * should not be stored for later.
*/
5763 if (!ext_adv && !has_pending_adv_report(hdev)) {
5764 /* If the report will trigger a SCAN_REQ store it for
*/
5767 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5768 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5769 rssi, flags, data, len);
5773 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5774 rssi, flags, data, len, NULL, 0);
5778 /* Check if the pending report is for the same device as the new one */
5779 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5780 bdaddr_type == d->last_adv_addr_type);
5782 /* If the pending data doesn't match this report or this isn't a
5783 * scan response (e.g. we got a duplicate ADV_IND) then force
5784 * sending of the pending data.
*/
5786 if (type != LE_ADV_SCAN_RSP || !match) {
5787 /* Send out whatever is in the cache, but skip duplicates */
5789 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5790 d->last_adv_addr_type, NULL,
5791 d->last_adv_rssi, d->last_adv_flags,
5793 d->last_adv_data_len, NULL, 0);
5795 /* If the new report will trigger a SCAN_REQ store it for
*/
5798 if (!ext_adv && (type == LE_ADV_IND ||
5799 type == LE_ADV_SCAN_IND)) {
5800 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5801 rssi, flags, data, len);
5805 /* The advertising reports cannot be merged, so clear
5806 * the pending report and send out a device found event.
*/
5808 clear_pending_adv_report(hdev);
5809 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5810 rssi, flags, data, len, NULL, 0);
5814 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5815 * the new event is a SCAN_RSP. We can therefore proceed with
5816 * sending a merged device found event.
*/
5818 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5819 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5820 d->last_adv_data, d->last_adv_data_len, data, len);
5821 clear_pending_adv_report(hdev);
/* hci_le_adv_report_evt - handle an LE Advertising Report meta event.
 *
 * The event carries a report count followed by back-to-back variable
 * length reports; each report is bounds-checked against the skb tail
 * before being passed to process_adv_report().
 */
5824 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5826 u8 num_reports = skb->data[0];
5827 void *ptr = &skb->data[1];
5831 while (num_reports--) {
5832 struct hci_ev_le_advertising_info *ev = ptr;
/* Guard against a report header that would extend past the skb —
 * a malformed/hostile controller could otherwise cause an OOB read.
 */
5835 if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
5836 bt_dev_err(hdev, "Malicious advertising data.");
/* The declared data must fit in the skb and within the legacy 31-byte
 * advertising limit; the RSSI byte trails the data.
 */
5840 if (ev->length <= HCI_MAX_AD_LENGTH &&
5841 ev->data + ev->length <= skb_tail_pointer(skb)) {
5842 rssi = ev->data[ev->length];
5843 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5844 ev->bdaddr_type, NULL, 0, rssi,
5845 ev->data, ev->length, false);
5847 bt_dev_err(hdev, "Dropping invalid advertising data");
/* Advance past header, adv data, and the trailing RSSI byte (+1). */
5850 ptr += sizeof(*ev) + ev->length + 1;
5853 hci_dev_unlock(hdev);
/* ext_evt_type_to_legacy - map an extended advertising event type to the
 * legacy LE_ADV_* PDU type understood by process_adv_report().
 *
 * Legacy-PDU reports are decoded by exact value; true extended reports
 * are classified by their connectable/scannable/directed property bits.
 * Returns LE_ADV_INVALID for combinations that cannot be mapped.
 */
5856 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5858 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5860 case LE_LEGACY_ADV_IND:
5862 case LE_LEGACY_ADV_DIRECT_IND:
5863 return LE_ADV_DIRECT_IND;
5864 case LE_LEGACY_ADV_SCAN_IND:
5865 return LE_ADV_SCAN_IND;
5866 case LE_LEGACY_NONCONN_IND:
5867 return LE_ADV_NONCONN_IND;
5868 case LE_LEGACY_SCAN_RSP_ADV:
5869 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5870 return LE_ADV_SCAN_RSP;
/* Non-legacy PDU: derive the closest legacy type from the property
 * bits, most specific first.
 */
5876 if (evt_type & LE_EXT_ADV_CONN_IND) {
5877 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5878 return LE_ADV_DIRECT_IND;
5883 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5884 return LE_ADV_SCAN_RSP;
5886 if (evt_type & LE_EXT_ADV_SCAN_IND)
5887 return LE_ADV_SCAN_IND;
5889 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5890 evt_type & LE_EXT_ADV_DIRECT_IND)
5891 return LE_ADV_NONCONN_IND;
5894 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5897 return LE_ADV_INVALID;
/* hci_le_ext_adv_report_evt - handle an LE Extended Advertising Report.
 *
 * Converts each report's extended event type to a legacy PDU type and
 * feeds it through the common process_adv_report() path; reports whose
 * type cannot be mapped are skipped.
 */
5900 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5902 u8 num_reports = skb->data[0];
5903 void *ptr = &skb->data[1];
5907 while (num_reports--) {
5908 struct hci_ev_le_ext_adv_report *ev = ptr;
5912 evt_type = __le16_to_cpu(ev->evt_type);
5913 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5914 if (legacy_evt_type != LE_ADV_INVALID) {
/* ext_adv flag is true only for true extended PDUs, which may
 * exceed the 31-byte legacy advertising data limit.
 */
5915 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5916 ev->bdaddr_type, NULL, 0, ev->rssi,
5917 ev->data, ev->length,
5918 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
/* Unlike legacy reports there is no trailing RSSI byte here. */
5921 ptr += sizeof(*ev) + ev->length;
5924 hci_dev_unlock(hdev);
/* hci_le_remote_feat_complete_evt - handle LE Read Remote Features
 * Complete.
 *
 * Stores the remote LE feature page on the connection and, when the
 * connection is still in BT_CONFIG, completes connection setup.
 */
5927 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5928 struct sk_buff *skb)
5930 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5931 struct hci_conn *conn;
5933 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5937 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5940 memcpy(conn->features[0], ev->features, 8);
5942 if (conn->state == BT_CONFIG) {
5945 /* If the local controller supports peripheral-initiated
5946 * features exchange, but the remote controller does
5947 * not, then it is possible that the error code 0x1a
5948 * for unsupported remote feature gets returned.
5950 * In this specific case, allow the connection to
5951 * transition into connected state and mark it as
*/
/* 0x1a = "Unsupported Remote Feature"; treated as success for
 * inbound connections when we advertise peripheral-initiated
 * feature exchange support.
 */
5954 if (!conn->out && ev->status == 0x1a &&
5955 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
5958 status = ev->status;
5960 conn->state = BT_CONNECTED;
5961 hci_connect_cfm(conn, status);
/* Drop the reference taken for the feature-exchange phase. */
5962 hci_conn_drop(conn);
5966 hci_dev_unlock(hdev);
/* hci_le_ltk_request_evt - handle an LE Long Term Key Request.
 *
 * Looks up the stored LTK for the connection, validates EDiv/Rand
 * against it, and replies with either the key material or a negative
 * reply when no usable key exists.
 */
5969 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5971 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5972 struct hci_cp_le_ltk_reply cp;
5973 struct hci_cp_le_ltk_neg_reply neg;
5974 struct hci_conn *conn;
5975 struct smp_ltk *ltk;
5977 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5981 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5985 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5989 if (smp_ltk_is_sc(ltk)) {
5990 /* With SC both EDiv and Rand are set to zero */
5991 if (ev->ediv || ev->rand)
5994 /* For non-SC keys check that EDiv and Rand match */
5995 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Copy enc_size key bytes and zero-pad the remainder of the reply. */
5999 memcpy(cp.ltk, ltk->val, ltk->enc_size);
6000 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6001 cp.handle = cpu_to_le16(conn->handle);
6003 conn->pending_sec_level = smp_ltk_sec_level(ltk);
6005 conn->enc_key_size = ltk->enc_size;
6007 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6009 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6010 * temporary key used to encrypt a connection following
6011 * pairing. It is used during the Encrypted Session Setup to
6012 * distribute the keys. Later, security can be re-established
6013 * using a distributed LTK.
*/
/* STKs are single-use: mark the connection and discard the key via
 * RCU so readers traversing the key list stay safe.
 */
6015 if (ltk->type == SMP_STK) {
6016 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6017 list_del_rcu(&ltk->list);
6018 kfree_rcu(ltk, rcu);
6020 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6023 hci_dev_unlock(hdev);
/* Negative-reply path: no matching/valid key for this request. */
6028 neg.handle = ev->handle;
6029 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6030 hci_dev_unlock(hdev);
/* send_conn_param_neg_reply - reject a remote LE connection parameter
 * request for @handle with the given HCI error reason.
 */
6033 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6036 struct hci_cp_le_conn_param_req_neg_reply cp;
6038 cp.handle = cpu_to_le16(handle);
6041 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* hci_le_remote_conn_param_req_evt - handle LE Remote Connection
 * Parameter Request.
 *
 * Validates the requested parameters, updates the stored per-device
 * connection parameters when acting as master, notifies mgmt, and
 * replies to the controller (positively or negatively).
 */
6045 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
6046 struct sk_buff *skb)
6048 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
6049 struct hci_cp_le_conn_param_req_reply cp;
6050 struct hci_conn *hcon;
6051 u16 handle, min, max, latency, timeout;
6053 handle = le16_to_cpu(ev->handle);
6054 min = le16_to_cpu(ev->interval_min);
6055 max = le16_to_cpu(ev->interval_max);
6056 latency = le16_to_cpu(ev->latency);
6057 timeout = le16_to_cpu(ev->timeout);
/* Unknown or not-yet-connected handle: negative reply. */
6059 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6060 if (!hcon || hcon->state != BT_CONNECTED)
6061 return send_conn_param_neg_reply(hdev, handle,
6062 HCI_ERROR_UNKNOWN_CONN_ID);
/* Parameters outside the spec-allowed ranges: negative reply. */
6064 if (hci_check_conn_params(min, max, latency, timeout))
6065 return send_conn_param_neg_reply(hdev, handle,
6066 HCI_ERROR_INVALID_LL_PARAMS);
6068 if (hcon->role == HCI_ROLE_MASTER) {
6069 struct hci_conn_params *params;
/* Remember the peer's preferred parameters for future connections. */
6074 params = hci_conn_params_lookup(hdev, &hcon->dst,
6077 params->conn_min_interval = min;
6078 params->conn_max_interval = max;
6079 params->conn_latency = latency;
6080 params->supervision_timeout = timeout;
6086 hci_dev_unlock(hdev);
6088 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6089 store_hint, min, max, latency, timeout);
/* Echo the (already little-endian) values back in the accept reply. */
6092 cp.handle = ev->handle;
6093 cp.interval_min = ev->interval_min;
6094 cp.interval_max = ev->interval_max;
6095 cp.latency = ev->latency;
6096 cp.timeout = ev->timeout;
6100 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* hci_le_direct_adv_report_evt - handle an LE Direct Advertising Report.
 *
 * Directed reports are fixed-size (no adv data), so the whole batch is
 * length-checked up front and then iterated as an array.
 */
6103 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
6104 struct sk_buff *skb)
6106 u8 num_reports = skb->data[0];
6107 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
/* Reject an empty batch or one whose declared count overruns the skb. */
6109 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
6114 for (; num_reports; num_reports--, ev++)
6115 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
6116 ev->bdaddr_type, &ev->direct_addr,
6117 ev->direct_addr_type, ev->rssi, NULL, 0,
6120 hci_dev_unlock(hdev);
/* hci_le_phy_update_evt - handle LE PHY Update Complete.
 *
 * Records the negotiated TX/RX PHYs on the matching connection.
 */
6123 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
6125 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
6126 struct hci_conn *conn;
6128 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6135 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6139 conn->le_tx_phy = ev->tx_phy;
6140 conn->le_rx_phy = ev->rx_phy;
6143 hci_dev_unlock(hdev);
/* hci_le_meta_evt - demultiplex an LE Meta event to its subevent
 * handler.  The meta header is consumed from the skb before dispatch,
 * so each handler sees its subevent payload at skb->data.
 */
6146 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
6148 struct hci_ev_le_meta *le_ev = (void *) skb->data;
6150 skb_pull(skb, sizeof(*le_ev));
6152 switch (le_ev->subevent) {
6153 case HCI_EV_LE_CONN_COMPLETE:
6154 hci_le_conn_complete_evt(hdev, skb);
6157 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
6158 hci_le_conn_update_complete_evt(hdev, skb);
6161 case HCI_EV_LE_ADVERTISING_REPORT:
6162 hci_le_adv_report_evt(hdev, skb);
6165 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
6166 hci_le_remote_feat_complete_evt(hdev, skb);
6169 case HCI_EV_LE_LTK_REQ:
6170 hci_le_ltk_request_evt(hdev, skb);
6173 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
6174 hci_le_remote_conn_param_req_evt(hdev, skb);
6177 case HCI_EV_LE_DIRECT_ADV_REPORT:
6178 hci_le_direct_adv_report_evt(hdev, skb);
6181 case HCI_EV_LE_PHY_UPDATE_COMPLETE:
6182 hci_le_phy_update_evt(hdev, skb);
6185 case HCI_EV_LE_EXT_ADV_REPORT:
6186 hci_le_ext_adv_report_evt(hdev, skb);
6189 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
6190 hci_le_enh_conn_complete_evt(hdev, skb);
6193 case HCI_EV_LE_EXT_ADV_SET_TERM:
6194 hci_le_ext_adv_term_evt(hdev, skb);
/* hci_get_cmd_complete - check whether @skb is the Command Complete
 * event that finishes the request identified by @opcode/@event, and
 * strip its headers so the caller can hand the bare parameters to the
 * request's completion callback.
 *
 * Returns true when the skb matches (or when the request legitimately
 * ended in Command Status, which carries no extra parameters).
 */
6202 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
6203 u8 event, struct sk_buff *skb)
6205 struct hci_ev_cmd_complete *ev;
6206 struct hci_event_hdr *hdr;
6211 if (skb->len < sizeof(*hdr)) {
6212 bt_dev_err(hdev, "too short HCI event");
6216 hdr = (void *) skb->data;
6217 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6220 if (hdr->evt != event)
6225 /* Check if request ended in Command Status - no way to retrieve
6226 * any extra parameters in this case.
*/
6228 if (hdr->evt == HCI_EV_CMD_STATUS)
6231 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
6232 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
6237 if (skb->len < sizeof(*ev)) {
6238 bt_dev_err(hdev, "too short cmd_complete event")
;
6242 ev = (void *) skb->data;
6243 skb_pull(skb, sizeof(*ev));
/* A different opcode means this completion belongs to another command. */
6245 if (opcode != __le16_to_cpu(ev->opcode)) {
6246 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
6247 __le16_to_cpu(ev->opcode));
6254 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6255 struct sk_buff *skb)
6257 struct hci_ev_le_advertising_info *adv;
6258 struct hci_ev_le_direct_adv_info *direct_adv;
6259 struct hci_ev_le_ext_adv_report *ext_adv;
6260 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6261 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6265 /* If we are currently suspended and this is the first BT event seen,
6266 * save the wake reason associated with the event.
6268 if (!hdev->suspended || hdev->wake_reason)
6271 /* Default to remote wake. Values for wake_reason are documented in the
6272 * Bluez mgmt api docs.
6274 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6276 /* Once configured for remote wakeup, we should only wake up for
6277 * reconnections. It's useful to see which device is waking us up so
6278 * keep track of the bdaddr of the connection event that woke us up.
6280 if (event == HCI_EV_CONN_REQUEST) {
6281 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6282 hdev->wake_addr_type = BDADDR_BREDR;
6283 } else if (event == HCI_EV_CONN_COMPLETE) {
6284 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6285 hdev->wake_addr_type = BDADDR_BREDR;
6286 } else if (event == HCI_EV_LE_META) {
6287 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6288 u8 subevent = le_ev->subevent;
6289 u8 *ptr = &skb->data[sizeof(*le_ev)];
6290 u8 num_reports = *ptr;
6292 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6293 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6294 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6296 adv = (void *)(ptr + 1);
6297 direct_adv = (void *)(ptr + 1);
6298 ext_adv = (void *)(ptr + 1);
6301 case HCI_EV_LE_ADVERTISING_REPORT:
6302 bacpy(&hdev->wake_addr, &adv->bdaddr);
6303 hdev->wake_addr_type = adv->bdaddr_type;
6305 case HCI_EV_LE_DIRECT_ADV_REPORT:
6306 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6307 hdev->wake_addr_type = direct_adv->bdaddr_type;
6309 case HCI_EV_LE_EXT_ADV_REPORT:
6310 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6311 hdev->wake_addr_type = ext_adv->bdaddr_type;
6316 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6320 hci_dev_unlock(hdev);
6323 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6325 struct hci_event_hdr *hdr = (void *) skb->data;
6326 hci_req_complete_t req_complete = NULL;
6327 hci_req_complete_skb_t req_complete_skb = NULL;
6328 struct sk_buff *orig_skb = NULL;
6329 u8 status = 0, event = hdr->evt, req_evt = 0;
6330 u16 opcode = HCI_OP_NOP;
6333 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6337 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6338 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6339 opcode = __le16_to_cpu(cmd_hdr->opcode);
6340 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6345 /* If it looks like we might end up having to call
6346 * req_complete_skb, store a pristine copy of the skb since the
6347 * various handlers may modify the original one through
6348 * skb_pull() calls, etc.
6350 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6351 event == HCI_EV_CMD_COMPLETE)
6352 orig_skb = skb_clone(skb, GFP_KERNEL);
6354 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6356 /* Store wake reason if we're suspended */
6357 hci_store_wake_reason(hdev, event, skb);
6360 case HCI_EV_INQUIRY_COMPLETE:
6361 hci_inquiry_complete_evt(hdev, skb);
6364 case HCI_EV_INQUIRY_RESULT:
6365 hci_inquiry_result_evt(hdev, skb);
6368 case HCI_EV_CONN_COMPLETE:
6369 hci_conn_complete_evt(hdev, skb);
6372 case HCI_EV_CONN_REQUEST:
6373 hci_conn_request_evt(hdev, skb);
6376 case HCI_EV_DISCONN_COMPLETE:
6377 hci_disconn_complete_evt(hdev, skb);
6380 case HCI_EV_AUTH_COMPLETE:
6381 hci_auth_complete_evt(hdev, skb);
6384 case HCI_EV_REMOTE_NAME:
6385 hci_remote_name_evt(hdev, skb);
6388 case HCI_EV_ENCRYPT_CHANGE:
6389 hci_encrypt_change_evt(hdev, skb);
6392 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6393 hci_change_link_key_complete_evt(hdev, skb);
6396 case HCI_EV_REMOTE_FEATURES:
6397 hci_remote_features_evt(hdev, skb);
6400 case HCI_EV_CMD_COMPLETE:
6401 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6402 &req_complete, &req_complete_skb);
6405 case HCI_EV_CMD_STATUS:
6406 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6410 case HCI_EV_HARDWARE_ERROR:
6411 hci_hardware_error_evt(hdev, skb);
6414 case HCI_EV_ROLE_CHANGE:
6415 hci_role_change_evt(hdev, skb);
6418 case HCI_EV_NUM_COMP_PKTS:
6419 hci_num_comp_pkts_evt(hdev, skb);
6422 case HCI_EV_MODE_CHANGE:
6423 hci_mode_change_evt(hdev, skb);
6426 case HCI_EV_PIN_CODE_REQ:
6427 hci_pin_code_request_evt(hdev, skb);
6430 case HCI_EV_LINK_KEY_REQ:
6431 hci_link_key_request_evt(hdev, skb);
6434 case HCI_EV_LINK_KEY_NOTIFY:
6435 hci_link_key_notify_evt(hdev, skb);
6438 case HCI_EV_CLOCK_OFFSET:
6439 hci_clock_offset_evt(hdev, skb);
6442 case HCI_EV_PKT_TYPE_CHANGE:
6443 hci_pkt_type_change_evt(hdev, skb);
6446 case HCI_EV_PSCAN_REP_MODE:
6447 hci_pscan_rep_mode_evt(hdev, skb);
6450 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6451 hci_inquiry_result_with_rssi_evt(hdev, skb);
6454 case HCI_EV_REMOTE_EXT_FEATURES:
6455 hci_remote_ext_features_evt(hdev, skb);
6458 case HCI_EV_SYNC_CONN_COMPLETE:
6459 hci_sync_conn_complete_evt(hdev, skb);
6462 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6463 hci_extended_inquiry_result_evt(hdev, skb);
6466 case HCI_EV_KEY_REFRESH_COMPLETE:
6467 hci_key_refresh_complete_evt(hdev, skb);
6470 case HCI_EV_IO_CAPA_REQUEST:
6471 hci_io_capa_request_evt(hdev, skb);
6474 case HCI_EV_IO_CAPA_REPLY:
6475 hci_io_capa_reply_evt(hdev, skb);
6478 case HCI_EV_USER_CONFIRM_REQUEST:
6479 hci_user_confirm_request_evt(hdev, skb);
6482 case HCI_EV_USER_PASSKEY_REQUEST:
6483 hci_user_passkey_request_evt(hdev, skb);
6486 case HCI_EV_USER_PASSKEY_NOTIFY:
6487 hci_user_passkey_notify_evt(hdev, skb);
6490 case HCI_EV_KEYPRESS_NOTIFY:
6491 hci_keypress_notify_evt(hdev, skb);
6494 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6495 hci_simple_pair_complete_evt(hdev, skb);
6498 case HCI_EV_REMOTE_HOST_FEATURES:
6499 hci_remote_host_features_evt(hdev, skb);
6502 case HCI_EV_LE_META:
6503 hci_le_meta_evt(hdev, skb);
6506 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6507 hci_remote_oob_data_request_evt(hdev, skb);
6510 #if IS_ENABLED(CONFIG_BT_HS)
6511 case HCI_EV_CHANNEL_SELECTED:
6512 hci_chan_selected_evt(hdev, skb);
6515 case HCI_EV_PHY_LINK_COMPLETE:
6516 hci_phy_link_complete_evt(hdev, skb);
6519 case HCI_EV_LOGICAL_LINK_COMPLETE:
6520 hci_loglink_complete_evt(hdev, skb);
6523 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6524 hci_disconn_loglink_complete_evt(hdev, skb);
6527 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6528 hci_disconn_phylink_complete_evt(hdev, skb);
6532 case HCI_EV_NUM_COMP_BLOCKS:
6533 hci_num_comp_blocks_evt(hdev, skb);
6537 msft_vendor_evt(hdev, skb);
6541 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6546 req_complete(hdev, status, opcode);
6547 } else if (req_complete_skb) {
6548 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6549 kfree_skb(orig_skb);
6552 req_complete_skb(hdev, status, opcode, orig_skb);
6556 kfree_skb(orig_skb);
6558 hdev->stat.evt_rx++;