2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
/* All-zero 16-byte link key, used to detect blank/invalid keys */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
47 __u8 status = *((__u8 *) skb->data);
49 BT_DBG("%s status 0x%2.2x", hdev->name, status);
51 /* It is possible that we receive Inquiry Complete event right
52 * before we receive Inquiry Cancel Command Complete event, in
53 * which case the latter event should have status of Command
54 * Disallowed (0x0c). This should not be treated as error, since
55 * we actually achieve what Inquiry Cancel wants to achieve,
56 * which is to end the last Inquiry session.
58 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
59 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
68 clear_bit(HCI_INQUIRY, &hdev->flags);
69 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
70 wake_up_bit(&hdev->flags, HCI_INQUIRY);
73 /* Set discovery state to stopped if we're not doing LE active
76 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
77 hdev->le_scan_type != LE_SCAN_ACTIVE)
78 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
81 hci_conn_check_pending(hdev);
84 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
86 __u8 status = *((__u8 *) skb->data);
88 BT_DBG("%s status 0x%2.2x", hdev->name, status);
93 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
96 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
98 __u8 status = *((__u8 *) skb->data);
100 BT_DBG("%s status 0x%2.2x", hdev->name, status);
105 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
107 hci_conn_check_pending(hdev);
110 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
113 BT_DBG("%s", hdev->name);
116 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
118 struct hci_rp_role_discovery *rp = (void *) skb->data;
119 struct hci_conn *conn;
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
130 conn->role = rp->role;
132 hci_dev_unlock(hdev);
135 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
137 struct hci_rp_read_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
140 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
147 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 conn->link_policy = __le16_to_cpu(rp->policy);
151 hci_dev_unlock(hdev);
154 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156 struct hci_rp_write_link_policy *rp = (void *) skb->data;
157 struct hci_conn *conn;
160 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
171 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
173 conn->link_policy = get_unaligned_le16(sent + 2);
175 hci_dev_unlock(hdev);
178 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
181 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
183 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
188 hdev->link_policy = __le16_to_cpu(rp->policy);
191 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
194 __u8 status = *((__u8 *) skb->data);
197 BT_DBG("%s status 0x%2.2x", hdev->name, status);
202 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
206 hdev->link_policy = get_unaligned_le16(sent);
209 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
211 __u8 status = *((__u8 *) skb->data);
213 BT_DBG("%s status 0x%2.2x", hdev->name, status);
215 clear_bit(HCI_RESET, &hdev->flags);
220 /* Reset all non-persistent flags */
221 hci_dev_clear_volatile_flags(hdev);
223 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
225 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
226 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
228 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
229 hdev->adv_data_len = 0;
231 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
232 hdev->scan_rsp_data_len = 0;
234 hdev->le_scan_type = LE_SCAN_PASSIVE;
236 hdev->ssp_debug_mode = 0;
238 hci_bdaddr_list_clear(&hdev->le_white_list);
239 hci_bdaddr_list_clear(&hdev->le_resolv_list);
242 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
245 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
246 struct hci_cp_read_stored_link_key *sent;
248 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
250 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
254 if (!rp->status && sent->read_all == 0x01) {
255 hdev->stored_max_keys = rp->max_keys;
256 hdev->stored_num_keys = rp->num_keys;
260 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
263 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
265 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
270 if (rp->num_keys <= hdev->stored_num_keys)
271 hdev->stored_num_keys -= rp->num_keys;
273 hdev->stored_num_keys = 0;
276 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
278 __u8 status = *((__u8 *) skb->data);
281 BT_DBG("%s status 0x%2.2x", hdev->name, status);
283 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
289 if (hci_dev_test_flag(hdev, HCI_MGMT))
290 mgmt_set_local_name_complete(hdev, sent, status);
292 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
294 hci_dev_unlock(hdev);
297 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
299 struct hci_rp_read_local_name *rp = (void *) skb->data;
301 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
306 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
307 hci_dev_test_flag(hdev, HCI_CONFIG))
308 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
311 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
313 __u8 status = *((__u8 *) skb->data);
316 BT_DBG("%s status 0x%2.2x", hdev->name, status);
318 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
325 __u8 param = *((__u8 *) sent);
327 if (param == AUTH_ENABLED)
328 set_bit(HCI_AUTH, &hdev->flags);
330 clear_bit(HCI_AUTH, &hdev->flags);
333 if (hci_dev_test_flag(hdev, HCI_MGMT))
334 mgmt_auth_enable_complete(hdev, status);
336 hci_dev_unlock(hdev);
339 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
341 __u8 status = *((__u8 *) skb->data);
345 BT_DBG("%s status 0x%2.2x", hdev->name, status);
350 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
354 param = *((__u8 *) sent);
357 set_bit(HCI_ENCRYPT, &hdev->flags);
359 clear_bit(HCI_ENCRYPT, &hdev->flags);
362 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
364 __u8 status = *((__u8 *) skb->data);
368 BT_DBG("%s status 0x%2.2x", hdev->name, status);
370 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
374 param = *((__u8 *) sent);
379 hdev->discov_timeout = 0;
383 if (param & SCAN_INQUIRY)
384 set_bit(HCI_ISCAN, &hdev->flags);
386 clear_bit(HCI_ISCAN, &hdev->flags);
388 if (param & SCAN_PAGE)
389 set_bit(HCI_PSCAN, &hdev->flags);
391 clear_bit(HCI_PSCAN, &hdev->flags);
394 hci_dev_unlock(hdev);
397 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
399 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
401 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
406 memcpy(hdev->dev_class, rp->dev_class, 3);
408 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
409 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
412 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
414 __u8 status = *((__u8 *) skb->data);
417 BT_DBG("%s status 0x%2.2x", hdev->name, status);
419 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
426 memcpy(hdev->dev_class, sent, 3);
428 if (hci_dev_test_flag(hdev, HCI_MGMT))
429 mgmt_set_class_of_dev_complete(hdev, sent, status);
431 hci_dev_unlock(hdev);
434 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
436 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
439 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
444 setting = __le16_to_cpu(rp->voice_setting);
446 if (hdev->voice_setting == setting)
449 hdev->voice_setting = setting;
451 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
454 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
457 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
460 __u8 status = *((__u8 *) skb->data);
464 BT_DBG("%s status 0x%2.2x", hdev->name, status);
469 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
473 setting = get_unaligned_le16(sent);
475 if (hdev->voice_setting == setting)
478 hdev->voice_setting = setting;
480 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
483 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
486 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
489 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
491 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
496 hdev->num_iac = rp->num_iac;
498 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
501 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
503 __u8 status = *((__u8 *) skb->data);
504 struct hci_cp_write_ssp_mode *sent;
506 BT_DBG("%s status 0x%2.2x", hdev->name, status);
508 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
516 hdev->features[1][0] |= LMP_HOST_SSP;
518 hdev->features[1][0] &= ~LMP_HOST_SSP;
521 if (hci_dev_test_flag(hdev, HCI_MGMT))
522 mgmt_ssp_enable_complete(hdev, sent->mode, status);
525 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
527 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
530 hci_dev_unlock(hdev);
533 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
535 u8 status = *((u8 *) skb->data);
536 struct hci_cp_write_sc_support *sent;
538 BT_DBG("%s status 0x%2.2x", hdev->name, status);
540 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
548 hdev->features[1][0] |= LMP_HOST_SC;
550 hdev->features[1][0] &= ~LMP_HOST_SC;
553 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
555 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
557 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
560 hci_dev_unlock(hdev);
563 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
565 struct hci_rp_read_local_version *rp = (void *) skb->data;
567 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
572 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
573 hci_dev_test_flag(hdev, HCI_CONFIG)) {
574 hdev->hci_ver = rp->hci_ver;
575 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
576 hdev->lmp_ver = rp->lmp_ver;
577 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
578 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
582 static void hci_cc_read_local_commands(struct hci_dev *hdev,
585 struct hci_rp_read_local_commands *rp = (void *) skb->data;
587 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
592 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
593 hci_dev_test_flag(hdev, HCI_CONFIG))
594 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
597 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
600 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
601 struct hci_conn *conn;
603 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
610 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
612 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
614 hci_dev_unlock(hdev);
617 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
620 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
621 struct hci_conn *conn;
624 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
629 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
635 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
637 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
639 hci_dev_unlock(hdev);
642 static void hci_cc_read_local_features(struct hci_dev *hdev,
645 struct hci_rp_read_local_features *rp = (void *) skb->data;
647 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
652 memcpy(hdev->features, rp->features, 8);
654 /* Adjust default settings according to features
655 * supported by device. */
657 if (hdev->features[0][0] & LMP_3SLOT)
658 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
660 if (hdev->features[0][0] & LMP_5SLOT)
661 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
663 if (hdev->features[0][1] & LMP_HV2) {
664 hdev->pkt_type |= (HCI_HV2);
665 hdev->esco_type |= (ESCO_HV2);
668 if (hdev->features[0][1] & LMP_HV3) {
669 hdev->pkt_type |= (HCI_HV3);
670 hdev->esco_type |= (ESCO_HV3);
673 if (lmp_esco_capable(hdev))
674 hdev->esco_type |= (ESCO_EV3);
676 if (hdev->features[0][4] & LMP_EV4)
677 hdev->esco_type |= (ESCO_EV4);
679 if (hdev->features[0][4] & LMP_EV5)
680 hdev->esco_type |= (ESCO_EV5);
682 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
683 hdev->esco_type |= (ESCO_2EV3);
685 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
686 hdev->esco_type |= (ESCO_3EV3);
688 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
689 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
692 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
695 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
697 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
702 if (hdev->max_page < rp->max_page)
703 hdev->max_page = rp->max_page;
705 if (rp->page < HCI_MAX_PAGES)
706 memcpy(hdev->features[rp->page], rp->features, 8);
709 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
712 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
714 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
719 hdev->flow_ctl_mode = rp->mode;
722 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
724 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
726 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
731 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
732 hdev->sco_mtu = rp->sco_mtu;
733 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
734 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
736 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
741 hdev->acl_cnt = hdev->acl_pkts;
742 hdev->sco_cnt = hdev->sco_pkts;
744 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
745 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
748 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
750 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
752 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
757 if (test_bit(HCI_INIT, &hdev->flags))
758 bacpy(&hdev->bdaddr, &rp->bdaddr);
760 if (hci_dev_test_flag(hdev, HCI_SETUP))
761 bacpy(&hdev->setup_addr, &rp->bdaddr);
764 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
767 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
769 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
774 if (test_bit(HCI_INIT, &hdev->flags)) {
775 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
776 hdev->page_scan_window = __le16_to_cpu(rp->window);
780 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
783 u8 status = *((u8 *) skb->data);
784 struct hci_cp_write_page_scan_activity *sent;
786 BT_DBG("%s status 0x%2.2x", hdev->name, status);
791 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
795 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
796 hdev->page_scan_window = __le16_to_cpu(sent->window);
799 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
802 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
804 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
809 if (test_bit(HCI_INIT, &hdev->flags))
810 hdev->page_scan_type = rp->type;
813 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
816 u8 status = *((u8 *) skb->data);
819 BT_DBG("%s status 0x%2.2x", hdev->name, status);
824 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
826 hdev->page_scan_type = *type;
829 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
832 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
834 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
839 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
840 hdev->block_len = __le16_to_cpu(rp->block_len);
841 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
843 hdev->block_cnt = hdev->num_blocks;
845 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
846 hdev->block_cnt, hdev->block_len);
849 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
851 struct hci_rp_read_clock *rp = (void *) skb->data;
852 struct hci_cp_read_clock *cp;
853 struct hci_conn *conn;
855 BT_DBG("%s", hdev->name);
857 if (skb->len < sizeof(*rp))
865 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
869 if (cp->which == 0x00) {
870 hdev->clock = le32_to_cpu(rp->clock);
874 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
876 conn->clock = le32_to_cpu(rp->clock);
877 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
881 hci_dev_unlock(hdev);
884 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
887 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
889 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
894 hdev->amp_status = rp->amp_status;
895 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
896 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
897 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
898 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
899 hdev->amp_type = rp->amp_type;
900 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
901 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
902 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
903 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
906 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
909 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
911 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
916 hdev->inq_tx_power = rp->tx_power;
919 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
921 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
922 struct hci_cp_pin_code_reply *cp;
923 struct hci_conn *conn;
925 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
929 if (hci_dev_test_flag(hdev, HCI_MGMT))
930 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
935 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
939 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
941 conn->pin_length = cp->pin_len;
944 hci_dev_unlock(hdev);
947 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
949 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
951 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
955 if (hci_dev_test_flag(hdev, HCI_MGMT))
956 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
959 hci_dev_unlock(hdev);
962 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
965 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
967 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
972 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
973 hdev->le_pkts = rp->le_max_pkt;
975 hdev->le_cnt = hdev->le_pkts;
977 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
980 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
983 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
985 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
990 memcpy(hdev->le_features, rp->features, 8);
993 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
996 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
998 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1003 hdev->adv_tx_power = rp->tx_power;
1006 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1008 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1010 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1014 if (hci_dev_test_flag(hdev, HCI_MGMT))
1015 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1018 hci_dev_unlock(hdev);
1021 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1022 struct sk_buff *skb)
1024 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1026 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030 if (hci_dev_test_flag(hdev, HCI_MGMT))
1031 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1032 ACL_LINK, 0, rp->status);
1034 hci_dev_unlock(hdev);
1037 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1039 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1041 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1045 if (hci_dev_test_flag(hdev, HCI_MGMT))
1046 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1049 hci_dev_unlock(hdev);
1052 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1053 struct sk_buff *skb)
1055 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1057 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1061 if (hci_dev_test_flag(hdev, HCI_MGMT))
1062 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1063 ACL_LINK, 0, rp->status);
1065 hci_dev_unlock(hdev);
1068 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1069 struct sk_buff *skb)
1071 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1073 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1076 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1077 struct sk_buff *skb)
1079 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1081 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1084 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1086 __u8 status = *((__u8 *) skb->data);
1089 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1094 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1100 bacpy(&hdev->random_addr, sent);
1102 hci_dev_unlock(hdev);
1105 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1107 __u8 status = *((__u8 *) skb->data);
1108 struct hci_cp_le_set_default_phy *cp;
1110 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1115 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1121 hdev->le_tx_def_phys = cp->tx_phys;
1122 hdev->le_rx_def_phys = cp->rx_phys;
1124 hci_dev_unlock(hdev);
1127 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1128 struct sk_buff *skb)
1130 __u8 status = *((__u8 *) skb->data);
1131 struct hci_cp_le_set_adv_set_rand_addr *cp;
1132 struct adv_info *adv_instance;
1137 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1143 if (!hdev->cur_adv_instance) {
1144 /* Store in hdev for instance 0 (Set adv and Directed advs) */
1145 bacpy(&hdev->random_addr, &cp->bdaddr);
1147 adv_instance = hci_find_adv_instance(hdev,
1148 hdev->cur_adv_instance);
1150 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1153 hci_dev_unlock(hdev);
1156 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1158 __u8 *sent, status = *((__u8 *) skb->data);
1160 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1165 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1171 /* If we're doing connection initiation as peripheral. Set a
1172 * timeout in case something goes wrong.
1175 struct hci_conn *conn;
1177 hci_dev_set_flag(hdev, HCI_LE_ADV);
1179 conn = hci_lookup_le_connect(hdev);
1181 queue_delayed_work(hdev->workqueue,
1182 &conn->le_conn_timeout,
1183 conn->conn_timeout);
1185 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1188 hci_dev_unlock(hdev);
1191 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1192 struct sk_buff *skb)
1194 struct hci_cp_le_set_ext_adv_enable *cp;
1195 __u8 status = *((__u8 *) skb->data);
1197 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1202 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1209 struct hci_conn *conn;
1211 hci_dev_set_flag(hdev, HCI_LE_ADV);
1213 conn = hci_lookup_le_connect(hdev);
1215 queue_delayed_work(hdev->workqueue,
1216 &conn->le_conn_timeout,
1217 conn->conn_timeout);
1219 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1222 hci_dev_unlock(hdev);
1225 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1227 struct hci_cp_le_set_scan_param *cp;
1228 __u8 status = *((__u8 *) skb->data);
1230 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1235 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1241 hdev->le_scan_type = cp->type;
1243 hci_dev_unlock(hdev);
1246 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1247 struct sk_buff *skb)
1249 struct hci_cp_le_set_ext_scan_params *cp;
1250 __u8 status = *((__u8 *) skb->data);
1251 struct hci_cp_le_scan_phy_params *phy_param;
1253 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1258 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1262 phy_param = (void *)cp->data;
1266 hdev->le_scan_type = phy_param->type;
1268 hci_dev_unlock(hdev);
1271 static bool has_pending_adv_report(struct hci_dev *hdev)
1273 struct discovery_state *d = &hdev->discovery;
1275 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1278 static void clear_pending_adv_report(struct hci_dev *hdev)
1280 struct discovery_state *d = &hdev->discovery;
1282 bacpy(&d->last_adv_addr, BDADDR_ANY);
1283 d->last_adv_data_len = 0;
1286 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1287 u8 bdaddr_type, s8 rssi, u32 flags,
1290 struct discovery_state *d = &hdev->discovery;
1292 if (len > HCI_MAX_AD_LENGTH)
1295 bacpy(&d->last_adv_addr, bdaddr);
1296 d->last_adv_addr_type = bdaddr_type;
1297 d->last_adv_rssi = rssi;
1298 d->last_adv_flags = flags;
1299 memcpy(d->last_adv_data, data, len);
1300 d->last_adv_data_len = len;
1303 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1308 case LE_SCAN_ENABLE:
1309 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1310 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1311 clear_pending_adv_report(hdev);
1314 case LE_SCAN_DISABLE:
1315 /* We do this here instead of when setting DISCOVERY_STOPPED
1316 * since the latter would potentially require waiting for
1317 * inquiry to stop too.
1319 if (has_pending_adv_report(hdev)) {
1320 struct discovery_state *d = &hdev->discovery;
1322 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1323 d->last_adv_addr_type, NULL,
1324 d->last_adv_rssi, d->last_adv_flags,
1326 d->last_adv_data_len, NULL, 0);
1329 /* Cancel this timer so that we don't try to disable scanning
1330 * when it's already disabled.
1332 cancel_delayed_work(&hdev->le_scan_disable);
1334 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1336 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1337 * interrupted scanning due to a connect request. Mark
1338 * therefore discovery as stopped. If this was not
1339 * because of a connect request advertising might have
1340 * been disabled because of active scanning, so
1341 * re-enable it again if necessary.
1343 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1344 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1345 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1346 hdev->discovery.state == DISCOVERY_FINDING)
1347 hci_req_reenable_advertising(hdev);
1352 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1357 hci_dev_unlock(hdev);
1360 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1361 struct sk_buff *skb)
1363 struct hci_cp_le_set_scan_enable *cp;
1364 __u8 status = *((__u8 *) skb->data);
1366 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1371 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1375 le_set_scan_enable_complete(hdev, cp->enable);
1378 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1379 struct sk_buff *skb)
1381 struct hci_cp_le_set_ext_scan_enable *cp;
1382 __u8 status = *((__u8 *) skb->data);
1384 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1389 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1393 le_set_scan_enable_complete(hdev, cp->enable);
1396 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1397 struct sk_buff *skb)
1399 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1401 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1407 hdev->le_num_of_adv_sets = rp->num_of_sets;
1410 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1411 struct sk_buff *skb)
1413 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1415 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1420 hdev->le_white_list_size = rp->size;
1423 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1424 struct sk_buff *skb)
1426 __u8 status = *((__u8 *) skb->data);
1428 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1433 hci_bdaddr_list_clear(&hdev->le_white_list);
1436 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1437 struct sk_buff *skb)
1439 struct hci_cp_le_add_to_white_list *sent;
1440 __u8 status = *((__u8 *) skb->data);
1442 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1447 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1451 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1455 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1456 struct sk_buff *skb)
1458 struct hci_cp_le_del_from_white_list *sent;
1459 __u8 status = *((__u8 *) skb->data);
1461 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1466 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1470 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1474 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1475 struct sk_buff *skb)
1477 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1479 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1484 memcpy(hdev->le_states, rp->le_states, 8);
1487 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1488 struct sk_buff *skb)
1490 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1492 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1497 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1498 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1501 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1502 struct sk_buff *skb)
1504 struct hci_cp_le_write_def_data_len *sent;
1505 __u8 status = *((__u8 *) skb->data);
1507 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1512 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1516 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1517 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete for HCI_OP_LE_ADD_TO_RESOLV_LIST.
 * Mirrors the controller's address-resolving list by adding the peer
 * address together with its IRK, as carried in the sent command.
 */
1520 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1521 struct sk_buff *skb)
1523 struct hci_cp_le_add_to_resolv_list *sent;
1524 __u8 status = *((__u8 *) skb->data);
1526 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1531 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1535 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1536 sent->bdaddr_type, sent->peer_irk,
/* Command Complete for HCI_OP_LE_DEL_FROM_RESOLV_LIST.
 * Drops the matching entry from the host's le_resolv_list mirror.
 */
1540 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1541 struct sk_buff *skb)
1543 struct hci_cp_le_del_from_resolv_list *sent;
1544 __u8 status = *((__u8 *) skb->data);
1546 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1551 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1555 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
/* Command Complete for LE Clear Resolving List: empty the host-side
 * mirror so it matches the (now empty) controller list.
 */
1559 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1560 struct sk_buff *skb)
1562 __u8 status = *((__u8 *) skb->data);
1564 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1569 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete for LE Read Resolving List Size: remember how many
 * entries the controller's resolving list can hold.
 */
1572 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1573 struct sk_buff *skb)
1575 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1577 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1582 hdev->le_resolv_list_size = rp->size;
/* Command Complete for LE Set Address Resolution Enable.
 * Sets or clears the HCI_LL_RPA_RESOLUTION device flag to track whether
 * the controller is performing link-layer RPA resolution; the branch on
 * the sent enable byte is elided in this excerpt.
 */
1585 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1586 struct sk_buff *skb)
/* sent points at the single enable/disable byte of the sent command. */
1588 __u8 *sent, status = *((__u8 *) skb->data);
1590 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1595 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1602 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1604 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1606 hci_dev_unlock(hdev);
/* Command Complete for LE Read Maximum Data Length: cache the
 * controller's maximum TX/RX octet counts and times.
 */
1609 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1610 struct sk_buff *skb)
1612 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1614 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1619 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1620 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1621 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1622 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete for Write LE Host Supported.
 * Updates the host-features page (features[1][0]) and the HCI_LE_ENABLED
 * flag to reflect the LE / simultaneous-LE-BREDR host support values we
 * sent. The conditionals on sent->le and sent->simul are elided in this
 * excerpt; the paired set/clear lines below are their two arms.
 */
1625 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1626 struct sk_buff *skb)
1628 struct hci_cp_write_le_host_supported *sent;
1629 __u8 status = *((__u8 *) skb->data);
1631 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1636 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
/* LE host support enabled: advertise it in the host features page. */
1643 hdev->features[1][0] |= LMP_HOST_LE;
1644 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
/* LE host support disabled: clear feature bit and related flags. */
1646 hdev->features[1][0] &= ~LMP_HOST_LE;
1647 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1648 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1652 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1654 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1656 hci_dev_unlock(hdev);
/* Command Complete for LE Set Advertising Parameters: remember which own
 * address type the controller will advertise with, for later use when
 * matching incoming connections.
 */
1659 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1661 struct hci_cp_le_set_adv_param *cp;
1662 u8 status = *((u8 *) skb->data);
1664 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1669 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1674 hdev->adv_addr_type = cp->own_address_type;
1675 hci_dev_unlock(hdev);
/* Command Complete for LE Set Extended Advertising Parameters.
 * Records the own address type and the selected TX power: for instance 0
 * the power is stored on hdev itself, otherwise on the matching
 * adv_instance. The advertising data is then refreshed since the TX
 * power may be included in it.
 */
1678 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1680 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1681 struct hci_cp_le_set_ext_adv_params *cp;
1682 struct adv_info *adv_instance;
1684 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1689 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1694 hdev->adv_addr_type = cp->own_addr_type;
1695 if (!hdev->cur_adv_instance) {
1696 /* Store in hdev for instance 0 */
1697 hdev->adv_tx_power = rp->tx_power;
1699 adv_instance = hci_find_adv_instance(hdev,
1700 hdev->cur_adv_instance);
1702 adv_instance->tx_power = rp->tx_power;
1704 /* Update adv data as tx power is known now */
1705 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1706 hci_dev_unlock(hdev);
/* Command Complete for Read RSSI: look up the connection by handle and
 * store the reported RSSI on it.
 */
1709 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1711 struct hci_rp_read_rssi *rp = (void *) skb->data;
1712 struct hci_conn *conn;
1714 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1721 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1723 conn->rssi = rp->rssi;
1725 hci_dev_unlock(hdev);
/* Command Complete for Read Transmit Power Level.
 * The sent command's type field selects which value was requested; the
 * elided case labels route the reading into conn->tx_power (current
 * level) or conn->max_tx_power (maximum level).
 */
1728 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1730 struct hci_cp_read_tx_power *sent;
1731 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1732 struct hci_conn *conn;
1734 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1739 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1745 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1749 switch (sent->type) {
1751 conn->tx_power = rp->tx_power;
1754 conn->max_tx_power = rp->tx_power;
1759 hci_dev_unlock(hdev);
/* Command Complete for Write SSP Debug Mode: record the debug-mode byte
 * we sent so hdev->ssp_debug_mode tracks the controller's setting.
 */
1762 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1764 u8 status = *((u8 *) skb->data);
1767 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1772 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1774 hdev->ssp_debug_mode = *mode;
/* Command Status for Inquiry.
 * On failure (branching elided here) any pending connection attempts are
 * re-checked; on success the HCI_INQUIRY flag is set to mark an inquiry
 * in progress.
 */
1777 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1779 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1782 hci_conn_check_pending(hdev);
1786 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status for Create Connection (BR/EDR ACL).
 * On an error status for an outgoing attempt in BT_CONNECT: close the
 * connection and notify upper layers, except for Command Disallowed
 * (0x0c) on an early attempt, where the connection is parked in
 * BT_CONNECT2 for a retry. On success with no tracked conn object, a new
 * one is added so the later Connection Complete event can be matched.
 * (Status checks and some braces are elided in this excerpt.)
 */
1789 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1791 struct hci_cp_create_conn *cp;
1792 struct hci_conn *conn;
1794 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1796 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1802 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1804 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1807 if (conn && conn->state == BT_CONNECT) {
/* 0x0c == Command Disallowed: retry later unless we already did. */
1808 if (status != 0x0c || conn->attempt > 2) {
1809 conn->state = BT_CLOSED;
1810 hci_connect_cfm(conn, status);
1813 conn->state = BT_CONNECT2;
1817 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1820 bt_dev_err(hdev, "no memory for new connection");
1824 hci_dev_unlock(hdev);
/* Command Status for Add SCO Connection.
 * On failure, the parent ACL is found by the handle we sent and the
 * associated SCO link (lookup elided in this excerpt) is closed with the
 * failure status propagated to upper layers.
 */
1827 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1829 struct hci_cp_add_sco *cp;
1830 struct hci_conn *acl, *sco;
1833 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1838 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1842 handle = __le16_to_cpu(cp->handle);
1844 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1848 acl = hci_conn_hash_lookup_handle(hdev, handle);
1852 sco->state = BT_CLOSED;
1854 hci_connect_cfm(sco, status);
1859 hci_dev_unlock(hdev);
/* Command Status for Authentication Requested.
 * Only the failure path does work: if the connection is still in
 * BT_CONFIG, complete the connect attempt with the error status and drop
 * the reference taken for the authentication.
 */
1862 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1864 struct hci_cp_auth_requested *cp;
1865 struct hci_conn *conn;
1867 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1872 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1878 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1880 if (conn->state == BT_CONFIG) {
1881 hci_connect_cfm(conn, status);
1882 hci_conn_drop(conn);
1886 hci_dev_unlock(hdev);
/* Command Status for Set Connection Encryption.
 * Mirrors hci_cs_auth_requested: on failure while still in BT_CONFIG,
 * report the failed connect and drop the held reference.
 */
1889 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1891 struct hci_cp_set_conn_encrypt *cp;
1892 struct hci_conn *conn;
1894 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1899 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1905 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1907 if (conn->state == BT_CONFIG) {
1908 hci_connect_cfm(conn, status);
1909 hci_conn_drop(conn);
1913 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication.
 * Returns 0 (no auth needed) for connections that are not outgoing in
 * BT_CONFIG, for SDP-only security, and for legacy (non-SSP) links
 * without MITM/medium-or-higher requirements; otherwise auth is needed
 * (the return statements themselves are elided in this excerpt).
 */
1916 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1917 struct hci_conn *conn)
1919 if (conn->state != BT_CONFIG || !conn->out)
1922 if (conn->pending_sec_level == BT_SECURITY_SDP)
1925 /* Only request authentication for SSP connections or non-SSP
1926 * devices with sec_level MEDIUM or HIGH or if MITM protection
1929 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1930 conn->pending_sec_level != BT_SECURITY_FIPS &&
1931 conn->pending_sec_level != BT_SECURITY_HIGH &&
1932 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry-cache entry, copying the
 * page-scan parameters and clock offset learned during inquiry so the
 * controller can page the device efficiently. Returns the result of
 * hci_send_cmd().
 */
1938 static int hci_resolve_name(struct hci_dev *hdev,
1939 struct inquiry_entry *e)
1941 struct hci_cp_remote_name_req cp;
1943 memset(&cp, 0, sizeof(cp));
1945 bacpy(&cp.bdaddr, &e->data.bdaddr);
1946 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1947 cp.pscan_mode = e->data.pscan_mode;
1948 cp.clock_offset = e->data.clock_offset;
1950 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next discovery entry that still
 * needs its name. Returns true when a Remote Name Request was started
 * (the entry is then marked NAME_PENDING), false when the resolve list
 * is empty or no request could be sent.
 */
1953 static bool hci_resolve_next_name(struct hci_dev *hdev)
1955 struct discovery_state *discov = &hdev->discovery;
1956 struct inquiry_entry *e;
1958 if (list_empty(&discov->resolve))
1961 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1965 if (hci_resolve_name(hdev, e) == 0) {
1966 e->name_state = NAME_PENDING;
/* Handle a (possibly failed) remote-name result during discovery.
 * Reports the device as connected to mgmt if appropriate, then advances
 * the discovery state machine: record the resolved name (or mark it
 * unknown), continue with the next pending name, or mark discovery
 * stopped when nothing is left. name may be NULL / name_len 0 when the
 * request failed.
 */
1973 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1974 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1976 struct discovery_state *discov = &hdev->discovery;
1977 struct inquiry_entry *e;
1979 /* Update the mgmt connected state if necessary. Be careful with
1980 * conn objects that exist but are not (yet) connected however.
1981 * Only those in BT_CONFIG or BT_CONNECTED states can be
1982 * considered connected.
1985 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1986 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1987 mgmt_device_connected(hdev, conn, 0, name, name_len)
1989 if (discov->state == DISCOVERY_STOPPED)
1992 if (discov->state == DISCOVERY_STOPPING)
1993 goto discov_complete;
1995 if (discov->state != DISCOVERY_RESOLVING)
1998 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1999 /* If the device was not found in a list of found devices names of which
2000 * are pending. there is no need to continue resolving a next name as it
2001 * will be done upon receiving another Remote Name Request Complete
/* Name successfully resolved: report it to mgmt with the cached RSSI. */
2008 e->name_state = NAME_KNOWN;
2009 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2010 e->data.rssi, name, name_len);
2012 e->name_state = NAME_NOT_KNOWN;
2015 if (hci_resolve_next_name(hdev))
2019 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status for Remote Name Request.
 * On success we simply wait for the Remote Name Request Complete event.
 * On failure, the pending name lookup is resolved as unknown (when mgmt
 * is active) and, if the connection still requires authentication, an
 * Authentication Requested command is issued here instead.
 */
2022 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2024 struct hci_cp_remote_name_req *cp;
2025 struct hci_conn *conn;
2027 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2029 /* If successful wait for the name req complete event before
2030 * checking for the need to do authentication */
2034 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2040 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2042 if (hci_dev_test_flag(hdev, HCI_MGMT))
2043 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2048 if (!hci_outgoing_auth_needed(hdev, conn))
2051 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2052 struct hci_cp_auth_requested auth_cp;
2054 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2056 auth_cp.handle = __cpu_to_le16(conn->handle);
2057 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2058 sizeof(auth_cp), &auth_cp);
2062 hci_dev_unlock(hdev);
/* Command Status for Read Remote Supported Features.
 * On failure while the connection is still being configured, report the
 * failed connect and drop the reference.
 */
2065 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2067 struct hci_cp_read_remote_features *cp;
2068 struct hci_conn *conn;
2070 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2075 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2081 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2083 if (conn->state == BT_CONFIG) {
2084 hci_connect_cfm(conn, status);
2085 hci_conn_drop(conn);
2089 hci_dev_unlock(hdev);
/* Command Status for Read Remote Extended Features.
 * Same failure handling as hci_cs_read_remote_features: a failed command
 * while in BT_CONFIG terminates the connect attempt.
 */
2092 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2094 struct hci_cp_read_remote_ext_features *cp;
2095 struct hci_conn *conn;
2097 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2102 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2108 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2110 if (conn->state == BT_CONFIG) {
2111 hci_connect_cfm(conn, status);
2112 hci_conn_drop(conn);
2116 hci_dev_unlock(hdev);
/* Command Status for Setup Synchronous Connection (eSCO).
 * On failure, the ACL is found via the sent handle and its pending
 * SCO/eSCO link (lookup elided in this excerpt) is closed with the
 * failure status.
 */
2119 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2121 struct hci_cp_setup_sync_conn *cp;
2122 struct hci_conn *acl, *sco;
2125 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2130 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2134 handle = __le16_to_cpu(cp->handle);
2136 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2140 acl = hci_conn_hash_lookup_handle(hdev, handle);
2144 sco->state = BT_CLOSED;
2146 hci_connect_cfm(sco, status);
2151 hci_dev_unlock(hdev);
/* Command Status for Sniff Mode.
 * On failure, clear the pending mode-change flag and, if a SCO setup was
 * waiting on the mode change, let it proceed/fail with this status.
 */
2154 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2156 struct hci_cp_sniff_mode *cp;
2157 struct hci_conn *conn;
2159 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2164 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2170 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2172 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2174 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2175 hci_sco_setup(conn, status);
2178 hci_dev_unlock(hdev);
/* Command Status for Exit Sniff Mode.
 * Identical failure handling to hci_cs_sniff_mode: clear the pending
 * mode change and run any deferred SCO setup with this status.
 */
2181 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2183 struct hci_cp_exit_sniff_mode *cp;
2184 struct hci_conn *conn;
2186 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2191 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2197 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2199 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2201 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2202 hci_sco_setup(conn, status);
2205 hci_dev_unlock(hdev);
/* Command Status for Disconnect.
 * Only acts on failure (guard elided in this excerpt): informs mgmt that
 * the requested disconnect could not be carried out.
 */
2208 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2210 struct hci_cp_disconnect *cp;
2211 struct hci_conn *conn;
2216 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2222 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2224 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2225 conn->dst_type, status);
2227 hci_dev_unlock(hdev);
/* Common bookkeeping after an LE Create Connection command was accepted
 * by the controller: record the initiator/responder identity addresses
 * that SMP will need, and arm a connection timeout for directed
 * (non-white-list) connection attempts.
 */
2230 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2231 u8 peer_addr_type, u8 own_address_type,
2234 struct hci_conn *conn;
2236 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2241 /* Store the initiator and responder address information which
2242 * is needed for SMP. These values will not change during the
2243 * lifetime of the connection.
2245 conn->init_addr_type = own_address_type;
/* Our own address: random address if we advertise one, else public. */
2246 if (own_address_type == ADDR_LE_DEV_RANDOM)
2247 bacpy(&conn->init_addr, &hdev->random_addr);
2249 bacpy(&conn->init_addr, &hdev->bdaddr);
2251 conn->resp_addr_type = peer_addr_type;
2252 bacpy(&conn->resp_addr, peer_addr);
2254 /* We don't want the connection attempt to stick around
2255 * indefinitely since LE doesn't have a page timeout concept
2256 * like BR/EDR. Set a timer for any connection that doesn't use
2257 * the white list for connecting.
2259 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2260 queue_delayed_work(conn->hdev->workqueue,
2261 &conn->le_conn_timeout,
2262 conn->conn_timeout);
/* Command Status for LE Create Connection: on acceptance, hand the sent
 * parameters to cs_le_create_conn() for SMP address bookkeeping and
 * timeout setup. Failures are handled elsewhere (see comment below).
 */
2265 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2267 struct hci_cp_le_create_conn *cp;
2269 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2271 /* All connection failure handling is taken care of by the
2272 * hci_le_conn_failed function which is triggered by the HCI
2273 * request completion callbacks used for connecting.
2278 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2284 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2285 cp->own_address_type, cp->filter_policy);
2287 hci_dev_unlock(hdev);
/* Command Status for LE Extended Create Connection: same flow as
 * hci_cs_le_create_conn but for the extended command (note the field is
 * own_addr_type here, not own_address_type).
 */
2290 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2292 struct hci_cp_le_ext_create_conn *cp;
2294 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2296 /* All connection failure handling is taken care of by the
2297 * hci_le_conn_failed function which is triggered by the HCI
2298 * request completion callbacks used for connecting.
2303 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2309 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2310 cp->own_addr_type, cp->filter_policy);
2312 hci_dev_unlock(hdev);
/* Command Status for LE Read Remote Features.
 * On failure while the LE connection is still in BT_CONFIG, complete the
 * connect attempt with the error and drop the reference.
 */
2315 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2317 struct hci_cp_le_read_remote_features *cp;
2318 struct hci_conn *conn;
2320 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2325 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2331 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2333 if (conn->state == BT_CONFIG) {
2334 hci_connect_cfm(conn, status);
2335 hci_conn_drop(conn);
2339 hci_dev_unlock(hdev);
/* Command Status for LE Start Encryption.
 * On failure (guards elided in this excerpt), if the link is still
 * connected it is torn down with an authentication-failure reason, since
 * encryption could not be started.
 */
2342 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2344 struct hci_cp_le_start_enc *cp;
2345 struct hci_conn *conn;
2347 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2354 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2358 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2362 if (conn->state != BT_CONNECTED)
2365 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2366 hci_conn_drop(conn);
2369 hci_dev_unlock(hdev);
/* Command Status for Switch Role.
 * On failure, clear the pending role-switch flag so the connection state
 * machine does not keep waiting for a Role Change event.
 */
2372 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2374 struct hci_cp_switch_role *cp;
2375 struct hci_conn *conn;
2377 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2382 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2388 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2390 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2392 hci_dev_unlock(hdev);
/* Inquiry Complete event handler.
 * Clears HCI_INQUIRY (waking anyone sleeping on the bit), then advances
 * the mgmt discovery state machine: either start resolving names of
 * found devices or, when nothing is left (and no simultaneous LE scan is
 * still running), mark discovery stopped.
 */
2395 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2397 __u8 status = *((__u8 *) skb->data);
2398 struct discovery_state *discov = &hdev->discovery;
2399 struct inquiry_entry *e;
2401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2403 hci_conn_check_pending(hdev);
2405 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
/* Pairs with the implied barrier in wait_on_bit() waiters. */
2408 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2409 wake_up_bit(&hdev->flags, HCI_INQUIRY)
2411 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2416 if (discov->state != DISCOVERY_FINDING)
2419 if (list_empty(&discov->resolve)) {
2420 /* When BR/EDR inquiry is active and no LE scanning is in
2421 * progress, then change discovery state to indicate completion.
2423 * When running LE scanning and BR/EDR inquiry simultaneously
2424 * and the LE scan already finished, then change the discovery
2425 * state to indicate completion.
2427 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2428 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2429 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Names still pending: kick off resolution of the first one. */
2433 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2434 if (e && hci_resolve_name(hdev, e) == 0) {
2435 e->name_state = NAME_PENDING;
2436 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2438 /* When BR/EDR inquiry is active and no LE scanning is in
2439 * progress, then change discovery state to indicate completion.
2441 * When running LE scanning and BR/EDR inquiry simultaneously
2442 * and the LE scan already finished, then change the discovery
2443 * state to indicate completion.
2445 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2446 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2447 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2451 hci_dev_unlock(hdev);
/* Inquiry Result event handler.
 * The first byte of the payload is the response count, followed by that
 * many inquiry_info records. After validating the length, each result is
 * fed into the inquiry cache and reported to mgmt as a found device
 * (this event variant carries no RSSI, hence HCI_RSSI_INVALID).
 */
2454 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2456 struct inquiry_data data;
2457 struct inquiry_info *info = (void *) (skb->data + 1);
2458 int num_rsp = *((__u8 *) skb->data);
2460 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Reject empty or truncated events before touching the records. */
2462 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2465 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2470 for (; num_rsp; num_rsp--, info++) {
2473 bacpy(&data.bdaddr, &info->bdaddr);
2474 data.pscan_rep_mode = info->pscan_rep_mode;
2475 data.pscan_period_mode = info->pscan_period_mode;
2476 data.pscan_mode = info->pscan_mode;
2477 memcpy(data.dev_class, info->dev_class, 3);
2478 data.clock_offset = info->clock_offset;
2479 data.rssi = HCI_RSSI_INVALID;
2480 data.ssp_mode = 0x00;
2482 flags = hci_inquiry_cache_update(hdev, &data, false);
2484 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2485 info->dev_class, HCI_RSSI_INVALID,
2486 flags, NULL, 0, NULL, 0);
2489 hci_dev_unlock(hdev);
/* Connection Complete event handler (BR/EDR ACL and SCO).
 * Looks up the pending hci_conn (falling back to an ESCO lookup for SCO
 * links, since some controllers report SCO where eSCO was requested).
 * On success: record the handle, move ACLs to BT_CONFIG (and start
 * reading remote features) or other links to BT_CONNECTED, create
 * debugfs/sysfs entries, and propagate AUTH/ENCRYPT device flags.
 * On failure: close the conn and tell mgmt. Finally, run any SCO setup
 * that was waiting on the ACL and confirm the connection to upper
 * layers. (Several guard/brace lines are elided in this excerpt.)
 */
2492 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2494 struct hci_ev_conn_complete *ev = (void *) skb->data;
2495 struct hci_conn *conn;
2497 BT_DBG("%s", hdev->name);
2501 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2503 if (ev->link_type != SCO_LINK)
/* Controller completed eSCO setup as plain SCO: fix up our object. */
2506 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2510 conn->type = SCO_LINK;
2514 conn->handle = __le16_to_cpu(ev->handle);
2516 if (conn->type == ACL_LINK) {
2517 conn->state = BT_CONFIG;
2518 hci_conn_hold(conn);
/* Incoming legacy (pre-SSP) link without a stored key: allow a
 * longer disconnect timeout to give pairing time to happen. */
2520 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2521 !hci_find_link_key(hdev, &ev->bdaddr))
2522 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2524 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2526 conn->state = BT_CONNECTED;
2528 hci_debugfs_create_conn(conn);
2529 hci_conn_add_sysfs(conn);
2531 if (test_bit(HCI_AUTH, &hdev->flags))
2532 set_bit(HCI_CONN_AUTH, &conn->flags);
2534 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2535 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2537 /* Get remote features */
2538 if (conn->type == ACL_LINK) {
2539 struct hci_cp_read_remote_features cp;
2540 cp.handle = ev->handle;
2541 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2544 hci_req_update_scan(hdev);
2547 /* Set packet type for incoming connection */
2548 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2549 struct hci_cp_change_conn_ptype cp;
2550 cp.handle = ev->handle;
2551 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2552 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Failure path: close and report to mgmt for ACL links. */
2556 conn->state = BT_CLOSED;
2557 if (conn->type == ACL_LINK)
2558 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2559 conn->dst_type, ev->status);
2562 if (conn->type == ACL_LINK)
2563 hci_sco_setup(conn, ev->status);
2566 hci_connect_cfm(conn, ev->status);
2568 } else if (ev->link_type != ACL_LINK)
2569 hci_connect_cfm(conn, ev->status);
2572 hci_dev_unlock(hdev);
2574 hci_conn_check_pending(hdev);
/* Send a Reject Connection Request for bdaddr with reason
 * "unacceptable BD_ADDR" (HCI_ERROR_REJ_BAD_ADDR).
 */
2577 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2579 struct hci_cp_reject_conn_req cp;
2581 bacpy(&cp.bdaddr, bdaddr);
2582 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2583 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event handler (incoming BR/EDR connection).
 * Rejects the request when the L2CAP layer refuses it, when the peer is
 * blacklisted, or (under mgmt) when the device is not connectable and
 * the peer is not whitelisted. Otherwise the connection is accepted:
 * ACL links via Accept Connection Request (negotiating master role when
 * possible), SCO/eSCO links via Accept Synchronous Connection Request
 * with default voice parameters, or deferred to upper layers when
 * HCI_PROTO_DEFER was requested.
 */
2586 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2588 struct hci_ev_conn_request *ev = (void *) skb->data;
2589 int mask = hdev->link_mode;
2590 struct inquiry_entry *ie;
2591 struct hci_conn *conn;
2594 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
/* Ask L2CAP/upper layers whether to accept; they may also set
 * HCI_PROTO_DEFER in flags. */
2597 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2600 if (!(mask & HCI_LM_ACCEPT)) {
2601 hci_reject_conn(hdev, &ev->bdaddr);
2605 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2607 hci_reject_conn(hdev, &ev->bdaddr);
2611 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2612 * connection. These features are only touched through mgmt so
2613 * only do the checks if HCI_MGMT is set.
2615 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2616 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2617 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2619 hci_reject_conn(hdev, &ev->bdaddr);
2623 /* Connection accepted */
/* Refresh the cached device class from the event. */
2627 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2629 memcpy(ie->data.dev_class, ev->dev_class, 3);
2631 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2634 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2637 bt_dev_err(hdev, "no memory for new connection");
2638 hci_dev_unlock(hdev);
2643 memcpy(conn->dev_class, ev->dev_class, 3);
2645 hci_dev_unlock(hdev);
2647 if (ev->link_type == ACL_LINK ||
2648 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2649 struct hci_cp_accept_conn_req cp;
2650 conn->state = BT_CONNECT;
2652 bacpy(&cp.bdaddr, &ev->bdaddr);
2654 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2655 cp.role = 0x00; /* Become master */
2657 cp.role = 0x01; /* Remain slave */
2659 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2660 } else if (!(flags & HCI_PROTO_DEFER)) {
2661 struct hci_cp_accept_sync_conn_req cp;
2662 conn->state = BT_CONNECT;
2664 bacpy(&cp.bdaddr, &ev->bdaddr);
2665 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* Default CVSD voice parameters: 8 kHz * 8 bit = 0x1f40 B/s. */
2667 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2668 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2669 cp.max_latency = cpu_to_le16(0xffff);
2670 cp.content_format = cpu_to_le16(hdev->voice_setting);
2671 cp.retrans_effort = 0xff;
2673 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
/* Deferred: let the upper layer decide, report via connect_cfm. */
2676 conn->state = BT_CONNECT2;
2677 hci_connect_cfm(conn, 0);
/* Map an HCI disconnect error code onto the coarser mgmt disconnect
 * reason reported to userspace; anything unrecognized becomes UNKNOWN.
 */
2681 static u8 hci_to_mgmt_reason(u8 err)
2684 case HCI_ERROR_CONNECTION_TIMEOUT:
2685 return MGMT_DEV_DISCONN_TIMEOUT;
2686 case HCI_ERROR_REMOTE_USER_TERM:
2687 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2688 case HCI_ERROR_REMOTE_POWER_OFF:
2689 return MGMT_DEV_DISCONN_REMOTE;
2690 case HCI_ERROR_LOCAL_HOST_TERM:
2691 return MGMT_DEV_DISCONN_LOCAL_HOST;
2693 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event handler.
 * On error, mgmt is told the disconnect failed. On success the conn is
 * closed, mgmt is notified with a translated reason, link keys are
 * flushed if requested, LE auto-connect parameters are re-armed so the
 * device reconnects when seen again, upper layers get disconn_cfm, and
 * LE advertising is re-enabled if the controller stopped it for this
 * connection. (Guard lines are elided in this excerpt.)
 */
2697 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2699 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2701 struct hci_conn_params *params;
2702 struct hci_conn *conn;
2703 bool mgmt_connected;
2706 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2710 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2715 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2716 conn->dst_type, ev->status);
2720 conn->state = BT_CLOSED;
2722 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
/* Auth failures override the controller-supplied reason for mgmt. */
2724 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2725 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2727 reason = hci_to_mgmt_reason(ev->reason);
2729 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2730 reason, mgmt_connected);
2732 if (conn->type == ACL_LINK) {
2733 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2734 hci_remove_link_key(hdev, &conn->dst);
2736 hci_req_update_scan(hdev);
/* Re-arm LE auto-connection for devices we should keep chasing. */
2739 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2741 switch (params->auto_connect) {
2742 case HCI_AUTO_CONN_LINK_LOSS:
2743 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2747 case HCI_AUTO_CONN_DIRECT:
2748 case HCI_AUTO_CONN_ALWAYS:
2749 list_del_init(&params->action);
2750 list_add(&params->action, &hdev->pend_le_conns);
2751 hci_update_background_scan(hdev);
2761 hci_disconn_cfm(conn, ev->reason);
2764 /* Re-enable advertising if necessary, since it might
2765 * have been disabled by the connection. From the
2766 * HCI_LE_Set_Advertise_Enable command description in
2767 * the core specification (v4.0):
2768 * "The Controller shall continue advertising until the Host
2769 * issues an LE_Set_Advertise_Enable command with
2770 * Advertising_Enable set to 0x00 (Advertising is disabled)
2771 * or until a connection is created or until the Advertising
2772 * is timed out due to Directed Advertising."
2774 if (type == LE_LINK)
2775 hci_req_reenable_advertising(hdev);
2778 hci_dev_unlock(hdev);
/* Authentication Complete event handler.
 * On success, marks the link authenticated and promotes the pending
 * security level (re-auth of legacy devices is only logged, since it is
 * not possible). On failure, records an auth failure (distinguishing
 * PIN-or-key-missing) and informs mgmt. Then: in BT_CONFIG, either move
 * on to encryption (SSP) or complete the connection; otherwise notify
 * via hci_auth_cfm and keep the link alive briefly. Finally, any queued
 * encryption request is sent or failed. (Guard lines elided.)
 */
2781 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2783 struct hci_ev_auth_complete *ev = (void *) skb->data;
2784 struct hci_conn *conn;
2786 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2790 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2795 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2797 if (!hci_conn_ssp_enabled(conn) &&
2798 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2799 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2801 set_bit(HCI_CONN_AUTH, &conn->flags);
2802 conn->sec_level = conn->pending_sec_level;
2805 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2806 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2808 mgmt_auth_failed(conn, ev->status);
2811 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2812 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2814 if (conn->state == BT_CONFIG) {
/* SSP links encrypt right after successful authentication. */
2815 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2816 struct hci_cp_set_conn_encrypt cp;
2817 cp.handle = ev->handle;
2819 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2822 conn->state = BT_CONNECTED;
2823 hci_connect_cfm(conn, ev->status);
2824 hci_conn_drop(conn);
2827 hci_auth_cfm(conn, ev->status);
2829 hci_conn_hold(conn);
2830 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2831 hci_conn_drop(conn);
/* A Set Connection Encryption was queued behind this auth. */
2834 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2836 struct hci_cp_set_conn_encrypt cp;
2837 cp.handle = ev->handle;
2839 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2842 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2843 hci_encrypt_cfm(conn, ev->status);
2848 hci_dev_unlock(hdev);
/* Remote Name Request Complete event handler.
 * Feeds the resolved (or failed) name into the discovery machinery via
 * hci_check_pending_name when mgmt is active, then — like the command
 * status path — kicks off authentication if this outgoing connection
 * still needs it.
 */
2851 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2853 struct hci_ev_remote_name *ev = (void *) skb->data;
2854 struct hci_conn *conn;
2856 BT_DBG("%s", hdev->name);
2858 hci_conn_check_pending(hdev);
2862 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2864 if (!hci_dev_test_flag(hdev, HCI_MGMT))
/* Bound the name length — the event buffer is not NUL-guaranteed. */
2867 if (ev->status == 0)
2868 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2869 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2871 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2877 if (!hci_outgoing_auth_needed(hdev, conn))
2880 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2881 struct hci_cp_auth_requested cp;
2883 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2885 cp.handle = __cpu_to_le16(conn->handle);
2886 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2890 hci_dev_unlock(hdev);
/* Completion callback for HCI_Read_Encryption_Key_Size.
 * Validates the response, looks up the connection, records the key size
 * and then delivers the deferred encrypt-change notification.
 * NOTE(review): when the read fails, the code assumes the MAXIMUM key
 * size rather than a conservative minimum — a permissive fallback worth
 * revisiting in light of downgrade attacks on encryption key length.
 */
2893 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2894 u16 opcode, struct sk_buff *skb)
2896 const struct hci_rp_read_enc_key_size *rp;
2897 struct hci_conn *conn;
2900 BT_DBG("%s status 0x%02x", hdev->name, status);
2902 if (!skb || skb->len < sizeof(*rp)) {
2903 bt_dev_err(hdev, "invalid read key size response");
2907 rp = (void *)skb->data;
2908 handle = le16_to_cpu(rp->handle);
2912 conn = hci_conn_hash_lookup_handle(hdev, handle);
2916 /* If we fail to read the encryption key size, assume maximum
2917 * (which is the same we do also when this HCI command isn't
2921 bt_dev_err(hdev, "failed to read key size for handle %u",
2923 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2925 conn->enc_key_size = rp->key_size;
2928 hci_encrypt_cfm(conn, 0);
2931 hci_dev_unlock(hdev);
/* Encryption Change event handler.
 * On encryption enabled: mark the link authenticated+encrypted, promote
 * the pending security level, flag FIPS for P-256 keys and AES-CCM for
 * LE / BR-EDR-secure links. On disable: clear those flags. A failed
 * encryption on LE forces RPA regeneration. If link-security checks
 * fail, the event status is overridden to auth-failure and the link is
 * torn down. For encrypted ACLs the key size is read (when supported)
 * before notifying, and an LE/ping-capable encrypted link gets its
 * authenticated payload timeout programmed. (Guard lines elided.)
 */
2934 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2936 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2937 struct hci_conn *conn;
2939 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2943 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2949 /* Encryption implies authentication */
2950 set_bit(HCI_CONN_AUTH, &conn->flags);
2951 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2952 conn->sec_level = conn->pending_sec_level;
2954 /* P-256 authentication key implies FIPS */
2955 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2956 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 means AES-CCM on BR/EDR; LE always uses AES-CCM. */
2958 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2959 conn->type == LE_LINK)
2960 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2962 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2963 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2967 /* We should disregard the current RPA and generate a new one
2968 * whenever the encryption procedure fails.
2970 if (ev->status && conn->type == LE_LINK) {
2971 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2972 hci_adv_instances_set_rpa_expired(hdev, true);
2975 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2977 /* Check link security requirements are met */
2978 if (!hci_conn_check_link_mode(conn))
2979 ev->status = HCI_ERROR_AUTH_FAILURE;
2981 if (ev->status && conn->state == BT_CONNECTED) {
2982 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2983 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2985 /* Notify upper layers so they can cleanup before
2988 hci_encrypt_cfm(conn, ev->status);
2989 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2990 hci_conn_drop(conn);
2994 /* Try reading the encryption key size for encrypted ACL links */
2995 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
2996 struct hci_cp_read_enc_key_size cp;
2997 struct hci_request req;
2999 /* Only send HCI_Read_Encryption_Key_Size if the
3000 * controller really supports it. If it doesn't, assume
3001 * the default size (16).
3003 if (!(hdev->commands[20] & 0x10)) {
3004 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3008 hci_req_init(&req, hdev);
3010 cp.handle = cpu_to_le16(conn->handle);
3011 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3013 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3014 bt_dev_err(hdev, "sending read key size failed");
3015 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3022 /* Set the default Authenticated Payload Timeout after
3023 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3024 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3025 * sent when the link is active and Encryption is enabled, the conn
3026 * type can be either LE or ACL and controller must support LMP Ping.
3027 * Ensure for AES-CCM encryption as well.
3029 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3030 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3031 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3032 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3033 struct hci_cp_write_auth_payload_to cp;
3035 cp.handle = cpu_to_le16(conn->handle);
3036 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3037 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3042 hci_encrypt_cfm(conn, ev->status);
3045 hci_dev_unlock(hdev);
/* Handle the HCI Change Connection Link Key Complete event: mark the
 * connection secure on success, clear the pending-auth flag and notify
 * upper layers via hci_key_change_cfm().
 *
 * NOTE(review): excerpt appears truncated (lookup-failure guard and
 * braces missing) — confirm against the full file.
 */
3048 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3049 struct sk_buff *skb)
3051 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3052 struct hci_conn *conn;
3054 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3058 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3061 set_bit(HCI_CONN_SECURE, &conn->flags);
3063 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3065 hci_key_change_cfm(conn, ev->status);
3068 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Supported Features Complete event: store the
 * remote feature page 0, optionally chase extended features, request the
 * remote name (or notify mgmt of the connection), and finish connection
 * setup when no outgoing authentication is needed.
 *
 * NOTE(review): excerpt appears truncated (braces/early-return guards
 * missing) — confirm against the full file.
 */
3071 static void hci_remote_features_evt(struct hci_dev *hdev,
3072 struct sk_buff *skb)
3074 struct hci_ev_remote_features *ev = (void *) skb->data;
3075 struct hci_conn *conn;
3077 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3081 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Cache page 0 of the remote LMP features on the connection. */
3086 memcpy(conn->features[0], ev->features, 8);
3088 if (conn->state != BT_CONFIG)
/* Both sides support extended features: read page 1 as well. */
3091 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3092 lmp_ext_feat_capable(conn)) {
3093 struct hci_cp_read_remote_ext_features cp;
3094 cp.handle = ev->handle;
3096 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* Not yet reported to mgmt: fetch the remote name first. */
3101 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3102 struct hci_cp_remote_name_req cp;
3103 memset(&cp, 0, sizeof(cp));
3104 bacpy(&cp.bdaddr, &conn->dst);
3105 cp.pscan_rep_mode = 0x02;
3106 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3107 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3108 mgmt_device_connected(hdev, conn, 0, NULL, 0);
/* No further authentication needed: the link is fully connected. */
3110 if (!hci_outgoing_auth_needed(hdev, conn)) {
3111 conn->state = BT_CONNECTED;
3112 hci_connect_cfm(conn, ev->status);
3113 hci_conn_drop(conn);
3117 hci_dev_unlock(hdev);
/* Handle the HCI Command Complete event: extract the opcode and status,
 * dispatch to the matching hci_cc_* completion handler, refill the command
 * credit counter from ev->ncmd and flag request completion so queued
 * commands can proceed.
 *
 * NOTE(review): excerpt appears truncated (the switch statement header,
 * break statements and closing braces are missing) — confirm against
 * the full file.
 */
3120 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3121 u16 *opcode, u8 *status,
3122 hci_req_complete_t *req_complete,
3123 hci_req_complete_skb_t *req_complete_skb)
3125 struct hci_ev_cmd_complete *ev = (void *) skb->data;
/* Opcode comes from the event header; status is the first return byte. */
3127 *opcode = __le16_to_cpu(ev->opcode);
3128 *status = skb->data[sizeof(*ev)];
/* Strip the event header so handlers see only the return parameters. */
3130 skb_pull(skb, sizeof(*ev));
/* Per-opcode dispatch to the command-complete handlers. */
3133 case HCI_OP_INQUIRY_CANCEL:
3134 hci_cc_inquiry_cancel(hdev, skb, status);
3137 case HCI_OP_PERIODIC_INQ:
3138 hci_cc_periodic_inq(hdev, skb);
3141 case HCI_OP_EXIT_PERIODIC_INQ:
3142 hci_cc_exit_periodic_inq(hdev, skb);
3145 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3146 hci_cc_remote_name_req_cancel(hdev, skb);
3149 case HCI_OP_ROLE_DISCOVERY:
3150 hci_cc_role_discovery(hdev, skb);
3153 case HCI_OP_READ_LINK_POLICY:
3154 hci_cc_read_link_policy(hdev, skb);
3157 case HCI_OP_WRITE_LINK_POLICY:
3158 hci_cc_write_link_policy(hdev, skb);
3161 case HCI_OP_READ_DEF_LINK_POLICY:
3162 hci_cc_read_def_link_policy(hdev, skb);
3165 case HCI_OP_WRITE_DEF_LINK_POLICY:
3166 hci_cc_write_def_link_policy(hdev, skb);
3170 hci_cc_reset(hdev, skb);
3173 case HCI_OP_READ_STORED_LINK_KEY:
3174 hci_cc_read_stored_link_key(hdev, skb);
3177 case HCI_OP_DELETE_STORED_LINK_KEY:
3178 hci_cc_delete_stored_link_key(hdev, skb);
3181 case HCI_OP_WRITE_LOCAL_NAME:
3182 hci_cc_write_local_name(hdev, skb);
3185 case HCI_OP_READ_LOCAL_NAME:
3186 hci_cc_read_local_name(hdev, skb);
3189 case HCI_OP_WRITE_AUTH_ENABLE:
3190 hci_cc_write_auth_enable(hdev, skb);
3193 case HCI_OP_WRITE_ENCRYPT_MODE:
3194 hci_cc_write_encrypt_mode(hdev, skb);
3197 case HCI_OP_WRITE_SCAN_ENABLE:
3198 hci_cc_write_scan_enable(hdev, skb);
3201 case HCI_OP_READ_CLASS_OF_DEV:
3202 hci_cc_read_class_of_dev(hdev, skb);
3205 case HCI_OP_WRITE_CLASS_OF_DEV:
3206 hci_cc_write_class_of_dev(hdev, skb);
3209 case HCI_OP_READ_VOICE_SETTING:
3210 hci_cc_read_voice_setting(hdev, skb);
3213 case HCI_OP_WRITE_VOICE_SETTING:
3214 hci_cc_write_voice_setting(hdev, skb);
3217 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3218 hci_cc_read_num_supported_iac(hdev, skb);
3221 case HCI_OP_WRITE_SSP_MODE:
3222 hci_cc_write_ssp_mode(hdev, skb);
3225 case HCI_OP_WRITE_SC_SUPPORT:
3226 hci_cc_write_sc_support(hdev, skb);
3229 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3230 hci_cc_read_auth_payload_timeout(hdev, skb);
3233 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3234 hci_cc_write_auth_payload_timeout(hdev, skb);
3237 case HCI_OP_READ_LOCAL_VERSION:
3238 hci_cc_read_local_version(hdev, skb);
3241 case HCI_OP_READ_LOCAL_COMMANDS:
3242 hci_cc_read_local_commands(hdev, skb);
3245 case HCI_OP_READ_LOCAL_FEATURES:
3246 hci_cc_read_local_features(hdev, skb);
3249 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3250 hci_cc_read_local_ext_features(hdev, skb);
3253 case HCI_OP_READ_BUFFER_SIZE:
3254 hci_cc_read_buffer_size(hdev, skb);
3257 case HCI_OP_READ_BD_ADDR:
3258 hci_cc_read_bd_addr(hdev, skb);
3261 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3262 hci_cc_read_page_scan_activity(hdev, skb);
3265 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3266 hci_cc_write_page_scan_activity(hdev, skb);
3269 case HCI_OP_READ_PAGE_SCAN_TYPE:
3270 hci_cc_read_page_scan_type(hdev, skb);
3273 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3274 hci_cc_write_page_scan_type(hdev, skb);
3277 case HCI_OP_READ_DATA_BLOCK_SIZE:
3278 hci_cc_read_data_block_size(hdev, skb);
3281 case HCI_OP_READ_FLOW_CONTROL_MODE:
3282 hci_cc_read_flow_control_mode(hdev, skb);
3285 case HCI_OP_READ_LOCAL_AMP_INFO:
3286 hci_cc_read_local_amp_info(hdev, skb);
3289 case HCI_OP_READ_CLOCK:
3290 hci_cc_read_clock(hdev, skb);
3293 case HCI_OP_READ_INQ_RSP_TX_POWER:
3294 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3297 case HCI_OP_PIN_CODE_REPLY:
3298 hci_cc_pin_code_reply(hdev, skb);
3301 case HCI_OP_PIN_CODE_NEG_REPLY:
3302 hci_cc_pin_code_neg_reply(hdev, skb);
3305 case HCI_OP_READ_LOCAL_OOB_DATA:
3306 hci_cc_read_local_oob_data(hdev, skb);
3309 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3310 hci_cc_read_local_oob_ext_data(hdev, skb);
3313 case HCI_OP_LE_READ_BUFFER_SIZE:
3314 hci_cc_le_read_buffer_size(hdev, skb);
3317 case HCI_OP_LE_READ_LOCAL_FEATURES:
3318 hci_cc_le_read_local_features(hdev, skb);
3321 case HCI_OP_LE_READ_ADV_TX_POWER:
3322 hci_cc_le_read_adv_tx_power(hdev, skb);
3325 case HCI_OP_USER_CONFIRM_REPLY:
3326 hci_cc_user_confirm_reply(hdev, skb);
3329 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3330 hci_cc_user_confirm_neg_reply(hdev, skb);
3333 case HCI_OP_USER_PASSKEY_REPLY:
3334 hci_cc_user_passkey_reply(hdev, skb);
3337 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3338 hci_cc_user_passkey_neg_reply(hdev, skb);
3341 case HCI_OP_LE_SET_RANDOM_ADDR:
3342 hci_cc_le_set_random_addr(hdev, skb);
3345 case HCI_OP_LE_SET_ADV_ENABLE:
3346 hci_cc_le_set_adv_enable(hdev, skb);
3349 case HCI_OP_LE_SET_SCAN_PARAM:
3350 hci_cc_le_set_scan_param(hdev, skb);
3353 case HCI_OP_LE_SET_SCAN_ENABLE:
3354 hci_cc_le_set_scan_enable(hdev, skb);
3357 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3358 hci_cc_le_read_white_list_size(hdev, skb);
3361 case HCI_OP_LE_CLEAR_WHITE_LIST:
3362 hci_cc_le_clear_white_list(hdev, skb);
3365 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3366 hci_cc_le_add_to_white_list(hdev, skb);
3369 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3370 hci_cc_le_del_from_white_list(hdev, skb);
3373 case HCI_OP_LE_READ_SUPPORTED_STATES:
3374 hci_cc_le_read_supported_states(hdev, skb);
3377 case HCI_OP_LE_READ_DEF_DATA_LEN:
3378 hci_cc_le_read_def_data_len(hdev, skb);
3381 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3382 hci_cc_le_write_def_data_len(hdev, skb);
3385 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3386 hci_cc_le_add_to_resolv_list(hdev, skb);
3389 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3390 hci_cc_le_del_from_resolv_list(hdev, skb);
3393 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3394 hci_cc_le_clear_resolv_list(hdev, skb);
3397 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3398 hci_cc_le_read_resolv_list_size(hdev, skb);
3401 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3402 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3405 case HCI_OP_LE_READ_MAX_DATA_LEN:
3406 hci_cc_le_read_max_data_len(hdev, skb);
3409 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3410 hci_cc_write_le_host_supported(hdev, skb);
3413 case HCI_OP_LE_SET_ADV_PARAM:
3414 hci_cc_set_adv_param(hdev, skb);
3417 case HCI_OP_READ_RSSI:
3418 hci_cc_read_rssi(hdev, skb);
3421 case HCI_OP_READ_TX_POWER:
3422 hci_cc_read_tx_power(hdev, skb);
3425 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3426 hci_cc_write_ssp_debug_mode(hdev, skb);
3429 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3430 hci_cc_le_set_ext_scan_param(hdev, skb);
3433 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3434 hci_cc_le_set_ext_scan_enable(hdev, skb);
3437 case HCI_OP_LE_SET_DEFAULT_PHY:
3438 hci_cc_le_set_default_phy(hdev, skb);
3441 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3442 hci_cc_le_read_num_adv_sets(hdev, skb);
3445 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3446 hci_cc_set_ext_adv_param(hdev, skb);
3449 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3450 hci_cc_le_set_ext_adv_enable(hdev, skb);
3453 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3454 hci_cc_le_set_adv_set_random_addr(hdev, skb);
/* Unhandled opcode: just log it. */
3458 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
/* A real command completed — stop the command timeout watchdog. */
3462 if (*opcode != HCI_OP_NOP)
3463 cancel_delayed_work(&hdev->cmd_timer);
/* Refill command credits unless a reset is in progress. */
3465 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3466 atomic_set(&hdev->cmd_cnt, 1);
3468 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3471 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3473 "unexpected event for opcode 0x%4.4x", *opcode);
/* Credits available and commands queued: resume command processing. */
3477 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3478 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Command Status event: extract opcode/status, dispatch to
 * the matching hci_cs_* handler, refill command credits from ev->ncmd and
 * flag request completion for failed (or event-less) commands.
 *
 * NOTE(review): excerpt appears truncated (switch header, break
 * statements and part of the completion condition missing) — confirm
 * against the full file.
 */
3481 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3482 u16 *opcode, u8 *status,
3483 hci_req_complete_t *req_complete,
3484 hci_req_complete_skb_t *req_complete_skb)
3486 struct hci_ev_cmd_status *ev = (void *) skb->data;
3488 skb_pull(skb, sizeof(*ev));
3490 *opcode = __le16_to_cpu(ev->opcode);
3491 *status = ev->status;
/* Per-opcode dispatch to the command-status handlers. */
3494 case HCI_OP_INQUIRY:
3495 hci_cs_inquiry(hdev, ev->status);
3498 case HCI_OP_CREATE_CONN:
3499 hci_cs_create_conn(hdev, ev->status);
3502 case HCI_OP_DISCONNECT:
3503 hci_cs_disconnect(hdev, ev->status);
3506 case HCI_OP_ADD_SCO:
3507 hci_cs_add_sco(hdev, ev->status);
3510 case HCI_OP_AUTH_REQUESTED:
3511 hci_cs_auth_requested(hdev, ev->status);
3514 case HCI_OP_SET_CONN_ENCRYPT:
3515 hci_cs_set_conn_encrypt(hdev, ev->status);
3518 case HCI_OP_REMOTE_NAME_REQ:
3519 hci_cs_remote_name_req(hdev, ev->status);
3522 case HCI_OP_READ_REMOTE_FEATURES:
3523 hci_cs_read_remote_features(hdev, ev->status);
3526 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3527 hci_cs_read_remote_ext_features(hdev, ev->status);
3530 case HCI_OP_SETUP_SYNC_CONN:
3531 hci_cs_setup_sync_conn(hdev, ev->status);
3534 case HCI_OP_SNIFF_MODE:
3535 hci_cs_sniff_mode(hdev, ev->status);
3538 case HCI_OP_EXIT_SNIFF_MODE:
3539 hci_cs_exit_sniff_mode(hdev, ev->status);
3542 case HCI_OP_SWITCH_ROLE:
3543 hci_cs_switch_role(hdev, ev->status);
3546 case HCI_OP_LE_CREATE_CONN:
3547 hci_cs_le_create_conn(hdev, ev->status);
3550 case HCI_OP_LE_READ_REMOTE_FEATURES:
3551 hci_cs_le_read_remote_features(hdev, ev->status);
3554 case HCI_OP_LE_START_ENC:
3555 hci_cs_le_start_enc(hdev, ev->status);
3558 case HCI_OP_LE_EXT_CREATE_CONN:
3559 hci_cs_le_ext_create_conn(hdev, ev->status);
/* Unhandled opcode: just log it. */
3563 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
/* A real command was acknowledged — stop the command timeout watchdog. */
3567 if (*opcode != HCI_OP_NOP)
3568 cancel_delayed_work(&hdev->cmd_timer);
/* Refill command credits unless a reset is in progress. */
3570 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3571 atomic_set(&hdev->cmd_cnt, 1);
3573 /* Indicate request completion if the command failed. Also, if
3574 * we're not waiting for a special event and we get a success
3575 * command status we should try to flag the request as completed
3576 * (since for this kind of commands there will not be a command
3580 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3581 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3584 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3586 "unexpected event for opcode 0x%4.4x", *opcode);
/* Credits available and commands queued: resume command processing. */
3590 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3591 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Hardware Error event: record the controller's error code
 * and schedule the error-reset work to recover the device.
 */
3594 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3596 struct hci_ev_hardware_error *ev = (void *) skb->data;
3598 hdev->hw_error_code = ev->code;
3600 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle the HCI Role Change event: update the connection's role on
 * success, clear the pending role-switch flag and notify upper layers.
 *
 * NOTE(review): excerpt appears truncated (status check and braces
 * missing) — confirm against the full file.
 */
3603 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3605 struct hci_ev_role_change *ev = (void *) skb->data;
3606 struct hci_conn *conn;
3608 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3612 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3615 conn->role = ev->role;
3617 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3619 hci_role_switch_cfm(conn, ev->status, ev->role);
3622 hci_dev_unlock(hdev);
/* Handle the HCI Number Of Completed Packets event: per connection handle,
 * return the completed-packet count to the matching per-type credit pool
 * (ACL, LE or SCO) and kick the TX work to send more queued data.
 *
 * NOTE(review): excerpt appears truncated (case labels, braces and
 * continue/return statements missing) — confirm against the full file.
 */
3625 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3627 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only valid in packet-based flow control mode. */
3630 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3631 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate the event length against the advertised handle count. */
3635 if (skb->len < sizeof(*ev) ||
3636 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3637 BT_DBG("%s bad parameters", hdev->name);
3641 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3643 for (i = 0; i < ev->num_hndl; i++) {
3644 struct hci_comp_pkts_info *info = &ev->handles[i];
3645 struct hci_conn *conn;
3646 __u16 handle, count;
3648 handle = __le16_to_cpu(info->handle);
3649 count = __le16_to_cpu(info->count);
3651 conn = hci_conn_hash_lookup_handle(hdev, handle);
/* Credit the completed packets back against this connection. */
3655 conn->sent -= count;
3657 switch (conn->type) {
3659 hdev->acl_cnt += count;
3660 if (hdev->acl_cnt > hdev->acl_pkts)
3661 hdev->acl_cnt = hdev->acl_pkts;
/* LE links use the dedicated LE pool if present, else share ACL. */
3665 if (hdev->le_pkts) {
3666 hdev->le_cnt += count;
3667 if (hdev->le_cnt > hdev->le_pkts)
3668 hdev->le_cnt = hdev->le_pkts;
3670 hdev->acl_cnt += count;
3671 if (hdev->acl_cnt > hdev->acl_pkts)
3672 hdev->acl_cnt = hdev->acl_pkts;
3677 hdev->sco_cnt += count;
3678 if (hdev->sco_cnt > hdev->sco_pkts)
3679 hdev->sco_cnt = hdev->sco_pkts;
3683 bt_dev_err(hdev, "unknown type %d conn %p",
/* Credits returned: schedule the transmit worker. */
3689 queue_work(hdev->workqueue, &hdev->tx_work);
/* Look up a connection by handle, taking the device type into account:
 * primary controllers resolve via the connection hash, AMP controllers
 * resolve via the channel table.
 *
 * NOTE(review): excerpt appears truncated (case labels, the chan->conn
 * return path and the NULL fallback missing) — confirm against the
 * full file.
 */
3692 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3695 struct hci_chan *chan;
3697 switch (hdev->dev_type) {
3699 return hci_conn_hash_lookup_handle(hdev, handle);
3701 chan = hci_chan_lookup_handle(hdev, handle);
3706 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* Handle the HCI Number Of Completed Data Blocks event (block-based flow
 * control): per handle, return the completed block count to the shared
 * block pool and kick the TX work.
 *
 * NOTE(review): excerpt appears truncated (case labels, braces and
 * return statements missing) — confirm against the full file.
 */
3713 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3715 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* This event is only valid in block-based flow control mode. */
3718 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3719 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate the event length against the advertised handle count. */
3723 if (skb->len < sizeof(*ev) ||
3724 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3725 BT_DBG("%s bad parameters", hdev->name);
3729 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3732 for (i = 0; i < ev->num_hndl; i++) {
3733 struct hci_comp_blocks_info *info = &ev->handles[i];
3734 struct hci_conn *conn = NULL;
3735 __u16 handle, block_count;
3737 handle = __le16_to_cpu(info->handle);
3738 block_count = __le16_to_cpu(info->blocks);
/* Device-type-aware lookup (primary vs AMP controller). */
3740 conn = __hci_conn_lookup_handle(hdev, handle);
3744 conn->sent -= block_count;
3746 switch (conn->type) {
3749 hdev->block_cnt += block_count;
3750 if (hdev->block_cnt > hdev->num_blocks)
3751 hdev->block_cnt = hdev->num_blocks;
3755 bt_dev_err(hdev, "unknown type %d conn %p",
/* Credits returned: schedule the transmit worker. */
3761 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle the HCI Mode Change event: record the new mode (active/sniff),
 * maintain the power-save flag, and finish any pending SCO setup.
 *
 * NOTE(review): excerpt appears truncated (conn check, else keyword and
 * braces missing) — confirm against the full file.
 */
3764 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3766 struct hci_ev_mode_change *ev = (void *) skb->data;
3767 struct hci_conn *conn;
3769 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3773 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3775 conn->mode = ev->mode;
/* Unsolicited mode change (not requested by us): track power-save state. */
3777 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3779 if (conn->mode == HCI_CM_ACTIVE)
3780 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3782 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
/* A SCO setup was waiting for this mode change: run it now. */
3785 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3786 hci_sco_setup(conn, ev->status);
3789 hci_dev_unlock(hdev);
/* Handle the HCI PIN Code Request event: extend the disconnect timeout
 * while pairing, reject the request if we are not bondable and did not
 * initiate authentication, otherwise forward it to mgmt (user space).
 *
 * NOTE(review): excerpt appears truncated (conn check, the 'secure'
 * local and braces missing) — confirm against the full file.
 */
3792 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3794 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3795 struct hci_conn *conn;
3797 BT_DBG("%s", hdev->name);
3801 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Hold/drop pair refreshes the reference while bumping disc_timeout. */
3805 if (conn->state == BT_CONNECTED) {
3806 hci_conn_hold(conn);
3807 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3808 hci_conn_drop(conn);
/* Not bondable and remote started this: refuse the PIN request. */
3811 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3812 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3813 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3814 sizeof(ev->bdaddr), &ev->bdaddr);
3815 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
/* High security requested: let mgmt demand a 16-digit (secure) PIN. */
3818 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3823 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3827 hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level from the key type.
 *
 * NOTE(review): excerpt appears truncated (switch header, break
 * statements, the pin_len comparison and braces missing) — confirm
 * against the full file.
 */
3830 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
/* A changed-combination key keeps the previously stored type. */
3832 if (key_type == HCI_LK_CHANGED_COMBINATION)
3835 conn->pin_length = pin_len;
3836 conn->key_type = key_type;
3839 case HCI_LK_LOCAL_UNIT:
3840 case HCI_LK_REMOTE_UNIT:
3841 case HCI_LK_DEBUG_COMBINATION:
3843 case HCI_LK_COMBINATION:
/* Legacy combination key: security depends on the PIN length. */
3845 conn->pending_sec_level = BT_SECURITY_HIGH;
3847 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3849 case HCI_LK_UNAUTH_COMBINATION_P192:
3850 case HCI_LK_UNAUTH_COMBINATION_P256:
3851 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3853 case HCI_LK_AUTH_COMBINATION_P192:
3854 conn->pending_sec_level = BT_SECURITY_HIGH;
/* Authenticated P-256 key yields the strongest (FIPS) level. */
3856 case HCI_LK_AUTH_COMBINATION_P256:
3857 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle the HCI Link Key Request event: look up a stored key for the
 * peer, reject keys too weak for the pending security level, and reply
 * with either the key or a negative reply.
 *
 * NOTE(review): excerpt appears truncated (goto labels, braces and the
 * not_found path header missing) — confirm against the full file.
 */
3862 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3864 struct hci_ev_link_key_req *ev = (void *) skb->data;
3865 struct hci_cp_link_key_reply cp;
3866 struct hci_conn *conn;
3867 struct link_key *key;
3869 BT_DBG("%s", hdev->name);
/* Only act on stored keys when mgmt controls the device. */
3871 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3876 key = hci_find_link_key(hdev, &ev->bdaddr);
3878 BT_DBG("%s link key not found for %pMR", hdev->name,
3883 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3886 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3888 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Refuse an unauthenticated key when MITM protection was requested. */
3890 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3891 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3892 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3893 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* Refuse a short-PIN combination key for high/FIPS security. */
3897 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3898 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3899 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3900 BT_DBG("%s ignoring key unauthenticated for high security",
3905 conn_set_key(conn, key->type, key->pin_len);
/* Positive reply: hand the stored key back to the controller. */
3908 bacpy(&cp.bdaddr, &ev->bdaddr);
3909 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3911 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3913 hci_dev_unlock(hdev);
/* Negative reply path: no usable key for this peer. */
3918 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3919 hci_dev_unlock(hdev);
/* Handle the HCI Link Key Notification event: store the new key on the
 * connection and in the key list, notify mgmt, and discard debug keys
 * unless HCI_KEEP_DEBUG_KEYS is set.
 *
 * NOTE(review): excerpt appears truncated (pin_len/persistent locals,
 * conn check, braces and goto labels missing) — confirm against the
 * full file.
 */
3922 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3924 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3925 struct hci_conn *conn;
3926 struct link_key *key;
3930 BT_DBG("%s", hdev->name);
3934 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Hold/drop pair refreshes the reference while bumping disc_timeout. */
3938 hci_conn_hold(conn);
3939 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3940 hci_conn_drop(conn);
3942 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3943 conn_set_key(conn, ev->key_type, conn->pin_length);
3945 if (!hci_dev_test_flag(hdev, HCI_MGMT))
/* Persist the key; hci_add_link_key also resolves the final type. */
3948 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3949 ev->key_type, pin_len, &persistent);
3953 /* Update connection information since adding the key will have
3954 * fixed up the type in the case of changed combination keys.
3956 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3957 conn_set_key(conn, key->type, key->pin_len);
3959 mgmt_new_link_key(hdev, key, persistent);
3961 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3962 * is set. If it's not set simply remove the key from the kernel
3963 * list (we've still notified user space about it but with
3964 * store_hint being 0).
3966 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3967 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3968 list_del_rcu(&key->list);
3969 kfree_rcu(key, rcu);
/* Track whether the key should be flushed on disconnect. */
3974 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3976 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3979 hci_dev_unlock(hdev);
/* Handle the HCI Read Clock Offset Complete event: cache the peer's clock
 * offset in the inquiry cache entry (used to speed up later paging).
 *
 * NOTE(review): excerpt appears truncated (an 'if (ie)' guard and braces
 * appear to be missing) — confirm against the full file.
 */
3982 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3984 struct hci_ev_clock_offset *ev = (void *) skb->data;
3985 struct hci_conn *conn;
3987 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3991 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3992 if (conn && !ev->status) {
3993 struct inquiry_entry *ie;
3995 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3997 ie->data.clock_offset = ev->clock_offset;
3998 ie->timestamp = jiffies;
4002 hci_dev_unlock(hdev);
/* Handle the HCI Connection Packet Type Changed event: record the new
 * packet type on the connection when the change succeeded.
 */
4005 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4007 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4008 struct hci_conn *conn;
4010 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4014 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4015 if (conn && !ev->status)
4016 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4018 hci_dev_unlock(hdev);
/* Handle the HCI Page Scan Repetition Mode Change event: refresh the
 * cached page-scan repetition mode in the peer's inquiry cache entry.
 *
 * NOTE(review): excerpt appears truncated (an 'if (ie)' guard and braces
 * appear to be missing) — confirm against the full file.
 */
4021 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4023 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4024 struct inquiry_entry *ie;
4026 BT_DBG("%s", hdev->name);
4030 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4032 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4033 ie->timestamp = jiffies;
4036 hci_dev_unlock(hdev);
/* Handle the HCI Inquiry Result With RSSI event. Two on-air layouts
 * exist (with and without the pscan_mode byte); the record size check
 * selects which struct to parse. Each result updates the inquiry cache
 * and is reported to mgmt as a found device.
 *
 * NOTE(review): excerpt appears truncated (num_rsp guard, length-check
 * goto targets and braces missing) — confirm against the full file.
 */
4039 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4040 struct sk_buff *skb)
4042 struct inquiry_data data;
4043 int num_rsp = *((__u8 *) skb->data);
4045 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Suppress discovery reporting during periodic inquiry. */
4050 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Record size mismatch means the variant carrying pscan_mode is used. */
4055 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4056 struct inquiry_info_with_rssi_and_pscan_mode *info;
4057 info = (void *) (skb->data + 1);
4059 if (skb->len < num_rsp * sizeof(*info) + 1)
4062 for (; num_rsp; num_rsp--, info++) {
4065 bacpy(&data.bdaddr, &info->bdaddr);
4066 data.pscan_rep_mode = info->pscan_rep_mode;
4067 data.pscan_period_mode = info->pscan_period_mode;
4068 data.pscan_mode = info->pscan_mode;
4069 memcpy(data.dev_class, info->dev_class, 3);
4070 data.clock_offset = info->clock_offset;
4071 data.rssi = info->rssi;
4072 data.ssp_mode = 0x00;
4074 flags = hci_inquiry_cache_update(hdev, &data, false);
4076 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4077 info->dev_class, info->rssi,
4078 flags, NULL, 0, NULL, 0);
/* Standard layout: no pscan_mode byte in each record. */
4081 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4083 if (skb->len < num_rsp * sizeof(*info) + 1)
4086 for (; num_rsp; num_rsp--, info++) {
4089 bacpy(&data.bdaddr, &info->bdaddr);
4090 data.pscan_rep_mode = info->pscan_rep_mode;
4091 data.pscan_period_mode = info->pscan_period_mode;
4092 data.pscan_mode = 0x00;
4093 memcpy(data.dev_class, info->dev_class, 3);
4094 data.clock_offset = info->clock_offset;
4095 data.rssi = info->rssi;
4096 data.ssp_mode = 0x00;
4098 flags = hci_inquiry_cache_update(hdev, &data, false);
4100 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4101 info->dev_class, info->rssi,
4102 flags, NULL, 0, NULL, 0);
4107 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Extended Features Complete event: cache the
 * requested feature page, derive SSP/SC support from page 1, then continue
 * connection setup (remote name request or mgmt notification).
 *
 * NOTE(review): excerpt appears truncated (conn check, else branches and
 * braces missing) — confirm against the full file.
 */
4110 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4111 struct sk_buff *skb)
4113 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4114 struct hci_conn *conn;
4116 BT_DBG("%s", hdev->name);
4120 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Store the page only if it fits the per-connection feature table. */
4124 if (ev->page < HCI_MAX_PAGES)
4125 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the remote host's SSP/SC support bits. */
4127 if (!ev->status && ev->page == 0x01) {
4128 struct inquiry_entry *ie;
4130 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4132 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4134 if (ev->features[0] & LMP_HOST_SSP) {
4135 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4137 /* It is mandatory by the Bluetooth specification that
4138 * Extended Inquiry Results are only used when Secure
4139 * Simple Pairing is enabled, but some devices violate
4142 * To make these devices work, the internal SSP
4143 * enabled flag needs to be cleared if the remote host
4144 * features do not indicate SSP support */
4145 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4148 if (ev->features[0] & LMP_HOST_SC)
4149 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4152 if (conn->state != BT_CONFIG)
/* Not yet reported to mgmt: fetch the remote name first. */
4155 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4156 struct hci_cp_remote_name_req cp;
4157 memset(&cp, 0, sizeof(cp));
4158 bacpy(&cp.bdaddr, &conn->dst);
4159 cp.pscan_rep_mode = 0x02;
4160 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4161 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4162 mgmt_device_connected(hdev, conn, 0, NULL, 0);
/* No further authentication needed: the link is fully connected. */
4164 if (!hci_outgoing_auth_needed(hdev, conn)) {
4165 conn->state = BT_CONNECTED;
4166 hci_connect_cfm(conn, ev->status);
4167 hci_conn_drop(conn);
4171 hci_dev_unlock(hdev);
/* Handle the HCI Synchronous Connection Complete event: finalize SCO/eSCO
 * connection setup on success, retry with a downgraded packet type on the
 * known negotiation-failure status codes, and close the connection on any
 * other error.
 *
 * NOTE(review): excerpt appears truncated (conn checks, case 0x00 label,
 * goto targets and braces missing) — confirm against the full file.
 */
4174 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4175 struct sk_buff *skb)
4177 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4178 struct hci_conn *conn;
4180 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4184 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4186 if (ev->link_type == ESCO_LINK)
4189 /* When the link type in the event indicates SCO connection
4190 * and lookup of the connection object fails, then check
4191 * if an eSCO connection object exists.
4193 * The core limits the synchronous connections to either
4194 * SCO or eSCO. The eSCO connection is preferred and tried
4195 * to be setup first and until successfully established,
4196 * the link type will be hinted as eSCO.
4198 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4203 switch (ev->status) {
4205 /* The synchronous connection complete event should only be
4206 * sent once per new connection. Receiving a successful
4207 * complete event when the connection status is already
4208 * BT_CONNECTED means that the device is misbehaving and sent
4209 * multiple complete event packets for the same new connection.
4211 * Registering the device more than once can corrupt kernel
4212 * memory, hence upon detecting this invalid event, we report
4213 * an error and ignore the packet.
4215 if (conn->state == BT_CONNECTED) {
4216 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
/* Success: adopt the handle and actual link type from the event. */
4220 conn->handle = __le16_to_cpu(ev->handle);
4221 conn->state = BT_CONNECTED;
4222 conn->type = ev->link_type;
4224 hci_debugfs_create_conn(conn);
4225 hci_conn_add_sysfs(conn);
/* Negotiation failures: retry with a less demanding packet type. */
4228 case 0x10: /* Connection Accept Timeout */
4229 case 0x0d: /* Connection Rejected due to Limited Resources */
4230 case 0x11: /* Unsupported Feature or Parameter Value */
4231 case 0x1c: /* SCO interval rejected */
4232 case 0x1a: /* Unsupported Remote Feature */
4233 case 0x1e: /* Invalid LMP Parameters */
4234 case 0x1f: /* Unspecified error */
4235 case 0x20: /* Unsupported LMP Parameter value */
4237 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4238 (hdev->esco_type & EDR_ESCO_MASK);
4239 if (hci_setup_sync(conn, conn->link->handle))
/* Any other status: the connection is dead. */
4245 conn->state = BT_CLOSED;
4249 hci_connect_cfm(conn, ev->status);
4254 hci_dev_unlock(hdev);
/* Walk EIR (Extended Inquiry Response) TLV-style fields and return the
 * length of the meaningful data (each field is a length byte followed by
 * field_len bytes).
 *
 * NOTE(review): excerpt appears truncated (the zero-length terminator
 * check and return statements are missing) — confirm against the full
 * file.
 */
4257 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4261 while (parsed < eir_len) {
4262 u8 field_len = eir[0];
/* Advance past the length byte plus the field payload. */
4267 parsed += field_len + 1;
4268 eir += field_len + 1;
/* Handle the HCI Extended Inquiry Result event: for each response, update
 * the inquiry cache (marking the name known if the EIR carries a complete
 * name) and report the device plus its EIR data to mgmt.
 *
 * NOTE(review): excerpt appears truncated (locals such as name_known/
 * eir_len/flags and some braces missing) — confirm against the full
 * file.
 */
4274 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4275 struct sk_buff *skb)
4277 struct inquiry_data data;
4278 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4279 int num_rsp = *((__u8 *) skb->data);
4282 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Bail out on an empty or short event. */
4284 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
/* Suppress discovery reporting during periodic inquiry. */
4287 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4292 for (; num_rsp; num_rsp--, info++) {
4296 bacpy(&data.bdaddr, &info->bdaddr);
4297 data.pscan_rep_mode = info->pscan_rep_mode;
4298 data.pscan_period_mode = info->pscan_period_mode;
4299 data.pscan_mode = 0x00;
4300 memcpy(data.dev_class, info->dev_class, 3);
4301 data.clock_offset = info->clock_offset;
4302 data.rssi = info->rssi;
/* EIR results imply the remote uses Secure Simple Pairing. */
4303 data.ssp_mode = 0x01;
/* With mgmt active, a complete name in the EIR means no name request
 * is needed later. */
4305 if (hci_dev_test_flag(hdev, HCI_MGMT))
4306 name_known = eir_get_data(info->data,
4308 EIR_NAME_COMPLETE, NULL);
4312 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4314 eir_len = eir_get_length(info->data, sizeof(info->data));
4316 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4317 info->dev_class, info->rssi,
4318 flags, info->data, eir_len, NULL, 0);
4321 hci_dev_unlock(hdev);
/* Handle the HCI Encryption Key Refresh Complete event. Only LE links are
 * processed here (BR/EDR is handled through the auth_complete event):
 * promote the security level, disconnect on failure, and confirm either
 * connection setup (BT_CONFIG) or authentication.
 *
 * NOTE(review): excerpt appears truncated (conn check, status test
 * before BT_CONFIG handling, else keyword and braces missing) —
 * confirm against the full file.
 */
4324 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4325 struct sk_buff *skb)
4327 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4328 struct hci_conn *conn;
4330 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4331 __le16_to_cpu(ev->handle));
4335 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4339 /* For BR/EDR the necessary steps are taken through the
4340 * auth_complete event.
4342 if (conn->type != LE_LINK)
4346 conn->sec_level = conn->pending_sec_level;
4348 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Refresh failed on an established link: tear it down. */
4350 if (ev->status && conn->state == BT_CONNECTED) {
4351 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4352 hci_conn_drop(conn);
4356 if (conn->state == BT_CONFIG) {
4358 conn->state = BT_CONNECTED;
4360 hci_connect_cfm(conn, ev->status);
4361 hci_conn_drop(conn);
4363 hci_auth_cfm(conn, ev->status);
/* Hold/drop pair refreshes the reference while bumping disc_timeout. */
4365 hci_conn_hold(conn);
4366 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4367 hci_conn_drop(conn);
4371 hci_dev_unlock(hdev);
/* Compute the authentication requirements to use for SSP pairing, merging
 * the remote's stated requirement with our own. Bit 0 of the auth value
 * is the MITM-protection bit; the &0x01 / |0x01 / &~0x01 operations below
 * manipulate exactly that bit.
 */
4374 static u8 hci_get_auth_req(struct hci_conn *conn)
4376 /* If remote requests no-bonding follow that lead */
4377 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4378 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4379 return conn->remote_auth | (conn->auth_type & 0x01);
4381 /* If both remote and local have enough IO capabilities, require
/* ... MITM protection (both sides can participate in verification). */
4384 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4385 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4386 return conn->remote_auth | 0x01;
4388 /* No MITM protection possible so ignore remote requirement */
4389 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Decide whether usable BR/EDR OOB pairing data is present for the peer.
 * With Secure Connections the stored 'present' value is trusted unless
 * SC-Only mode requires valid P-256 values; without SC, valid P-192
 * values are required.
 *
 * NOTE(review): excerpt appears truncated (the 'if (!data) return'
 * guard and the final return statements are missing) — confirm against
 * the full file.
 */
4392 static u8 bredr_oob_data_present(struct hci_conn *conn)
4394 struct hci_dev *hdev = conn->hdev;
4395 struct oob_data *data;
4397 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4401 if (bredr_sc_enabled(hdev)) {
4402 /* When Secure Connections is enabled, then just
4403 * return the present value stored with the OOB
4404 * data. The stored value contains the right present
4405 * information. However it can only be trusted when
4406 * not in Secure Connection Only mode.
4408 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4409 return data->present;
4411 /* When Secure Connections Only mode is enabled, then
4412 * the P-256 values are required. If they are not
4413 * available, then do not declare that OOB data is
4416 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4417 !memcmp(data->hash256, ZERO_KEY, 16))
4423 /* When Secure Connections is not enabled or actually
4424 * not supported by the hardware, then check that if
4425 * P-192 data values are present.
4427 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4428 !memcmp(data->hash192, ZERO_KEY, 16))
4434 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4436 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4437 struct hci_conn *conn;
4439 BT_DBG("%s", hdev->name);
4443 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4447 hci_conn_hold(conn);
4449 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4452 /* Allow pairing if we're pairable, the initiators of the
4453 * pairing or if the remote is not requesting bonding.
4455 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4456 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4457 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4458 struct hci_cp_io_capability_reply cp;
4460 bacpy(&cp.bdaddr, &ev->bdaddr);
4461 /* Change the IO capability from KeyboardDisplay
4462 * to DisplayYesNo as it is not supported by BT spec. */
4463 cp.capability = (conn->io_capability == 0x04) ?
4464 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4466 /* If we are initiators, there is no remote information yet */
4467 if (conn->remote_auth == 0xff) {
4468 /* Request MITM protection if our IO caps allow it
4469 * except for the no-bonding case.
4471 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4472 conn->auth_type != HCI_AT_NO_BONDING)
4473 conn->auth_type |= 0x01;
4475 conn->auth_type = hci_get_auth_req(conn);
4478 /* If we're not bondable, force one of the non-bondable
4479 * authentication requirement values.
4481 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4482 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4484 cp.authentication = conn->auth_type;
4485 cp.oob_data = bredr_oob_data_present(conn);
4487 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4490 struct hci_cp_io_capability_neg_reply cp;
4492 bacpy(&cp.bdaddr, &ev->bdaddr);
4493 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4495 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4500 hci_dev_unlock(hdev);
4503 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4505 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4506 struct hci_conn *conn;
4508 BT_DBG("%s", hdev->name);
4512 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4516 conn->remote_cap = ev->capability;
4517 conn->remote_auth = ev->authentication;
4520 hci_dev_unlock(hdev);
4523 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4524 struct sk_buff *skb)
4526 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4527 int loc_mitm, rem_mitm, confirm_hint = 0;
4528 struct hci_conn *conn;
4530 BT_DBG("%s", hdev->name);
4534 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4537 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4541 loc_mitm = (conn->auth_type & 0x01);
4542 rem_mitm = (conn->remote_auth & 0x01);
4544 /* If we require MITM but the remote device can't provide that
4545 * (it has NoInputNoOutput) then reject the confirmation
4546 * request. We check the security level here since it doesn't
4547 * necessarily match conn->auth_type.
4549 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4550 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4551 BT_DBG("Rejecting request: remote device can't provide MITM");
4552 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4553 sizeof(ev->bdaddr), &ev->bdaddr);
4557 /* If no side requires MITM protection; auto-accept */
4558 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4559 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4561 /* If we're not the initiators request authorization to
4562 * proceed from user space (mgmt_user_confirm with
4563 * confirm_hint set to 1). The exception is if neither
4564 * side had MITM or if the local IO capability is
4565 * NoInputNoOutput, in which case we do auto-accept
4567 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4568 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4569 (loc_mitm || rem_mitm)) {
4570 BT_DBG("Confirming auto-accept as acceptor");
4575 BT_DBG("Auto-accept of user confirmation with %ums delay",
4576 hdev->auto_accept_delay);
4578 if (hdev->auto_accept_delay > 0) {
4579 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4580 queue_delayed_work(conn->hdev->workqueue,
4581 &conn->auto_accept_work, delay);
4585 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4586 sizeof(ev->bdaddr), &ev->bdaddr);
4591 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4592 le32_to_cpu(ev->passkey), confirm_hint);
4595 hci_dev_unlock(hdev);
4598 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4599 struct sk_buff *skb)
4601 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4603 BT_DBG("%s", hdev->name);
4605 if (hci_dev_test_flag(hdev, HCI_MGMT))
4606 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4609 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4610 struct sk_buff *skb)
4612 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4613 struct hci_conn *conn;
4615 BT_DBG("%s", hdev->name);
4617 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4621 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4622 conn->passkey_entered = 0;
4624 if (hci_dev_test_flag(hdev, HCI_MGMT))
4625 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4626 conn->dst_type, conn->passkey_notify,
4627 conn->passkey_entered);
4630 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4632 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4633 struct hci_conn *conn;
4635 BT_DBG("%s", hdev->name);
4637 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4642 case HCI_KEYPRESS_STARTED:
4643 conn->passkey_entered = 0;
4646 case HCI_KEYPRESS_ENTERED:
4647 conn->passkey_entered++;
4650 case HCI_KEYPRESS_ERASED:
4651 conn->passkey_entered--;
4654 case HCI_KEYPRESS_CLEARED:
4655 conn->passkey_entered = 0;
4658 case HCI_KEYPRESS_COMPLETED:
4662 if (hci_dev_test_flag(hdev, HCI_MGMT))
4663 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4664 conn->dst_type, conn->passkey_notify,
4665 conn->passkey_entered);
4668 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4669 struct sk_buff *skb)
4671 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4672 struct hci_conn *conn;
4674 BT_DBG("%s", hdev->name);
4678 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4682 /* Reset the authentication requirement to unknown */
4683 conn->remote_auth = 0xff;
4685 /* To avoid duplicate auth_failed events to user space we check
4686 * the HCI_CONN_AUTH_PEND flag which will be set if we
4687 * initiated the authentication. A traditional auth_complete
4688 * event gets always produced as initiator and is also mapped to
4689 * the mgmt_auth_failed event */
4690 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4691 mgmt_auth_failed(conn, ev->status);
4693 hci_conn_drop(conn);
4696 hci_dev_unlock(hdev);
4699 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4700 struct sk_buff *skb)
4702 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4703 struct inquiry_entry *ie;
4704 struct hci_conn *conn;
4706 BT_DBG("%s", hdev->name);
4710 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4712 memcpy(conn->features[1], ev->features, 8);
4714 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4716 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4718 hci_dev_unlock(hdev);
4721 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4722 struct sk_buff *skb)
4724 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4725 struct oob_data *data;
4727 BT_DBG("%s", hdev->name);
4731 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4734 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4736 struct hci_cp_remote_oob_data_neg_reply cp;
4738 bacpy(&cp.bdaddr, &ev->bdaddr);
4739 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4744 if (bredr_sc_enabled(hdev)) {
4745 struct hci_cp_remote_oob_ext_data_reply cp;
4747 bacpy(&cp.bdaddr, &ev->bdaddr);
4748 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4749 memset(cp.hash192, 0, sizeof(cp.hash192));
4750 memset(cp.rand192, 0, sizeof(cp.rand192));
4752 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4753 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4755 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4756 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4758 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4761 struct hci_cp_remote_oob_data_reply cp;
4763 bacpy(&cp.bdaddr, &ev->bdaddr);
4764 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4765 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4767 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4772 hci_dev_unlock(hdev);
4775 #if IS_ENABLED(CONFIG_BT_HS)
4776 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4778 struct hci_ev_channel_selected *ev = (void *)skb->data;
4779 struct hci_conn *hcon;
4781 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4783 skb_pull(skb, sizeof(*ev));
4785 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4789 amp_read_loc_assoc_final_data(hdev, hcon);
4792 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4793 struct sk_buff *skb)
4795 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4796 struct hci_conn *hcon, *bredr_hcon;
4798 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4803 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4805 hci_dev_unlock(hdev);
4809 if (!hcon->amp_mgr) {
4810 hci_dev_unlock(hdev);
4816 hci_dev_unlock(hdev);
4820 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4822 hcon->state = BT_CONNECTED;
4823 bacpy(&hcon->dst, &bredr_hcon->dst);
4825 hci_conn_hold(hcon);
4826 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4827 hci_conn_drop(hcon);
4829 hci_debugfs_create_conn(hcon);
4830 hci_conn_add_sysfs(hcon);
4832 amp_physical_cfm(bredr_hcon, hcon);
4834 hci_dev_unlock(hdev);
4837 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4839 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4840 struct hci_conn *hcon;
4841 struct hci_chan *hchan;
4842 struct amp_mgr *mgr;
4844 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4845 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4848 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4852 /* Create AMP hchan */
4853 hchan = hci_chan_create(hcon);
4857 hchan->handle = le16_to_cpu(ev->handle);
4860 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4862 mgr = hcon->amp_mgr;
4863 if (mgr && mgr->bredr_chan) {
4864 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4866 l2cap_chan_lock(bredr_chan);
4868 bredr_chan->conn->mtu = hdev->block_mtu;
4869 l2cap_logical_cfm(bredr_chan, hchan, 0);
4870 hci_conn_hold(hcon);
4872 l2cap_chan_unlock(bredr_chan);
4876 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4877 struct sk_buff *skb)
4879 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4880 struct hci_chan *hchan;
4882 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4883 le16_to_cpu(ev->handle), ev->status);
4890 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4891 if (!hchan || !hchan->amp)
4894 amp_destroy_logical_link(hchan, ev->reason);
4897 hci_dev_unlock(hdev);
4900 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4901 struct sk_buff *skb)
4903 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4904 struct hci_conn *hcon;
4906 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4913 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4914 if (hcon && hcon->type == AMP_LINK) {
4915 hcon->state = BT_CLOSED;
4916 hci_disconn_cfm(hcon, ev->reason);
4920 hci_dev_unlock(hdev);
4924 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
4925 u8 bdaddr_type, bdaddr_t *local_rpa)
4928 conn->dst_type = bdaddr_type;
4929 conn->resp_addr_type = bdaddr_type;
4930 bacpy(&conn->resp_addr, bdaddr);
4932 /* Check if the controller has set a Local RPA then it must be
4933 * used instead or hdev->rpa.
4935 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
4936 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4937 bacpy(&conn->init_addr, local_rpa);
4938 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
4939 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4940 bacpy(&conn->init_addr, &conn->hdev->rpa);
4942 hci_copy_identity_address(conn->hdev, &conn->init_addr,
4943 &conn->init_addr_type);
4946 conn->resp_addr_type = conn->hdev->adv_addr_type;
4947 /* Check if the controller has set a Local RPA then it must be
4948 * used instead or hdev->rpa.
4950 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
4951 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
4952 bacpy(&conn->resp_addr, local_rpa);
4953 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
4954 /* In case of ext adv, resp_addr will be updated in
4955 * Adv Terminated event.
4957 if (!ext_adv_capable(conn->hdev))
4958 bacpy(&conn->resp_addr,
4959 &conn->hdev->random_addr);
4961 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
4964 conn->init_addr_type = bdaddr_type;
4965 bacpy(&conn->init_addr, bdaddr);
4967 /* For incoming connections, set the default minimum
4968 * and maximum connection interval. They will be used
4969 * to check if the parameters are in range and if not
4970 * trigger the connection update procedure.
4972 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
4973 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
4977 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
4978 bdaddr_t *bdaddr, u8 bdaddr_type,
4979 bdaddr_t *local_rpa, u8 role, u16 handle,
4980 u16 interval, u16 latency,
4981 u16 supervision_timeout)
4983 struct hci_conn_params *params;
4984 struct hci_conn *conn;
4985 struct smp_irk *irk;
4990 /* All controllers implicitly stop advertising in the event of a
4991 * connection, so ensure that the state bit is cleared.
4993 hci_dev_clear_flag(hdev, HCI_LE_ADV);
4995 conn = hci_lookup_le_connect(hdev);
4997 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
4999 bt_dev_err(hdev, "no memory for new connection");
5003 conn->dst_type = bdaddr_type;
5005 /* If we didn't have a hci_conn object previously
5006 * but we're in master role this must be something
5007 * initiated using a white list. Since white list based
5008 * connections are not "first class citizens" we don't
5009 * have full tracking of them. Therefore, we go ahead
5010 * with a "best effort" approach of determining the
5011 * initiator address based on the HCI_PRIVACY flag.
5014 conn->resp_addr_type = bdaddr_type;
5015 bacpy(&conn->resp_addr, bdaddr);
5016 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5017 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5018 bacpy(&conn->init_addr, &hdev->rpa);
5020 hci_copy_identity_address(hdev,
5022 &conn->init_addr_type);
5026 cancel_delayed_work(&conn->le_conn_timeout);
5029 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5031 /* Lookup the identity address from the stored connection
5032 * address and address type.
5034 * When establishing connections to an identity address, the
5035 * connection procedure will store the resolvable random
5036 * address first. Now if it can be converted back into the
5037 * identity address, start using the identity address from
5040 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5042 bacpy(&conn->dst, &irk->bdaddr);
5043 conn->dst_type = irk->addr_type;
5047 hci_le_conn_failed(conn, status);
5051 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5052 addr_type = BDADDR_LE_PUBLIC;
5054 addr_type = BDADDR_LE_RANDOM;
5056 /* Drop the connection if the device is blocked */
5057 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
5058 hci_conn_drop(conn);
5062 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5063 mgmt_device_connected(hdev, conn, 0, NULL, 0);
5065 conn->sec_level = BT_SECURITY_LOW;
5066 conn->handle = handle;
5067 conn->state = BT_CONFIG;
5069 conn->le_conn_interval = interval;
5070 conn->le_conn_latency = latency;
5071 conn->le_supv_timeout = supervision_timeout;
5073 hci_debugfs_create_conn(conn);
5074 hci_conn_add_sysfs(conn);
5076 /* The remote features procedure is defined for master
5077 * role only. So only in case of an initiated connection
5078 * request the remote features.
5080 * If the local controller supports slave-initiated features
5081 * exchange, then requesting the remote features in slave
5082 * role is possible. Otherwise just transition into the
5083 * connected state without requesting the remote features.
5086 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
5087 struct hci_cp_le_read_remote_features cp;
5089 cp.handle = __cpu_to_le16(conn->handle);
5091 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5094 hci_conn_hold(conn);
5096 conn->state = BT_CONNECTED;
5097 hci_connect_cfm(conn, status);
5100 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5103 list_del_init(¶ms->action);
5105 hci_conn_drop(params->conn);
5106 hci_conn_put(params->conn);
5107 params->conn = NULL;
5112 hci_update_background_scan(hdev);
5113 hci_dev_unlock(hdev);
5116 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5118 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5120 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5122 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5123 NULL, ev->role, le16_to_cpu(ev->handle),
5124 le16_to_cpu(ev->interval),
5125 le16_to_cpu(ev->latency),
5126 le16_to_cpu(ev->supervision_timeout));
5129 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5130 struct sk_buff *skb)
5132 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5134 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5136 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5137 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5138 le16_to_cpu(ev->interval),
5139 le16_to_cpu(ev->latency),
5140 le16_to_cpu(ev->supervision_timeout));
5143 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5145 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5146 struct hci_conn *conn;
5148 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5151 struct adv_info *adv;
5153 adv = hci_find_adv_instance(hdev, ev->handle);
5157 /* Remove advertising as it has been terminated */
5158 hci_remove_adv_instance(hdev, ev->handle);
5159 mgmt_advertising_removed(NULL, hdev, ev->handle);
5164 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5166 struct adv_info *adv_instance;
5168 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5169 bacmp(&conn->resp_addr, BDADDR_ANY))
5172 if (!hdev->cur_adv_instance) {
5173 bacpy(&conn->resp_addr, &hdev->random_addr);
5177 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5179 bacpy(&conn->resp_addr, &adv_instance->random_addr);
5183 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5184 struct sk_buff *skb)
5186 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5187 struct hci_conn *conn;
5189 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5196 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5198 conn->le_conn_interval = le16_to_cpu(ev->interval);
5199 conn->le_conn_latency = le16_to_cpu(ev->latency);
5200 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5203 hci_dev_unlock(hdev);
5206 /* This function requires the caller holds hdev->lock */
5207 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5209 u8 addr_type, u8 adv_type,
5210 bdaddr_t *direct_rpa)
5212 struct hci_conn *conn;
5213 struct hci_conn_params *params;
5215 /* If the event is not connectable don't proceed further */
5216 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5219 /* Ignore if the device is blocked */
5220 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
5223 /* Most controller will fail if we try to create new connections
5224 * while we have an existing one in slave role.
5226 if (hdev->conn_hash.le_num_slave > 0)
5229 /* If we're not connectable only connect devices that we have in
5230 * our pend_le_conns list.
5232 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5237 if (!params->explicit_connect) {
5238 switch (params->auto_connect) {
5239 case HCI_AUTO_CONN_DIRECT:
5240 /* Only devices advertising with ADV_DIRECT_IND are
5241 * triggering a connection attempt. This is allowing
5242 * incoming connections from slave devices.
5244 if (adv_type != LE_ADV_DIRECT_IND)
5247 case HCI_AUTO_CONN_ALWAYS:
5248 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5249 * are triggering a connection attempt. This means
5250 * that incoming connectioms from slave device are
5251 * accepted and also outgoing connections to slave
5252 * devices are established when found.
5260 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5261 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
5263 if (!IS_ERR(conn)) {
5264 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5265 * by higher layer that tried to connect, if no then
5266 * store the pointer since we don't really have any
5267 * other owner of the object besides the params that
5268 * triggered it. This way we can abort the connection if
5269 * the parameters get removed and keep the reference
5270 * count consistent once the connection is established.
5273 if (!params->explicit_connect)
5274 params->conn = hci_conn_get(conn);
5279 switch (PTR_ERR(conn)) {
5281 /* If hci_connect() returns -EBUSY it means there is already
5282 * an LE connection attempt going on. Since controllers don't
5283 * support more than one connection attempt at the time, we
5284 * don't consider this an error case.
5288 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5295 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5296 u8 bdaddr_type, bdaddr_t *direct_addr,
5297 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5300 struct discovery_state *d = &hdev->discovery;
5301 struct smp_irk *irk;
5302 struct hci_conn *conn;
5309 case LE_ADV_DIRECT_IND:
5310 case LE_ADV_SCAN_IND:
5311 case LE_ADV_NONCONN_IND:
5312 case LE_ADV_SCAN_RSP:
5315 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5316 "type: 0x%02x", type);
5320 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5321 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5325 /* Find the end of the data in case the report contains padded zero
5326 * bytes at the end causing an invalid length value.
5328 * When data is NULL, len is 0 so there is no need for extra ptr
5329 * check as 'ptr < data + 0' is already false in such case.
5331 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5332 if (ptr + 1 + *ptr > data + len)
5336 real_len = ptr - data;
5338 /* Adjust for actual length */
5339 if (len != real_len) {
5340 bt_dev_err_ratelimited(hdev, "advertising data len corrected");
5344 /* If the direct address is present, then this report is from
5345 * a LE Direct Advertising Report event. In that case it is
5346 * important to see if the address is matching the local
5347 * controller address.
5350 /* Only resolvable random addresses are valid for these
5351 * kind of reports and others can be ignored.
5353 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5356 /* If the controller is not using resolvable random
5357 * addresses, then this report can be ignored.
5359 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5362 /* If the local IRK of the controller does not match
5363 * with the resolvable random address provided, then
5364 * this report can be ignored.
5366 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5370 /* Check if we need to convert to identity address */
5371 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5373 bdaddr = &irk->bdaddr;
5374 bdaddr_type = irk->addr_type;
5377 /* Check if we have been requested to connect to this device.
5379 * direct_addr is set only for directed advertising reports (it is NULL
5380 * for advertising reports) and is already verified to be RPA above.
5382 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5384 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5385 /* Store report for later inclusion by
5386 * mgmt_device_connected
5388 memcpy(conn->le_adv_data, data, len);
5389 conn->le_adv_data_len = len;
5392 /* Passive scanning shouldn't trigger any device found events,
5393 * except for devices marked as CONN_REPORT for which we do send
5394 * device found events.
5396 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5397 if (type == LE_ADV_DIRECT_IND)
5400 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5401 bdaddr, bdaddr_type))
5404 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5405 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5408 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5409 rssi, flags, data, len, NULL, 0);
5413 /* When receiving non-connectable or scannable undirected
5414 * advertising reports, this means that the remote device is
5415 * not connectable and then clearly indicate this in the
5416 * device found event.
5418 * When receiving a scan response, then there is no way to
5419 * know if the remote device is connectable or not. However
5420 * since scan responses are merged with a previously seen
5421 * advertising report, the flags field from that report
5424 * In the really unlikely case that a controller get confused
5425 * and just sends a scan response event, then it is marked as
5426 * not connectable as well.
5428 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5429 type == LE_ADV_SCAN_RSP)
5430 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5434 /* If there's nothing pending either store the data from this
5435 * event or send an immediate device found event if the data
5436 * should not be stored for later.
5438 if (!ext_adv && !has_pending_adv_report(hdev)) {
5439 /* If the report will trigger a SCAN_REQ store it for
5442 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5443 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5444 rssi, flags, data, len);
5448 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5449 rssi, flags, data, len, NULL, 0);
5453 /* Check if the pending report is for the same device as the new one */
5454 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5455 bdaddr_type == d->last_adv_addr_type);
5457 /* If the pending data doesn't match this report or this isn't a
5458 * scan response (e.g. we got a duplicate ADV_IND) then force
5459 * sending of the pending data.
5461 if (type != LE_ADV_SCAN_RSP || !match) {
5462 /* Send out whatever is in the cache, but skip duplicates */
5464 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5465 d->last_adv_addr_type, NULL,
5466 d->last_adv_rssi, d->last_adv_flags,
5468 d->last_adv_data_len, NULL, 0);
5470 /* If the new report will trigger a SCAN_REQ store it for
5473 if (!ext_adv && (type == LE_ADV_IND ||
5474 type == LE_ADV_SCAN_IND)) {
5475 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5476 rssi, flags, data, len);
5480 /* The advertising reports cannot be merged, so clear
5481 * the pending report and send out a device found event.
5483 clear_pending_adv_report(hdev);
5484 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5485 rssi, flags, data, len, NULL, 0);
5489 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5490 * the new event is a SCAN_RSP. We can therefore proceed with
5491 * sending a merged device found event.
5493 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5494 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5495 d->last_adv_data, d->last_adv_data_len, data, len);
5496 clear_pending_adv_report(hdev);
5499 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5501 u8 num_reports = skb->data[0];
5502 void *ptr = &skb->data[1];
5506 while (num_reports--) {
5507 struct hci_ev_le_advertising_info *ev = ptr;
5510 if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
5511 bt_dev_err(hdev, "Malicious advertising data.");
5515 if (ev->length <= HCI_MAX_AD_LENGTH &&
5516 ev->data + ev->length <= skb_tail_pointer(skb)) {
5517 rssi = ev->data[ev->length];
5518 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5519 ev->bdaddr_type, NULL, 0, rssi,
5520 ev->data, ev->length, false);
5522 bt_dev_err(hdev, "Dropping invalid advertising data");
5525 ptr += sizeof(*ev) + ev->length + 1;
5528 hci_dev_unlock(hdev);
5531 static u8 ext_evt_type_to_legacy(u16 evt_type)
5533 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5535 case LE_LEGACY_ADV_IND:
5537 case LE_LEGACY_ADV_DIRECT_IND:
5538 return LE_ADV_DIRECT_IND;
5539 case LE_LEGACY_ADV_SCAN_IND:
5540 return LE_ADV_SCAN_IND;
5541 case LE_LEGACY_NONCONN_IND:
5542 return LE_ADV_NONCONN_IND;
5543 case LE_LEGACY_SCAN_RSP_ADV:
5544 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5545 return LE_ADV_SCAN_RSP;
5548 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5551 return LE_ADV_INVALID;
5554 if (evt_type & LE_EXT_ADV_CONN_IND) {
5555 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5556 return LE_ADV_DIRECT_IND;
5561 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5562 return LE_ADV_SCAN_RSP;
5564 if (evt_type & LE_EXT_ADV_SCAN_IND)
5565 return LE_ADV_SCAN_IND;
5567 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5568 evt_type & LE_EXT_ADV_DIRECT_IND)
5569 return LE_ADV_NONCONN_IND;
5571 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5574 return LE_ADV_INVALID;
5577 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5579 u8 num_reports = skb->data[0];
5580 void *ptr = &skb->data[1];
5584 while (num_reports--) {
5585 struct hci_ev_le_ext_adv_report *ev = ptr;
5589 evt_type = __le16_to_cpu(ev->evt_type);
5590 legacy_evt_type = ext_evt_type_to_legacy(evt_type);
5591 if (legacy_evt_type != LE_ADV_INVALID) {
5592 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5593 ev->bdaddr_type, NULL, 0, ev->rssi,
5594 ev->data, ev->length,
5595 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5598 ptr += sizeof(*ev) + ev->length;
5601 hci_dev_unlock(hdev);
5604 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5605 struct sk_buff *skb)
5607 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5608 struct hci_conn *conn;
5610 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5614 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5617 memcpy(conn->features[0], ev->features, 8);
5619 if (conn->state == BT_CONFIG) {
5622 /* If the local controller supports slave-initiated
5623 * features exchange, but the remote controller does
5624 * not, then it is possible that the error code 0x1a
5625 * for unsupported remote feature gets returned.
5627 * In this specific case, allow the connection to
5628 * transition into connected state and mark it as
5631 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5632 !conn->out && ev->status == 0x1a)
5635 status = ev->status;
5637 conn->state = BT_CONNECTED;
5638 hci_connect_cfm(conn, status);
5639 hci_conn_drop(conn);
5643 hci_dev_unlock(hdev);
5646 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5648 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5649 struct hci_cp_le_ltk_reply cp;
5650 struct hci_cp_le_ltk_neg_reply neg;
5651 struct hci_conn *conn;
5652 struct smp_ltk *ltk;
5654 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5658 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5662 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5666 if (smp_ltk_is_sc(ltk)) {
5667 /* With SC both EDiv and Rand are set to zero */
5668 if (ev->ediv || ev->rand)
5671 /* For non-SC keys check that EDiv and Rand match */
5672 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
5676 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5677 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5678 cp.handle = cpu_to_le16(conn->handle);
5680 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5682 conn->enc_key_size = ltk->enc_size;
5684 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5686 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5687 * temporary key used to encrypt a connection following
5688 * pairing. It is used during the Encrypted Session Setup to
5689 * distribute the keys. Later, security can be re-established
5690 * using a distributed LTK.
5692 if (ltk->type == SMP_STK) {
5693 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5694 list_del_rcu(<k->list);
5695 kfree_rcu(ltk, rcu);
5697 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5700 hci_dev_unlock(hdev);
5705 neg.handle = ev->handle;
5706 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5707 hci_dev_unlock(hdev);
5710 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5713 struct hci_cp_le_conn_param_req_neg_reply cp;
5715 cp.handle = cpu_to_le16(handle);
5718 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle the LE Remote Connection Parameter Request meta event.
 *
 * Validates the parameters proposed by the remote device, records
 * them (and informs userspace via mgmt) when acting as master, and
 * accepts or rejects the request via the corresponding HCI reply.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	/* Unknown or not-yet-established connections cannot accept. */
	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Reject parameters outside the ranges allowed by the spec. */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		/* Remember the accepted parameters for this peer so
		 * future connections reuse them; store_hint tells
		 * userspace whether the values were persisted.
		 */
		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Echo the (still little-endian) values back in the reply. */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
/* Handle the LE Direct Advertising Report meta event.
 *
 * Each report is a fixed-size hci_ev_le_direct_adv_info entry (no
 * variable-length data), so a single up-front length check covers
 * the whole event before the entries are processed.
 */
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];

	/* Reject events whose claimed report count exceeds the payload. */
	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
		return;

	hci_dev_lock(hdev);

	/* Direct advertising carries no AD data, hence NULL/0. */
	for (; num_reports; num_reports--, ev++)
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0,
				   false);

	hci_dev_unlock(hdev);
}
/* Dispatch an LE Meta event to the handler for its subevent code.
 *
 * The meta header is pulled off the skb first so each subevent
 * handler sees its own payload at skb->data.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
		break;

	default:
		/* Unknown subevents are silently ignored. */
		break;
	}
}
/* Check whether @skb is the event that completes the request whose
 * last command had @opcode (and, when @event is non-zero, whose
 * completion event is @event).
 *
 * On success the event (and, for Command Complete, the cmd_complete)
 * header has been pulled, leaving skb->data at the return parameters
 * that req_complete_skb callbacks expect. Returns false when the skb
 * does not match or is too short to parse.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");
		return false;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* When a specific completion event was requested, only the
	 * event code has to match.
	 */
	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");
		return false;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	/* The completed opcode must be the one we were waiting for. */
	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
/* Main entry point for incoming HCI event packets.
 *
 * Performs request-completion bookkeeping for the command the event
 * may be answering, dispatches the event to its specific handler,
 * then invokes any pending request completion callback. Consumes
 * @skb unconditionally.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* Event code 0x00 is not defined by the specification. */
	if (!event) {
		bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
		goto done;
	}

	/* If this event is the one the outstanding request is waiting
	 * for, resolve the request's completion callbacks now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	/* Command Complete/Status handlers also fill in the request
	 * completion callbacks and the completed opcode/status.
	 */
	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

#if IS_ENABLED(CONFIG_BT_HS)
	/* High Speed (AMP) events, only when BT_HS is configured. */
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Only hand the clone to the callback if it really is
		 * the matching completion event; otherwise pass NULL.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}