/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
47 __u8 status = *((__u8 *) skb->data);
49 BT_DBG("%s status 0x%2.2x", hdev->name, status);
51 /* It is possible that we receive Inquiry Complete event right
52 * before we receive Inquiry Cancel Command Complete event, in
53 * which case the latter event should have status of Command
54 * Disallowed (0x0c). This should not be treated as error, since
55 * we actually achieve what Inquiry Cancel wants to achieve,
56 * which is to end the last Inquiry session.
58 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
59 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
68 clear_bit(HCI_INQUIRY, &hdev->flags);
69 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
70 wake_up_bit(&hdev->flags, HCI_INQUIRY);
73 /* Set discovery state to stopped if we're not doing LE active
76 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
77 hdev->le_scan_type != LE_SCAN_ACTIVE)
78 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
81 hci_conn_check_pending(hdev);
84 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
86 __u8 status = *((__u8 *) skb->data);
88 BT_DBG("%s status 0x%2.2x", hdev->name, status);
93 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
96 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
98 __u8 status = *((__u8 *) skb->data);
100 BT_DBG("%s status 0x%2.2x", hdev->name, status);
105 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
107 hci_conn_check_pending(hdev);
110 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
113 BT_DBG("%s", hdev->name);
116 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
118 struct hci_rp_role_discovery *rp = (void *) skb->data;
119 struct hci_conn *conn;
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
130 conn->role = rp->role;
132 hci_dev_unlock(hdev);
135 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
137 struct hci_rp_read_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
140 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
147 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 conn->link_policy = __le16_to_cpu(rp->policy);
151 hci_dev_unlock(hdev);
154 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156 struct hci_rp_write_link_policy *rp = (void *) skb->data;
157 struct hci_conn *conn;
160 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
171 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
173 conn->link_policy = get_unaligned_le16(sent + 2);
175 hci_dev_unlock(hdev);
178 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
181 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
183 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
188 hdev->link_policy = __le16_to_cpu(rp->policy);
191 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
194 __u8 status = *((__u8 *) skb->data);
197 BT_DBG("%s status 0x%2.2x", hdev->name, status);
202 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
206 hdev->link_policy = get_unaligned_le16(sent);
209 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
211 __u8 status = *((__u8 *) skb->data);
213 BT_DBG("%s status 0x%2.2x", hdev->name, status);
215 clear_bit(HCI_RESET, &hdev->flags);
220 /* Reset all non-persistent flags */
221 hci_dev_clear_volatile_flags(hdev);
223 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
225 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
226 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
228 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
229 hdev->adv_data_len = 0;
231 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
232 hdev->scan_rsp_data_len = 0;
234 hdev->le_scan_type = LE_SCAN_PASSIVE;
236 hdev->ssp_debug_mode = 0;
238 hci_bdaddr_list_clear(&hdev->le_white_list);
239 hci_bdaddr_list_clear(&hdev->le_resolv_list);
242 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
245 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
246 struct hci_cp_read_stored_link_key *sent;
248 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
250 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
254 if (!rp->status && sent->read_all == 0x01) {
255 hdev->stored_max_keys = rp->max_keys;
256 hdev->stored_num_keys = rp->num_keys;
260 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
263 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
265 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
270 if (rp->num_keys <= hdev->stored_num_keys)
271 hdev->stored_num_keys -= rp->num_keys;
273 hdev->stored_num_keys = 0;
276 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
278 __u8 status = *((__u8 *) skb->data);
281 BT_DBG("%s status 0x%2.2x", hdev->name, status);
283 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
289 if (hci_dev_test_flag(hdev, HCI_MGMT))
290 mgmt_set_local_name_complete(hdev, sent, status);
292 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
294 hci_dev_unlock(hdev);
297 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
299 struct hci_rp_read_local_name *rp = (void *) skb->data;
301 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
306 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
307 hci_dev_test_flag(hdev, HCI_CONFIG))
308 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
311 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
313 __u8 status = *((__u8 *) skb->data);
316 BT_DBG("%s status 0x%2.2x", hdev->name, status);
318 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
325 __u8 param = *((__u8 *) sent);
327 if (param == AUTH_ENABLED)
328 set_bit(HCI_AUTH, &hdev->flags);
330 clear_bit(HCI_AUTH, &hdev->flags);
333 if (hci_dev_test_flag(hdev, HCI_MGMT))
334 mgmt_auth_enable_complete(hdev, status);
336 hci_dev_unlock(hdev);
339 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
341 __u8 status = *((__u8 *) skb->data);
345 BT_DBG("%s status 0x%2.2x", hdev->name, status);
350 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
354 param = *((__u8 *) sent);
357 set_bit(HCI_ENCRYPT, &hdev->flags);
359 clear_bit(HCI_ENCRYPT, &hdev->flags);
362 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
364 __u8 status = *((__u8 *) skb->data);
368 BT_DBG("%s status 0x%2.2x", hdev->name, status);
370 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
374 param = *((__u8 *) sent);
379 hdev->discov_timeout = 0;
383 if (param & SCAN_INQUIRY)
384 set_bit(HCI_ISCAN, &hdev->flags);
386 clear_bit(HCI_ISCAN, &hdev->flags);
388 if (param & SCAN_PAGE)
389 set_bit(HCI_PSCAN, &hdev->flags);
391 clear_bit(HCI_PSCAN, &hdev->flags);
394 hci_dev_unlock(hdev);
397 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
399 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
401 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
406 memcpy(hdev->dev_class, rp->dev_class, 3);
408 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
409 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
412 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
414 __u8 status = *((__u8 *) skb->data);
417 BT_DBG("%s status 0x%2.2x", hdev->name, status);
419 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
426 memcpy(hdev->dev_class, sent, 3);
428 if (hci_dev_test_flag(hdev, HCI_MGMT))
429 mgmt_set_class_of_dev_complete(hdev, sent, status);
431 hci_dev_unlock(hdev);
434 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
436 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
439 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
444 setting = __le16_to_cpu(rp->voice_setting);
446 if (hdev->voice_setting == setting)
449 hdev->voice_setting = setting;
451 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
454 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
457 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
460 __u8 status = *((__u8 *) skb->data);
464 BT_DBG("%s status 0x%2.2x", hdev->name, status);
469 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
473 setting = get_unaligned_le16(sent);
475 if (hdev->voice_setting == setting)
478 hdev->voice_setting = setting;
480 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
483 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
486 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
489 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
491 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
496 hdev->num_iac = rp->num_iac;
498 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
501 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
503 __u8 status = *((__u8 *) skb->data);
504 struct hci_cp_write_ssp_mode *sent;
506 BT_DBG("%s status 0x%2.2x", hdev->name, status);
508 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
516 hdev->features[1][0] |= LMP_HOST_SSP;
518 hdev->features[1][0] &= ~LMP_HOST_SSP;
521 if (hci_dev_test_flag(hdev, HCI_MGMT))
522 mgmt_ssp_enable_complete(hdev, sent->mode, status);
525 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
527 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
530 hci_dev_unlock(hdev);
533 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
535 u8 status = *((u8 *) skb->data);
536 struct hci_cp_write_sc_support *sent;
538 BT_DBG("%s status 0x%2.2x", hdev->name, status);
540 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
548 hdev->features[1][0] |= LMP_HOST_SC;
550 hdev->features[1][0] &= ~LMP_HOST_SC;
553 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
555 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
557 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
560 hci_dev_unlock(hdev);
563 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
565 struct hci_rp_read_local_version *rp = (void *) skb->data;
567 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
572 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
573 hci_dev_test_flag(hdev, HCI_CONFIG)) {
574 hdev->hci_ver = rp->hci_ver;
575 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
576 hdev->lmp_ver = rp->lmp_ver;
577 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
578 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
582 static void hci_cc_read_local_commands(struct hci_dev *hdev,
585 struct hci_rp_read_local_commands *rp = (void *) skb->data;
587 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
592 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
593 hci_dev_test_flag(hdev, HCI_CONFIG))
594 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
597 static void hci_cc_read_local_features(struct hci_dev *hdev,
600 struct hci_rp_read_local_features *rp = (void *) skb->data;
602 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
607 memcpy(hdev->features, rp->features, 8);
609 /* Adjust default settings according to features
610 * supported by device. */
612 if (hdev->features[0][0] & LMP_3SLOT)
613 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
615 if (hdev->features[0][0] & LMP_5SLOT)
616 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
618 if (hdev->features[0][1] & LMP_HV2) {
619 hdev->pkt_type |= (HCI_HV2);
620 hdev->esco_type |= (ESCO_HV2);
623 if (hdev->features[0][1] & LMP_HV3) {
624 hdev->pkt_type |= (HCI_HV3);
625 hdev->esco_type |= (ESCO_HV3);
628 if (lmp_esco_capable(hdev))
629 hdev->esco_type |= (ESCO_EV3);
631 if (hdev->features[0][4] & LMP_EV4)
632 hdev->esco_type |= (ESCO_EV4);
634 if (hdev->features[0][4] & LMP_EV5)
635 hdev->esco_type |= (ESCO_EV5);
637 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
638 hdev->esco_type |= (ESCO_2EV3);
640 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
641 hdev->esco_type |= (ESCO_3EV3);
643 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
644 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
647 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
650 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
652 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
657 if (hdev->max_page < rp->max_page)
658 hdev->max_page = rp->max_page;
660 if (rp->page < HCI_MAX_PAGES)
661 memcpy(hdev->features[rp->page], rp->features, 8);
664 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
667 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
669 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
674 hdev->flow_ctl_mode = rp->mode;
677 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
679 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
681 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
686 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
687 hdev->sco_mtu = rp->sco_mtu;
688 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
689 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
691 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
696 hdev->acl_cnt = hdev->acl_pkts;
697 hdev->sco_cnt = hdev->sco_pkts;
699 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
700 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
703 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
705 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
707 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
712 if (test_bit(HCI_INIT, &hdev->flags))
713 bacpy(&hdev->bdaddr, &rp->bdaddr);
715 if (hci_dev_test_flag(hdev, HCI_SETUP))
716 bacpy(&hdev->setup_addr, &rp->bdaddr);
719 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
722 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
724 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
729 if (test_bit(HCI_INIT, &hdev->flags)) {
730 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
731 hdev->page_scan_window = __le16_to_cpu(rp->window);
735 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
738 u8 status = *((u8 *) skb->data);
739 struct hci_cp_write_page_scan_activity *sent;
741 BT_DBG("%s status 0x%2.2x", hdev->name, status);
746 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
750 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
751 hdev->page_scan_window = __le16_to_cpu(sent->window);
754 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
757 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
759 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
764 if (test_bit(HCI_INIT, &hdev->flags))
765 hdev->page_scan_type = rp->type;
768 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
771 u8 status = *((u8 *) skb->data);
774 BT_DBG("%s status 0x%2.2x", hdev->name, status);
779 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
781 hdev->page_scan_type = *type;
784 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
787 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
789 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
794 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
795 hdev->block_len = __le16_to_cpu(rp->block_len);
796 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
798 hdev->block_cnt = hdev->num_blocks;
800 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
801 hdev->block_cnt, hdev->block_len);
804 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
806 struct hci_rp_read_clock *rp = (void *) skb->data;
807 struct hci_cp_read_clock *cp;
808 struct hci_conn *conn;
810 BT_DBG("%s", hdev->name);
812 if (skb->len < sizeof(*rp))
820 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
824 if (cp->which == 0x00) {
825 hdev->clock = le32_to_cpu(rp->clock);
829 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
831 conn->clock = le32_to_cpu(rp->clock);
832 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
836 hci_dev_unlock(hdev);
839 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
842 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
844 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
849 hdev->amp_status = rp->amp_status;
850 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
851 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
852 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
853 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
854 hdev->amp_type = rp->amp_type;
855 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
856 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
857 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
858 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
861 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
864 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
866 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
871 hdev->inq_tx_power = rp->tx_power;
874 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
876 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
877 struct hci_cp_pin_code_reply *cp;
878 struct hci_conn *conn;
880 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
884 if (hci_dev_test_flag(hdev, HCI_MGMT))
885 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
890 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
894 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
896 conn->pin_length = cp->pin_len;
899 hci_dev_unlock(hdev);
902 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
904 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
906 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
910 if (hci_dev_test_flag(hdev, HCI_MGMT))
911 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
914 hci_dev_unlock(hdev);
917 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
920 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
922 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
927 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
928 hdev->le_pkts = rp->le_max_pkt;
930 hdev->le_cnt = hdev->le_pkts;
932 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
935 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
938 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
940 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
945 memcpy(hdev->le_features, rp->features, 8);
948 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
951 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
953 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
958 hdev->adv_tx_power = rp->tx_power;
961 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
963 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
965 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
969 if (hci_dev_test_flag(hdev, HCI_MGMT))
970 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
973 hci_dev_unlock(hdev);
976 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
979 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
981 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
985 if (hci_dev_test_flag(hdev, HCI_MGMT))
986 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
987 ACL_LINK, 0, rp->status);
989 hci_dev_unlock(hdev);
992 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
994 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
996 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1000 if (hci_dev_test_flag(hdev, HCI_MGMT))
1001 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1004 hci_dev_unlock(hdev);
1007 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1008 struct sk_buff *skb)
1010 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1012 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1016 if (hci_dev_test_flag(hdev, HCI_MGMT))
1017 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1018 ACL_LINK, 0, rp->status);
1020 hci_dev_unlock(hdev);
1023 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1024 struct sk_buff *skb)
1026 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1028 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1031 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1032 struct sk_buff *skb)
1034 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1036 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1039 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1041 __u8 status = *((__u8 *) skb->data);
1044 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1049 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1055 bacpy(&hdev->random_addr, sent);
1057 hci_dev_unlock(hdev);
1060 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1062 __u8 status = *((__u8 *) skb->data);
1063 struct hci_cp_le_set_default_phy *cp;
1065 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1070 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1076 hdev->le_tx_def_phys = cp->tx_phys;
1077 hdev->le_rx_def_phys = cp->rx_phys;
1079 hci_dev_unlock(hdev);
1082 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1083 struct sk_buff *skb)
1085 __u8 status = *((__u8 *) skb->data);
1086 struct hci_cp_le_set_adv_set_rand_addr *cp;
1087 struct adv_info *adv_instance;
1092 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1098 if (!hdev->cur_adv_instance) {
1099 /* Store in hdev for instance 0 (Set adv and Directed advs) */
1100 bacpy(&hdev->random_addr, &cp->bdaddr);
1102 adv_instance = hci_find_adv_instance(hdev,
1103 hdev->cur_adv_instance);
1105 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1108 hci_dev_unlock(hdev);
1111 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1113 __u8 *sent, status = *((__u8 *) skb->data);
1115 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1120 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1126 /* If we're doing connection initiation as peripheral. Set a
1127 * timeout in case something goes wrong.
1130 struct hci_conn *conn;
1132 hci_dev_set_flag(hdev, HCI_LE_ADV);
1134 conn = hci_lookup_le_connect(hdev);
1136 queue_delayed_work(hdev->workqueue,
1137 &conn->le_conn_timeout,
1138 conn->conn_timeout);
1140 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1143 hci_dev_unlock(hdev);
1146 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1147 struct sk_buff *skb)
1149 struct hci_cp_le_set_ext_adv_enable *cp;
1150 __u8 status = *((__u8 *) skb->data);
1152 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1157 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1164 struct hci_conn *conn;
1166 hci_dev_set_flag(hdev, HCI_LE_ADV);
1168 conn = hci_lookup_le_connect(hdev);
1170 queue_delayed_work(hdev->workqueue,
1171 &conn->le_conn_timeout,
1172 conn->conn_timeout);
1174 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1177 hci_dev_unlock(hdev);
1180 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1182 struct hci_cp_le_set_scan_param *cp;
1183 __u8 status = *((__u8 *) skb->data);
1185 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1190 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1196 hdev->le_scan_type = cp->type;
1198 hci_dev_unlock(hdev);
1201 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1202 struct sk_buff *skb)
1204 struct hci_cp_le_set_ext_scan_params *cp;
1205 __u8 status = *((__u8 *) skb->data);
1206 struct hci_cp_le_scan_phy_params *phy_param;
1208 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1213 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1217 phy_param = (void *)cp->data;
1221 hdev->le_scan_type = phy_param->type;
1223 hci_dev_unlock(hdev);
1226 static bool has_pending_adv_report(struct hci_dev *hdev)
1228 struct discovery_state *d = &hdev->discovery;
1230 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1233 static void clear_pending_adv_report(struct hci_dev *hdev)
1235 struct discovery_state *d = &hdev->discovery;
1237 bacpy(&d->last_adv_addr, BDADDR_ANY);
1238 d->last_adv_data_len = 0;
1241 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1242 u8 bdaddr_type, s8 rssi, u32 flags,
1245 struct discovery_state *d = &hdev->discovery;
1247 if (len > HCI_MAX_AD_LENGTH)
1250 bacpy(&d->last_adv_addr, bdaddr);
1251 d->last_adv_addr_type = bdaddr_type;
1252 d->last_adv_rssi = rssi;
1253 d->last_adv_flags = flags;
1254 memcpy(d->last_adv_data, data, len);
1255 d->last_adv_data_len = len;
1258 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1263 case LE_SCAN_ENABLE:
1264 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1265 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1266 clear_pending_adv_report(hdev);
1269 case LE_SCAN_DISABLE:
1270 /* We do this here instead of when setting DISCOVERY_STOPPED
1271 * since the latter would potentially require waiting for
1272 * inquiry to stop too.
1274 if (has_pending_adv_report(hdev)) {
1275 struct discovery_state *d = &hdev->discovery;
1277 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1278 d->last_adv_addr_type, NULL,
1279 d->last_adv_rssi, d->last_adv_flags,
1281 d->last_adv_data_len, NULL, 0);
1284 /* Cancel this timer so that we don't try to disable scanning
1285 * when it's already disabled.
1287 cancel_delayed_work(&hdev->le_scan_disable);
1289 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1291 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1292 * interrupted scanning due to a connect request. Mark
1293 * therefore discovery as stopped. If this was not
1294 * because of a connect request advertising might have
1295 * been disabled because of active scanning, so
1296 * re-enable it again if necessary.
1298 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1299 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1300 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1301 hdev->discovery.state == DISCOVERY_FINDING)
1302 hci_req_reenable_advertising(hdev);
1307 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1312 hci_dev_unlock(hdev);
1315 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1316 struct sk_buff *skb)
1318 struct hci_cp_le_set_scan_enable *cp;
1319 __u8 status = *((__u8 *) skb->data);
1321 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1326 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1330 le_set_scan_enable_complete(hdev, cp->enable);
1333 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1334 struct sk_buff *skb)
1336 struct hci_cp_le_set_ext_scan_enable *cp;
1337 __u8 status = *((__u8 *) skb->data);
1339 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1344 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1348 le_set_scan_enable_complete(hdev, cp->enable);
1351 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1352 struct sk_buff *skb)
1354 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1356 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1362 hdev->le_num_of_adv_sets = rp->num_of_sets;
1365 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1366 struct sk_buff *skb)
1368 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1370 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1375 hdev->le_white_list_size = rp->size;
1378 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1379 struct sk_buff *skb)
1381 __u8 status = *((__u8 *) skb->data);
1383 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1388 hci_bdaddr_list_clear(&hdev->le_white_list);
1391 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1392 struct sk_buff *skb)
1394 struct hci_cp_le_add_to_white_list *sent;
1395 __u8 status = *((__u8 *) skb->data);
1397 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1402 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1406 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1410 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1411 struct sk_buff *skb)
1413 struct hci_cp_le_del_from_white_list *sent;
1414 __u8 status = *((__u8 *) skb->data);
1416 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1421 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1425 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1429 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1430 struct sk_buff *skb)
1432 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1434 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1439 memcpy(hdev->le_states, rp->le_states, 8);
1442 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1443 struct sk_buff *skb)
1445 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1447 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1452 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1453 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1456 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1457 struct sk_buff *skb)
1459 struct hci_cp_le_write_def_data_len *sent;
1460 __u8 status = *((__u8 *) skb->data);
1462 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1467 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1471 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1472 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1475 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1476 struct sk_buff *skb)
1478 __u8 status = *((__u8 *) skb->data);
1480 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1485 hci_bdaddr_list_clear(&hdev->le_resolv_list);
1488 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1489 struct sk_buff *skb)
1491 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1493 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1498 hdev->le_resolv_list_size = rp->size;
1501 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1502 struct sk_buff *skb)
1504 __u8 *sent, status = *((__u8 *) skb->data);
1506 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1511 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1518 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1520 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1522 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Maximum Data Length.
 * Caches the controller's maximum supported TX/RX payload octets and
 * times for the LE Data Length Extension feature.
 */
1525 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1526 struct sk_buff *skb)
1528 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1530 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1535 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1536 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1537 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1538 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete handler for Write LE Host Supported.
 * Updates the host-feature bits (features[1][0]) and the HCI_LE_ENABLED
 * flag to mirror the LE / Simultaneous LE+BR/EDR settings that were
 * written to the controller.
 */
1541 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1542 struct sk_buff *skb)
1544 struct hci_cp_write_le_host_supported *sent;
1545 __u8 status = *((__u8 *) skb->data);
1547 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1552 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
/* LE host support enabled: advertise LMP host-LE and set the flag. */
1559 hdev->features[1][0] |= LMP_HOST_LE;
1560 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
/* LE host support disabled: clear feature bit, LE flag and any
 * advertising state that depended on it. */
1562 hdev->features[1][0] &= ~LMP_HOST_LE;
1563 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1564 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
/* Simultaneous LE + BR/EDR host bit, per sent->simul (elided test). */
1568 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1570 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1572 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Advertising Parameters.
 * Remembers which own-address type the controller will advertise with,
 * recovered from the command we sent.
 */
1575 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1577 struct hci_cp_le_set_adv_param *cp;
1578 u8 status = *((u8 *) skb->data);
1580 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1585 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1590 hdev->adv_addr_type = cp->own_address_type;
1591 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Extended Advertising Parameters.
 * Caches the own-address type and the TX power the controller selected:
 * for instance 0 directly in hdev, otherwise in the matching adv_info.
 * Finally re-generates the advertising data, which may embed TX power.
 */
1594 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1596 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1597 struct hci_cp_le_set_ext_adv_params *cp;
1598 struct adv_info *adv_instance;
1600 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1605 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1610 hdev->adv_addr_type = cp->own_addr_type;
1611 if (!hdev->cur_adv_instance) {
1612 /* Store in hdev for instance 0 */
1613 hdev->adv_tx_power = rp->tx_power;
1615 adv_instance = hci_find_adv_instance(hdev,
1616 hdev->cur_adv_instance);
1618 adv_instance->tx_power = rp->tx_power;
1620 /* Update adv data as tx power is known now */
1621 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1622 hci_dev_unlock(hdev);
/* Command Complete handler for Read RSSI.
 * Looks up the connection by handle and records the reported RSSI.
 */
1625 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1627 struct hci_rp_read_rssi *rp = (void *) skb->data;
1628 struct hci_conn *conn;
1630 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1637 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1639 conn->rssi = rp->rssi;
1641 hci_dev_unlock(hdev);
/* Command Complete handler for Read Transmit Power Level.
 * Uses the 'type' field of the command we sent to decide whether the
 * reply carries the current or the maximum TX power for the
 * connection identified by rp->handle.
 */
1644 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1646 struct hci_cp_read_tx_power *sent;
1647 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1648 struct hci_conn *conn;
1650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1655 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1661 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
/* Case labels (current vs. max power) are elided in this excerpt. */
1665 switch (sent->type) {
1667 conn->tx_power = rp->tx_power;
1670 conn->max_tx_power = rp->tx_power;
1675 hci_dev_unlock(hdev);
/* Command Complete handler for Write SSP Debug Mode.
 * Caches the debug-mode byte that was written to the controller.
 */
1678 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1680 u8 status = *((u8 *) skb->data);
1683 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1688 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1690 hdev->ssp_debug_mode = *mode;
/* Command Status handler for HCI_OP_INQUIRY.
 * On failure any pending connection attempts are re-checked; on
 * success the HCI_INQUIRY device flag is set (guard elided here).
 */
1693 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1695 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1698 hci_conn_check_pending(hdev);
1702 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status handler for Create Connection (BR/EDR ACL).
 * On error: if an outgoing conn object exists in BT_CONNECT it is
 * closed and the failure reported, except that status 0x0c (Command
 * Disallowed) on an early attempt parks it in BT_CONNECT2 for retry.
 * On success: ensures a conn object exists for the address (creating
 * one if the command was issued outside the conn machinery).
 */
1705 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1707 struct hci_cp_create_conn *cp;
1708 struct hci_conn *conn;
1710 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1712 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1718 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1720 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1723 if (conn && conn->state == BT_CONNECT) {
/* 0x0c = Command Disallowed; allow up to two retries. */
1724 if (status != 0x0c || conn->attempt > 2) {
1725 conn->state = BT_CLOSED;
1726 hci_connect_cfm(conn, status);
1729 conn->state = BT_CONNECT2;
1733 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1736 bt_dev_err(hdev, "no memory for new connection");
1740 hci_dev_unlock(hdev);
/* Command Status handler for Add SCO Connection.
 * On failure, finds the ACL link whose handle was in the command and
 * tears down its attached SCO link, notifying the upper layers.
 */
1743 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1745 struct hci_cp_add_sco *cp;
1746 struct hci_conn *acl, *sco;
1749 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1754 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1758 handle = __le16_to_cpu(cp->handle);
1760 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1764 acl = hci_conn_hash_lookup_handle(hdev, handle);
/* sco is taken from the ACL's link (assignment elided in excerpt). */
1768 sco->state = BT_CLOSED;
1770 hci_connect_cfm(sco, status);
1775 hci_dev_unlock(hdev);
/* Command Status handler for Authentication Requested.
 * On failure during connection setup (BT_CONFIG) the connect attempt
 * is confirmed with the error and the reference dropped.
 */
1778 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1780 struct hci_cp_auth_requested *cp;
1781 struct hci_conn *conn;
1783 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1788 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1794 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1796 if (conn->state == BT_CONFIG) {
1797 hci_connect_cfm(conn, status);
1798 hci_conn_drop(conn);
1802 hci_dev_unlock(hdev);
/* Command Status handler for Set Connection Encryption.
 * Same failure handling as hci_cs_auth_requested: abort a connection
 * still in BT_CONFIG and release the reference.
 */
1805 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1807 struct hci_cp_set_conn_encrypt *cp;
1808 struct hci_conn *conn;
1810 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1815 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1821 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1823 if (conn->state == BT_CONFIG) {
1824 hci_connect_cfm(conn, status);
1825 hci_conn_drop(conn);
1829 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication
 * before it can be reported as fully established.  Only relevant for
 * outgoing links in BT_CONFIG; SDP-level security never requires it,
 * and legacy (non-SSP) links only for MEDIUM/HIGH/FIPS or MITM.
 * Returns non-zero when authentication must be requested.
 */
1832 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1833 struct hci_conn *conn)
1835 if (conn->state != BT_CONFIG || !conn->out)
1838 if (conn->pending_sec_level == BT_SECURITY_SDP)
1841 /* Only request authentication for SSP connections or non-SSP
1842 * devices with sec_level MEDIUM or HIGH or if MITM protection
1845 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1846 conn->pending_sec_level != BT_SECURITY_FIPS &&
1847 conn->pending_sec_level != BT_SECURITY_HIGH &&
1848 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry-cache entry, reusing the
 * page-scan parameters and clock offset learned during inquiry so the
 * controller can page the device faster.  Returns hci_send_cmd()'s
 * result (0 on success).
 */
1854 static int hci_resolve_name(struct hci_dev *hdev,
1855 struct inquiry_entry *e)
1857 struct hci_cp_remote_name_req cp;
1859 memset(&cp, 0, sizeof(cp));
1861 bacpy(&cp.bdaddr, &e->data.bdaddr);
1862 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1863 cp.pscan_mode = e->data.pscan_mode;
1864 cp.clock_offset = e->data.clock_offset;
1866 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next discovered device that still
 * needs its name.  Returns true when a request was successfully sent
 * (entry moved to NAME_PENDING), false when nothing is left to resolve.
 */
1869 static bool hci_resolve_next_name(struct hci_dev *hdev)
1871 struct discovery_state *discov = &hdev->discovery;
1872 struct inquiry_entry *e;
1874 if (list_empty(&discov->resolve))
1877 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1881 if (hci_resolve_name(hdev, e) == 0) {
1882 e->name_state = NAME_PENDING;
/* Handle a resolved (or failed) remote name during discovery.
 * Reports the mgmt "connected" event if the conn just became usable,
 * records the name result in the inquiry cache, and either continues
 * resolving the next pending name or marks discovery as stopped.
 * @name/@name_len may be NULL/0 when resolution failed.
 */
1889 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1890 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1892 struct discovery_state *discov = &hdev->discovery;
1893 struct inquiry_entry *e;
1895 /* Update the mgmt connected state if necessary. Be careful with
1896 * conn objects that exist but are not (yet) connected however.
1897 * Only those in BT_CONFIG or BT_CONNECTED states can be
1898 * considered connected.
1901 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1902 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1903 mgmt_device_connected(hdev, conn, 0, name, name_len);
1905 if (discov->state == DISCOVERY_STOPPED)
1908 if (discov->state == DISCOVERY_STOPPING)
1909 goto discov_complete;
1911 if (discov->state != DISCOVERY_RESOLVING)
1914 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1915 /* If the device was not found in a list of found devices names of which
1916 * are pending. there is no need to continue resolving a next name as it
1917 * will be done upon receiving another Remote Name Request Complete
/* Record the outcome for this entry: known name is forwarded to mgmt,
 * a failed lookup leaves the entry marked NAME_NOT_KNOWN. */
1924 e->name_state = NAME_KNOWN;
1925 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1926 e->data.rssi, name, name_len);
1928 e->name_state = NAME_NOT_KNOWN;
1931 if (hci_resolve_next_name(hdev))
1935 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for Remote Name Request.
 * On success nothing is done here - the Remote Name Request Complete
 * event carries the result.  On failure the pending-name bookkeeping
 * is unwound and, if the connection still needs it, an Authentication
 * Requested command is issued immediately.
 */
1938 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1940 struct hci_cp_remote_name_req *cp;
1941 struct hci_conn *conn;
1943 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1945 /* If successful wait for the name req complete event before
1946 * checking for the need to do authentication */
1950 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1956 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1958 if (hci_dev_test_flag(hdev, HCI_MGMT))
1959 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1964 if (!hci_outgoing_auth_needed(hdev, conn))
/* Request authentication now, marking ourselves initiator. */
1967 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1968 struct hci_cp_auth_requested auth_cp;
1970 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1972 auth_cp.handle = __cpu_to_le16(conn->handle);
1973 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1974 sizeof(auth_cp), &auth_cp);
1978 hci_dev_unlock(hdev);
/* Command Status handler for Read Remote Supported Features.
 * A failure while the link is still in BT_CONFIG aborts the connection
 * setup and releases the reference taken for it.
 */
1981 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1983 struct hci_cp_read_remote_features *cp;
1984 struct hci_conn *conn;
1986 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1991 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1997 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1999 if (conn->state == BT_CONFIG) {
2000 hci_connect_cfm(conn, status);
2001 hci_conn_drop(conn);
2005 hci_dev_unlock(hdev);
/* Command Status handler for Read Remote Extended Features.
 * Identical failure handling to hci_cs_read_remote_features.
 */
2008 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2010 struct hci_cp_read_remote_ext_features *cp;
2011 struct hci_conn *conn;
2013 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2018 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2024 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2026 if (conn->state == BT_CONFIG) {
2027 hci_connect_cfm(conn, status);
2028 hci_conn_drop(conn);
2032 hci_dev_unlock(hdev);
/* Command Status handler for Setup Synchronous Connection (eSCO).
 * On failure, resolves the ACL link from the command's handle and
 * closes the SCO/eSCO link that was being set up on top of it.
 */
2035 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2037 struct hci_cp_setup_sync_conn *cp;
2038 struct hci_conn *acl, *sco;
2041 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2046 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2050 handle = __le16_to_cpu(cp->handle);
2052 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2056 acl = hci_conn_hash_lookup_handle(hdev, handle);
/* sco comes from the ACL's attached link (assignment elided). */
2060 sco->state = BT_CLOSED;
2062 hci_connect_cfm(sco, status);
2067 hci_dev_unlock(hdev);
/* Command Status handler for Sniff Mode.
 * On failure clears the mode-change-pending bit and, if a SCO setup
 * was waiting on the mode change, lets it proceed with the error.
 */
2070 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2072 struct hci_cp_sniff_mode *cp;
2073 struct hci_conn *conn;
2075 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2080 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2086 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2088 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2090 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2091 hci_sco_setup(conn, status);
2094 hci_dev_unlock(hdev);
/* Command Status handler for Exit Sniff Mode.
 * Mirror image of hci_cs_sniff_mode's failure handling.
 */
2097 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2099 struct hci_cp_exit_sniff_mode *cp;
2100 struct hci_conn *conn;
2102 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2107 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2113 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2115 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2117 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2118 hci_sco_setup(conn, status);
2121 hci_dev_unlock(hdev);
/* Command Status handler for Disconnect.
 * On failure informs the mgmt layer that the disconnect attempt for
 * this connection did not go through.
 */
2124 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2126 struct hci_cp_disconnect *cp;
2127 struct hci_conn *conn;
2132 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2138 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2140 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2141 conn->dst_type, status);
2143 hci_dev_unlock(hdev);
/* Common Command Status bookkeeping for LE Create Connection and
 * LE Extended Create Connection.  Finds the matching pending LE conn,
 * snapshots the initiator/responder addresses SMP will later need, and
 * arms a connection timeout unless the controller is connecting via
 * the white list (which has its own lifecycle).
 */
2146 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2147 u8 peer_addr_type, u8 own_address_type,
2150 struct hci_conn *conn;
2152 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2157 /* Store the initiator and responder address information which
2158 * is needed for SMP. These values will not change during the
2159 * lifetime of the connection.
2161 conn->init_addr_type = own_address_type;
2162 if (own_address_type == ADDR_LE_DEV_RANDOM)
2163 bacpy(&conn->init_addr, &hdev->random_addr);
2165 bacpy(&conn->init_addr, &hdev->bdaddr);
2167 conn->resp_addr_type = peer_addr_type;
2168 bacpy(&conn->resp_addr, peer_addr);
2170 /* We don't want the connection attempt to stick around
2171 * indefinitely since LE doesn't have a page timeout concept
2172 * like BR/EDR. Set a timer for any connection that doesn't use
2173 * the white list for connecting.
2175 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2176 queue_delayed_work(conn->hdev->workqueue,
2177 &conn->le_conn_timeout,
2178 conn->conn_timeout);
/* Command Status handler for LE Create Connection.
 * Failure paths are handled elsewhere (hci_le_conn_failed via the
 * request completion callbacks); here we only record addresses and
 * start the timeout through the shared cs_le_create_conn() helper.
 */
2181 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2183 struct hci_cp_le_create_conn *cp;
2185 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2187 /* All connection failure handling is taken care of by the
2188 * hci_le_conn_failed function which is triggered by the HCI
2189 * request completion callbacks used for connecting.
2194 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2200 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2201 cp->own_address_type, cp->filter_policy);
2203 hci_dev_unlock(hdev);
/* Command Status handler for LE Extended Create Connection.
 * Same flow as hci_cs_le_create_conn, differing only in the command
 * struct (own_addr_type field name) it decodes.
 */
2206 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2208 struct hci_cp_le_ext_create_conn *cp;
2210 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2212 /* All connection failure handling is taken care of by the
2213 * hci_le_conn_failed function which is triggered by the HCI
2214 * request completion callbacks used for connecting.
2219 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2225 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2226 cp->own_addr_type, cp->filter_policy);
2228 hci_dev_unlock(hdev);
/* Command Status handler for LE Read Remote Features.
 * A failure while the LE link is still in BT_CONFIG aborts connection
 * setup, like its BR/EDR counterparts above.
 */
2231 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2233 struct hci_cp_le_read_remote_features *cp;
2234 struct hci_conn *conn;
2236 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2241 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2247 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2249 if (conn->state == BT_CONFIG) {
2250 hci_connect_cfm(conn, status);
2251 hci_conn_drop(conn);
2255 hci_dev_unlock(hdev);
/* Command Status handler for LE Start Encryption.
 * If starting encryption fails on a live (BT_CONNECTED) link, the
 * link cannot be trusted any more and is disconnected with an
 * authentication-failure reason.
 */
2258 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2260 struct hci_cp_le_start_enc *cp;
2261 struct hci_conn *conn;
2263 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2270 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2274 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2278 if (conn->state != BT_CONNECTED)
2281 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2282 hci_conn_drop(conn);
2285 hci_dev_unlock(hdev);
/* Command Status handler for Switch Role.
 * On failure, drops the role-switch-pending marker so future switch
 * attempts are not blocked.
 */
2288 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2290 struct hci_cp_switch_role *cp;
2291 struct hci_conn *conn;
2293 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2298 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2304 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2306 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2308 hci_dev_unlock(hdev);
/* Inquiry Complete event handler.
 * Clears HCI_INQUIRY (waking anyone sleeping on it), then - when mgmt
 * drives discovery - either transitions to name resolution for entries
 * still needing names, or declares discovery stopped, taking care not
 * to end discovery while a simultaneous LE scan is still running.
 */
2311 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2313 __u8 status = *((__u8 *) skb->data);
2314 struct discovery_state *discov = &hdev->discovery;
2315 struct inquiry_entry *e;
2317 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2319 hci_conn_check_pending(hdev);
2321 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
/* Pair the implicit barrier of test_and_clear with the waiter side. */
2324 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2325 wake_up_bit(&hdev->flags, HCI_INQUIRY)_
2327 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2332 if (discov->state != DISCOVERY_FINDING)
2335 if (list_empty(&discov->resolve)) {
2336 /* When BR/EDR inquiry is active and no LE scanning is in
2337 * progress, then change discovery state to indicate completion.
2339 * When running LE scanning and BR/EDR inquiry simultaneously
2340 * and the LE scan already finished, then change the discovery
2341 * state to indicate completion.
2343 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2344 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2345 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2349 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2350 if (e && hci_resolve_name(hdev, e) == 0) {
2351 e->name_state = NAME_PENDING;
2352 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2354 /* When BR/EDR inquiry is active and no LE scanning is in
2355 * progress, then change discovery state to indicate completion.
2357 * When running LE scanning and BR/EDR inquiry simultaneously
2358 * and the LE scan already finished, then change the discovery
2359 * state to indicate completion.
2361 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2362 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2363 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2367 hci_dev_unlock(hdev);
/* Inquiry Result event handler (the variant without RSSI).
 * Validates that the skb actually holds num_rsp inquiry_info records,
 * skips processing during periodic inquiry, then feeds each response
 * into the inquiry cache and forwards it to mgmt as a found device
 * with RSSI marked invalid (this event format carries none).
 */
2370 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2372 struct inquiry_data data;
2373 struct inquiry_info *info = (void *) (skb->data + 1);
2374 int num_rsp = *((__u8 *) skb->data);
2376 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Bounds check: never trust num_rsp beyond the skb's actual length. */
2378 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2381 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2386 for (; num_rsp; num_rsp--, info++) {
2389 bacpy(&data.bdaddr, &info->bdaddr);
2390 data.pscan_rep_mode = info->pscan_rep_mode;
2391 data.pscan_period_mode = info->pscan_period_mode;
2392 data.pscan_mode = info->pscan_mode;
2393 memcpy(data.dev_class, info->dev_class, 3);
2394 data.clock_offset = info->clock_offset;
2395 data.rssi = HCI_RSSI_INVALID;
2396 data.ssp_mode = 0x00;
2398 flags = hci_inquiry_cache_update(hdev, &data, false);
2400 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2401 info->dev_class, HCI_RSSI_INVALID,
2402 flags, NULL, 0, NULL, 0);
2405 hci_dev_unlock(hdev);
/* Connection Complete event handler (BR/EDR ACL and SCO).
 * Locates (or for SCO, retypes) the pending conn object, and on
 * success records the handle, moves ACL links to BT_CONFIG (kicking
 * off Read Remote Features) and SCO links to BT_CONNECTED, creates
 * debugfs/sysfs entries and mirrors device-wide auth/encrypt policy
 * onto the connection.  On failure, reports the error via mgmt and
 * the connect-confirm callback.
 */
2408 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2410 struct hci_ev_conn_complete *ev = (void *) skb->data;
2411 struct hci_conn *conn;
2413 BT_DBG("%s", hdev->name);
2417 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Some controllers report an eSCO attempt completing as SCO; fall
 * back to the ESCO entry and retype it. */
2419 if (ev->link_type != SCO_LINK)
2422 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2426 conn->type = SCO_LINK;
2430 conn->handle = __le16_to_cpu(ev->handle);
2432 if (conn->type == ACL_LINK) {
2433 conn->state = BT_CONFIG;
2434 hci_conn_hold(conn);
/* Incoming legacy (non-SSP) link without a stored key: keep the
 * conn around long enough for pairing to happen. */
2436 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2437 !hci_find_link_key(hdev, &ev->bdaddr))
2438 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2440 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2442 conn->state = BT_CONNECTED;
2444 hci_debugfs_create_conn(conn);
2445 hci_conn_add_sysfs(conn);
/* Inherit the adapter-wide auth/encrypt policy flags. */
2447 if (test_bit(HCI_AUTH, &hdev->flags))
2448 set_bit(HCI_CONN_AUTH, &conn->flags);
2450 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2451 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2453 /* Get remote features */
2454 if (conn->type == ACL_LINK) {
2455 struct hci_cp_read_remote_features cp;
2456 cp.handle = ev->handle;
2457 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2460 hci_req_update_scan(hdev);
2463 /* Set packet type for incoming connection */
2464 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2465 struct hci_cp_change_conn_ptype cp;
2466 cp.handle = ev->handle;
2467 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2468 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2472 conn->state = BT_CLOSED;
2473 if (conn->type == ACL_LINK)
2474 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2475 conn->dst_type, ev->status);
2478 if (conn->type == ACL_LINK)
2479 hci_sco_setup(conn, ev->status);
2482 hci_connect_cfm(conn, ev->status);
2484 } else if (ev->link_type != ACL_LINK)
2485 hci_connect_cfm(conn, ev->status);
2488 hci_dev_unlock(hdev);
2490 hci_conn_check_pending(hdev);
/* Send a Reject Connection Request for @bdaddr with reason
 * "bad address" (HCI_ERROR_REJ_BAD_ADDR).  Helper for the incoming
 * connection-request policy checks below.
 */
2493 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2495 struct hci_cp_reject_conn_req cp;
2497 bacpy(&cp.bdaddr, bdaddr);
2498 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2499 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event handler (incoming BR/EDR connection).
 * Applies acceptance policy in order: protocol layer veto, blacklist,
 * and (under mgmt) the HCI_CONNECTABLE flag / whitelist.  Rejected
 * requests get an HCI Reject.  Accepted ones update the inquiry cache
 * with the peer's class of device, ensure a conn object exists, and
 * either auto-accept (ACL, or SCO when not deferred and eSCO is
 * unsupported), build an Accept Synchronous Connection for eSCO, or
 * defer the decision to the upper layer (BT_CONNECT2).
 */
2502 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2504 struct hci_ev_conn_request *ev = (void *) skb->data;
2505 int mask = hdev->link_mode;
2506 struct inquiry_entry *ie;
2507 struct hci_conn *conn;
2510 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
/* Let L2CAP/SCO protocol handlers veto or flag deferred accept. */
2513 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2516 if (!(mask & HCI_LM_ACCEPT)) {
2517 hci_reject_conn(hdev, &ev->bdaddr);
2521 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2523 hci_reject_conn(hdev, &ev->bdaddr);
2527 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2528 * connection. These features are only touched through mgmt so
2529 * only do the checks if HCI_MGMT is set.
2531 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2532 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2533 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2535 hci_reject_conn(hdev, &ev->bdaddr);
2539 /* Connection accepted */
2543 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2545 memcpy(ie->data.dev_class, ev->dev_class, 3);
2547 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2550 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2553 bt_dev_err(hdev, "no memory for new connection");
2554 hci_dev_unlock(hdev);
2559 memcpy(conn->dev_class, ev->dev_class, 3);
2561 hci_dev_unlock(hdev);
2563 if (ev->link_type == ACL_LINK ||
2564 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2565 struct hci_cp_accept_conn_req cp;
2566 conn->state = BT_CONNECT;
2568 bacpy(&cp.bdaddr, &ev->bdaddr);
2570 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2571 cp.role = 0x00; /* Become master */
2573 cp.role = 0x01; /* Remain slave */
2575 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2576 } else if (!(flags & HCI_PROTO_DEFER)) {
2577 struct hci_cp_accept_sync_conn_req cp;
2578 conn->state = BT_CONNECT;
2580 bacpy(&cp.bdaddr, &ev->bdaddr);
2581 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* 0x1f40 = 8000 Bps: standard 64 kbit/s voice bandwidth. */
2583 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2584 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2585 cp.max_latency = cpu_to_le16(0xffff);
2586 cp.content_format = cpu_to_le16(hdev->voice_setting);
2587 cp.retrans_effort = 0xff;
2589 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
/* Deferred accept: park in BT_CONNECT2 and let L2CAP decide. */
2592 conn->state = BT_CONNECT2;
2593 hci_connect_cfm(conn, 0);
/* Translate an HCI disconnect reason code into the coarser mgmt
 * disconnect-reason enumeration reported to userspace.
 */
2597 static u8 hci_to_mgmt_reason(u8 err)
2600 case HCI_ERROR_CONNECTION_TIMEOUT:
2601 return MGMT_DEV_DISCONN_TIMEOUT;
2602 case HCI_ERROR_REMOTE_USER_TERM:
2603 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2604 case HCI_ERROR_REMOTE_POWER_OFF:
2605 return MGMT_DEV_DISCONN_REMOTE;
2606 case HCI_ERROR_LOCAL_HOST_TERM:
2607 return MGMT_DEV_DISCONN_LOCAL_HOST;
2609 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event handler.
 * On event failure the mgmt disconnect attempt is reported failed.
 * Otherwise the conn moves to BT_CLOSED, userspace is told the device
 * disconnected (with an auth-failure override if that flag was set),
 * link keys / scan state / auto-connect parameters are updated for
 * ACL links, upper layers get disconn_cfm, and LE advertising is
 * re-enabled if this LE connection had implicitly disabled it.
 */
2613 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2615 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2617 struct hci_conn_params *params;
2618 struct hci_conn *conn;
2619 bool mgmt_connected;
2622 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2626 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2631 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2632 conn->dst_type, ev->status);
2636 conn->state = BT_CLOSED;
2638 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2640 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2641 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2643 reason = hci_to_mgmt_reason(ev->reason);
2645 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2646 reason, mgmt_connected);
2648 if (conn->type == ACL_LINK) {
2649 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2650 hci_remove_link_key(hdev, &conn->dst);
2652 hci_req_update_scan(hdev);
2655 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2657 switch (params->auto_connect) {
2658 case HCI_AUTO_CONN_LINK_LOSS:
/* Only re-arm link-loss auto-connect on a real timeout. */
2659 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2663 case HCI_AUTO_CONN_DIRECT:
2664 case HCI_AUTO_CONN_ALWAYS:
2665 list_del_init(&params->action);
2666 list_add(&params->action, &hdev->pend_le_conns);
2667 hci_update_background_scan(hdev);
2677 hci_disconn_cfm(conn, ev->reason);
2680 /* Re-enable advertising if necessary, since it might
2681 * have been disabled by the connection. From the
2682 * HCI_LE_Set_Advertise_Enable command description in
2683 * the core specification (v4.0):
2684 * "The Controller shall continue advertising until the Host
2685 * issues an LE_Set_Advertise_Enable command with
2686 * Advertising_Enable set to 0x00 (Advertising is disabled)
2687 * or until a connection is created or until the Advertising
2688 * is timed out due to Directed Advertising."
2690 if (type == LE_LINK)
2691 hci_req_reenable_advertising(hdev);
2694 hci_dev_unlock(hdev);
/* Authentication Complete event handler.
 * On success marks the link authenticated and promotes the pending
 * security level (except legacy re-auth, which isn't possible); on
 * failure records auth failure for mgmt.  Then clears the pending
 * bits and either continues connection setup (encrypt for SSP links,
 * otherwise finish connect), or services a pending encryption request
 * on an established link.
 */
2697 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2699 struct hci_ev_auth_complete *ev = (void *) skb->data;
2700 struct hci_conn *conn;
2702 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2706 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2711 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2713 if (!hci_conn_ssp_enabled(conn) &&
2714 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2715 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2717 set_bit(HCI_CONN_AUTH, &conn->flags);
2718 conn->sec_level = conn->pending_sec_level;
2721 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2722 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2724 mgmt_auth_failed(conn, ev->status);
2727 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2728 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2730 if (conn->state == BT_CONFIG) {
/* SSP requires encryption; request it before declaring the
 * connection fully up. */
2731 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2732 struct hci_cp_set_conn_encrypt cp;
2733 cp.handle = ev->handle;
2735 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2738 conn->state = BT_CONNECTED;
2739 hci_connect_cfm(conn, ev->status);
2740 hci_conn_drop(conn);
2743 hci_auth_cfm(conn, ev->status);
2745 hci_conn_hold(conn);
2746 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2747 hci_conn_drop(conn);
2750 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2752 struct hci_cp_set_conn_encrypt cp;
2753 cp.handle = ev->handle;
2755 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2758 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2759 hci_encrypt_cfm(conn, ev->status);
2764 hci_dev_unlock(hdev);
/* Remote Name Request Complete event handler.
 * Feeds the (possibly absent) resolved name into the discovery
 * bookkeeping via hci_check_pending_name when mgmt is active, then -
 * like hci_cs_remote_name_req - triggers authentication if the
 * associated outgoing connection still needs it.
 */
2767 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2769 struct hci_ev_remote_name *ev = (void *) skb->data;
2770 struct hci_conn *conn;
2772 BT_DBG("%s", hdev->name);
2774 hci_conn_check_pending(hdev);
2778 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2780 if (!hci_dev_test_flag(hdev, HCI_MGMT))
/* ev->name is not guaranteed NUL-terminated; bound the length. */
2783 if (ev->status == 0)
2784 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2785 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2787 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2793 if (!hci_outgoing_auth_needed(hdev, conn))
2796 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2797 struct hci_cp_auth_requested cp;
2799 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2801 cp.handle = __cpu_to_le16(conn->handle);
2802 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2806 hci_dev_unlock(hdev);
/* Request-completion callback for HCI_OP_READ_ENC_KEY_SIZE (issued
 * from hci_encrypt_change_evt).  Validates the response, finds the
 * connection by handle, records the reported key size (falling back
 * to the maximum, HCI_LINK_KEY_SIZE, on read failure), and then
 * delivers the deferred encrypt-change notification to upper layers.
 */
2809 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2810 u16 opcode, struct sk_buff *skb)
2812 const struct hci_rp_read_enc_key_size *rp;
2813 struct hci_conn *conn;
2816 BT_DBG("%s status 0x%02x", hdev->name, status);
2818 if (!skb || skb->len < sizeof(*rp)) {
2819 bt_dev_err(hdev, "invalid read key size response");
2823 rp = (void *)skb->data;
2824 handle = le16_to_cpu(rp->handle);
2828 conn = hci_conn_hash_lookup_handle(hdev, handle);
2832 /* If we fail to read the encryption key size, assume maximum
2833 * (which is the same we do also when this HCI command isn't
2837 bt_dev_err(hdev, "failed to read key size for handle %u",
2839 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2841 conn->enc_key_size = rp->key_size;
2844 hci_encrypt_cfm(conn, 0);
2847 hci_dev_unlock(hdev);
/* Encryption Change event handler.
 * Mirrors the new encryption state onto the conn flags (AES-CCM for
 * LE or BR/EDR mode 0x02, FIPS for P-256 keys), invalidates the RPA
 * on LE encryption failure, enforces link-security requirements, and
 * on an encrypted ACL link defers the encrypt-change notification
 * until the encryption key size has been read (or assumed maximal on
 * controllers lacking the command).
 */
2850 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2852 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2853 struct hci_conn *conn;
2855 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2859 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2865 /* Encryption implies authentication */
2866 set_bit(HCI_CONN_AUTH, &conn->flags);
2867 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2868 conn->sec_level = conn->pending_sec_level;
2870 /* P-256 authentication key implies FIPS */
2871 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2872 set_bit(HCI_CONN_FIPS, &conn->flags);
2874 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2875 conn->type == LE_LINK)
2876 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2878 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2879 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2883 /* We should disregard the current RPA and generate a new one
2884 * whenever the encryption procedure fails.
2886 if (ev->status && conn->type == LE_LINK) {
2887 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2888 hci_adv_instances_set_rpa_expired(hdev, true);
2891 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2893 /* Check link security requirements are met */
2894 if (!hci_conn_check_link_mode(conn))
2895 ev->status = HCI_ERROR_AUTH_FAILURE;
2897 if (ev->status && conn->state == BT_CONNECTED) {
2898 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2899 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2901 /* Notify upper layers so they can cleanup before
2904 hci_encrypt_cfm(conn, ev->status);
2905 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2906 hci_conn_drop(conn);
2910 /* Try reading the encryption key size for encrypted ACL links */
2911 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
2912 struct hci_cp_read_enc_key_size cp;
2913 struct hci_request req;
2915 /* Only send HCI_Read_Encryption_Key_Size if the
2916 * controller really supports it. If it doesn't, assume
2917 * the default size (16).
2919 if (!(hdev->commands[20] & 0x10)) {
2920 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2924 hci_req_init(&req, hdev);
2926 cp.handle = cpu_to_le16(conn->handle);
2927 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
2929 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
2930 bt_dev_err(hdev, "sending read key size failed");
2931 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2939 hci_encrypt_cfm(conn, ev->status);
2942 hci_dev_unlock(hdev);
/* Change Connection Link Key Complete event handler.
 * Marks the link secure, clears the auth-pending bit, and notifies
 * upper layers of the key change.
 */
2945 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2946 struct sk_buff *skb)
2948 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2949 struct hci_conn *conn;
2951 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2955 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2958 set_bit(HCI_CONN_SECURE, &conn->flags);
2960 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2962 hci_key_change_cfm(conn, ev->status);
2965 hci_dev_unlock(hdev);
/* Read Remote Supported Features Complete event handler.
 * Stores the 8-byte feature page 0, then - while the link is still in
 * BT_CONFIG - chains the next setup step: read extended features when
 * both sides support them, otherwise request the remote name (or
 * report connected to mgmt), and finally complete the connection if
 * no authentication is required.
 */
2968 static void hci_remote_features_evt(struct hci_dev *hdev,
2969 struct sk_buff *skb)
2971 struct hci_ev_remote_features *ev = (void *) skb->data;
2972 struct hci_conn *conn;
2974 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2978 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2983 memcpy(conn->features[0], ev->features, 8);
2985 if (conn->state != BT_CONFIG)
2988 if (!ev->status && lmp_ext_feat_capable(hdev) &&
2989 lmp_ext_feat_capable(conn)) {
2990 struct hci_cp_read_remote_ext_features cp;
2991 cp.handle = ev->handle;
2993 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2998 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2999 struct hci_cp_remote_name_req cp;
3000 memset(&cp, 0, sizeof(cp));
3001 bacpy(&cp.bdaddr, &conn->dst);
3002 cp.pscan_rep_mode = 0x02;
3003 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3004 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3005 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3007 if (!hci_outgoing_auth_needed(hdev, conn)) {
3008 conn->state = BT_CONNECTED;
3009 hci_connect_cfm(conn, ev->status);
3010 hci_conn_drop(conn);
3014 hci_dev_unlock(hdev);
3017 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3018 u16 *opcode, u8 *status,
3019 hci_req_complete_t *req_complete,
3020 hci_req_complete_skb_t *req_complete_skb)
3022 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3024 *opcode = __le16_to_cpu(ev->opcode);
3025 *status = skb->data[sizeof(*ev)];
3027 skb_pull(skb, sizeof(*ev));
3030 case HCI_OP_INQUIRY_CANCEL:
3031 hci_cc_inquiry_cancel(hdev, skb, status);
3034 case HCI_OP_PERIODIC_INQ:
3035 hci_cc_periodic_inq(hdev, skb);
3038 case HCI_OP_EXIT_PERIODIC_INQ:
3039 hci_cc_exit_periodic_inq(hdev, skb);
3042 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3043 hci_cc_remote_name_req_cancel(hdev, skb);
3046 case HCI_OP_ROLE_DISCOVERY:
3047 hci_cc_role_discovery(hdev, skb);
3050 case HCI_OP_READ_LINK_POLICY:
3051 hci_cc_read_link_policy(hdev, skb);
3054 case HCI_OP_WRITE_LINK_POLICY:
3055 hci_cc_write_link_policy(hdev, skb);
3058 case HCI_OP_READ_DEF_LINK_POLICY:
3059 hci_cc_read_def_link_policy(hdev, skb);
3062 case HCI_OP_WRITE_DEF_LINK_POLICY:
3063 hci_cc_write_def_link_policy(hdev, skb);
3067 hci_cc_reset(hdev, skb);
3070 case HCI_OP_READ_STORED_LINK_KEY:
3071 hci_cc_read_stored_link_key(hdev, skb);
3074 case HCI_OP_DELETE_STORED_LINK_KEY:
3075 hci_cc_delete_stored_link_key(hdev, skb);
3078 case HCI_OP_WRITE_LOCAL_NAME:
3079 hci_cc_write_local_name(hdev, skb);
3082 case HCI_OP_READ_LOCAL_NAME:
3083 hci_cc_read_local_name(hdev, skb);
3086 case HCI_OP_WRITE_AUTH_ENABLE:
3087 hci_cc_write_auth_enable(hdev, skb);
3090 case HCI_OP_WRITE_ENCRYPT_MODE:
3091 hci_cc_write_encrypt_mode(hdev, skb);
3094 case HCI_OP_WRITE_SCAN_ENABLE:
3095 hci_cc_write_scan_enable(hdev, skb);
3098 case HCI_OP_READ_CLASS_OF_DEV:
3099 hci_cc_read_class_of_dev(hdev, skb);
3102 case HCI_OP_WRITE_CLASS_OF_DEV:
3103 hci_cc_write_class_of_dev(hdev, skb);
3106 case HCI_OP_READ_VOICE_SETTING:
3107 hci_cc_read_voice_setting(hdev, skb);
3110 case HCI_OP_WRITE_VOICE_SETTING:
3111 hci_cc_write_voice_setting(hdev, skb);
3114 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3115 hci_cc_read_num_supported_iac(hdev, skb);
3118 case HCI_OP_WRITE_SSP_MODE:
3119 hci_cc_write_ssp_mode(hdev, skb);
3122 case HCI_OP_WRITE_SC_SUPPORT:
3123 hci_cc_write_sc_support(hdev, skb);
3126 case HCI_OP_READ_LOCAL_VERSION:
3127 hci_cc_read_local_version(hdev, skb);
3130 case HCI_OP_READ_LOCAL_COMMANDS:
3131 hci_cc_read_local_commands(hdev, skb);
3134 case HCI_OP_READ_LOCAL_FEATURES:
3135 hci_cc_read_local_features(hdev, skb);
3138 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3139 hci_cc_read_local_ext_features(hdev, skb);
3142 case HCI_OP_READ_BUFFER_SIZE:
3143 hci_cc_read_buffer_size(hdev, skb);
3146 case HCI_OP_READ_BD_ADDR:
3147 hci_cc_read_bd_addr(hdev, skb);
3150 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3151 hci_cc_read_page_scan_activity(hdev, skb);
3154 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3155 hci_cc_write_page_scan_activity(hdev, skb);
3158 case HCI_OP_READ_PAGE_SCAN_TYPE:
3159 hci_cc_read_page_scan_type(hdev, skb);
3162 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3163 hci_cc_write_page_scan_type(hdev, skb);
3166 case HCI_OP_READ_DATA_BLOCK_SIZE:
3167 hci_cc_read_data_block_size(hdev, skb);
3170 case HCI_OP_READ_FLOW_CONTROL_MODE:
3171 hci_cc_read_flow_control_mode(hdev, skb);
3174 case HCI_OP_READ_LOCAL_AMP_INFO:
3175 hci_cc_read_local_amp_info(hdev, skb);
3178 case HCI_OP_READ_CLOCK:
3179 hci_cc_read_clock(hdev, skb);
3182 case HCI_OP_READ_INQ_RSP_TX_POWER:
3183 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3186 case HCI_OP_PIN_CODE_REPLY:
3187 hci_cc_pin_code_reply(hdev, skb);
3190 case HCI_OP_PIN_CODE_NEG_REPLY:
3191 hci_cc_pin_code_neg_reply(hdev, skb);
3194 case HCI_OP_READ_LOCAL_OOB_DATA:
3195 hci_cc_read_local_oob_data(hdev, skb);
3198 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3199 hci_cc_read_local_oob_ext_data(hdev, skb);
3202 case HCI_OP_LE_READ_BUFFER_SIZE:
3203 hci_cc_le_read_buffer_size(hdev, skb);
3206 case HCI_OP_LE_READ_LOCAL_FEATURES:
3207 hci_cc_le_read_local_features(hdev, skb);
3210 case HCI_OP_LE_READ_ADV_TX_POWER:
3211 hci_cc_le_read_adv_tx_power(hdev, skb);
3214 case HCI_OP_USER_CONFIRM_REPLY:
3215 hci_cc_user_confirm_reply(hdev, skb);
3218 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3219 hci_cc_user_confirm_neg_reply(hdev, skb);
3222 case HCI_OP_USER_PASSKEY_REPLY:
3223 hci_cc_user_passkey_reply(hdev, skb);
3226 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3227 hci_cc_user_passkey_neg_reply(hdev, skb);
3230 case HCI_OP_LE_SET_RANDOM_ADDR:
3231 hci_cc_le_set_random_addr(hdev, skb);
3234 case HCI_OP_LE_SET_ADV_ENABLE:
3235 hci_cc_le_set_adv_enable(hdev, skb);
3238 case HCI_OP_LE_SET_SCAN_PARAM:
3239 hci_cc_le_set_scan_param(hdev, skb);
3242 case HCI_OP_LE_SET_SCAN_ENABLE:
3243 hci_cc_le_set_scan_enable(hdev, skb);
3246 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3247 hci_cc_le_read_white_list_size(hdev, skb);
3250 case HCI_OP_LE_CLEAR_WHITE_LIST:
3251 hci_cc_le_clear_white_list(hdev, skb);
3254 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3255 hci_cc_le_add_to_white_list(hdev, skb);
3258 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3259 hci_cc_le_del_from_white_list(hdev, skb);
3262 case HCI_OP_LE_READ_SUPPORTED_STATES:
3263 hci_cc_le_read_supported_states(hdev, skb);
3266 case HCI_OP_LE_READ_DEF_DATA_LEN:
3267 hci_cc_le_read_def_data_len(hdev, skb);
3270 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3271 hci_cc_le_write_def_data_len(hdev, skb);
3274 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3275 hci_cc_le_clear_resolv_list(hdev, skb);
3278 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3279 hci_cc_le_read_resolv_list_size(hdev, skb);
3282 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3283 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3286 case HCI_OP_LE_READ_MAX_DATA_LEN:
3287 hci_cc_le_read_max_data_len(hdev, skb);
3290 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3291 hci_cc_write_le_host_supported(hdev, skb);
3294 case HCI_OP_LE_SET_ADV_PARAM:
3295 hci_cc_set_adv_param(hdev, skb);
3298 case HCI_OP_READ_RSSI:
3299 hci_cc_read_rssi(hdev, skb);
3302 case HCI_OP_READ_TX_POWER:
3303 hci_cc_read_tx_power(hdev, skb);
3306 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3307 hci_cc_write_ssp_debug_mode(hdev, skb);
3310 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3311 hci_cc_le_set_ext_scan_param(hdev, skb);
3314 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3315 hci_cc_le_set_ext_scan_enable(hdev, skb);
3318 case HCI_OP_LE_SET_DEFAULT_PHY:
3319 hci_cc_le_set_default_phy(hdev, skb);
3322 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3323 hci_cc_le_read_num_adv_sets(hdev, skb);
3326 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3327 hci_cc_set_ext_adv_param(hdev, skb);
3330 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3331 hci_cc_le_set_ext_adv_enable(hdev, skb);
3334 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3335 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3339 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3343 if (*opcode != HCI_OP_NOP)
3344 cancel_delayed_work(&hdev->cmd_timer);
3346 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3347 atomic_set(&hdev->cmd_cnt, 1);
3349 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3352 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3354 "unexpected event for opcode 0x%4.4x", *opcode);
3358 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3359 queue_work(hdev->workqueue, &hdev->cmd_work);
3362 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3363 u16 *opcode, u8 *status,
3364 hci_req_complete_t *req_complete,
3365 hci_req_complete_skb_t *req_complete_skb)
3367 struct hci_ev_cmd_status *ev = (void *) skb->data;
3369 skb_pull(skb, sizeof(*ev));
3371 *opcode = __le16_to_cpu(ev->opcode);
3372 *status = ev->status;
3375 case HCI_OP_INQUIRY:
3376 hci_cs_inquiry(hdev, ev->status);
3379 case HCI_OP_CREATE_CONN:
3380 hci_cs_create_conn(hdev, ev->status);
3383 case HCI_OP_DISCONNECT:
3384 hci_cs_disconnect(hdev, ev->status);
3387 case HCI_OP_ADD_SCO:
3388 hci_cs_add_sco(hdev, ev->status);
3391 case HCI_OP_AUTH_REQUESTED:
3392 hci_cs_auth_requested(hdev, ev->status);
3395 case HCI_OP_SET_CONN_ENCRYPT:
3396 hci_cs_set_conn_encrypt(hdev, ev->status);
3399 case HCI_OP_REMOTE_NAME_REQ:
3400 hci_cs_remote_name_req(hdev, ev->status);
3403 case HCI_OP_READ_REMOTE_FEATURES:
3404 hci_cs_read_remote_features(hdev, ev->status);
3407 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3408 hci_cs_read_remote_ext_features(hdev, ev->status);
3411 case HCI_OP_SETUP_SYNC_CONN:
3412 hci_cs_setup_sync_conn(hdev, ev->status);
3415 case HCI_OP_SNIFF_MODE:
3416 hci_cs_sniff_mode(hdev, ev->status);
3419 case HCI_OP_EXIT_SNIFF_MODE:
3420 hci_cs_exit_sniff_mode(hdev, ev->status);
3423 case HCI_OP_SWITCH_ROLE:
3424 hci_cs_switch_role(hdev, ev->status);
3427 case HCI_OP_LE_CREATE_CONN:
3428 hci_cs_le_create_conn(hdev, ev->status);
3431 case HCI_OP_LE_READ_REMOTE_FEATURES:
3432 hci_cs_le_read_remote_features(hdev, ev->status);
3435 case HCI_OP_LE_START_ENC:
3436 hci_cs_le_start_enc(hdev, ev->status);
3439 case HCI_OP_LE_EXT_CREATE_CONN:
3440 hci_cs_le_ext_create_conn(hdev, ev->status);
3444 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3448 if (*opcode != HCI_OP_NOP)
3449 cancel_delayed_work(&hdev->cmd_timer);
3451 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3452 atomic_set(&hdev->cmd_cnt, 1);
3454 /* Indicate request completion if the command failed. Also, if
3455 * we're not waiting for a special event and we get a success
3456 * command status we should try to flag the request as completed
3457 * (since for this kind of commands there will not be a command
3461 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3462 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3465 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3467 "unexpected event for opcode 0x%4.4x", *opcode);
3471 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3472 queue_work(hdev->workqueue, &hdev->cmd_work);
3475 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3477 struct hci_ev_hardware_error *ev = (void *) skb->data;
3479 hdev->hw_error_code = ev->code;
3481 queue_work(hdev->req_workqueue, &hdev->error_reset);
3484 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3486 struct hci_ev_role_change *ev = (void *) skb->data;
3487 struct hci_conn *conn;
3489 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3493 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3496 conn->role = ev->role;
3498 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3500 hci_role_switch_cfm(conn, ev->status, ev->role);
3503 hci_dev_unlock(hdev);
3506 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3508 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3511 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3512 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3516 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3517 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3518 BT_DBG("%s bad parameters", hdev->name);
3522 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3524 for (i = 0; i < ev->num_hndl; i++) {
3525 struct hci_comp_pkts_info *info = &ev->handles[i];
3526 struct hci_conn *conn;
3527 __u16 handle, count;
3529 handle = __le16_to_cpu(info->handle);
3530 count = __le16_to_cpu(info->count);
3532 conn = hci_conn_hash_lookup_handle(hdev, handle);
3536 conn->sent -= count;
3538 switch (conn->type) {
3540 hdev->acl_cnt += count;
3541 if (hdev->acl_cnt > hdev->acl_pkts)
3542 hdev->acl_cnt = hdev->acl_pkts;
3546 if (hdev->le_pkts) {
3547 hdev->le_cnt += count;
3548 if (hdev->le_cnt > hdev->le_pkts)
3549 hdev->le_cnt = hdev->le_pkts;
3551 hdev->acl_cnt += count;
3552 if (hdev->acl_cnt > hdev->acl_pkts)
3553 hdev->acl_cnt = hdev->acl_pkts;
3558 hdev->sco_cnt += count;
3559 if (hdev->sco_cnt > hdev->sco_pkts)
3560 hdev->sco_cnt = hdev->sco_pkts;
3564 bt_dev_err(hdev, "unknown type %d conn %p",
3570 queue_work(hdev->workqueue, &hdev->tx_work);
3573 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3576 struct hci_chan *chan;
3578 switch (hdev->dev_type) {
3580 return hci_conn_hash_lookup_handle(hdev, handle);
3582 chan = hci_chan_lookup_handle(hdev, handle);
3587 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3594 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3596 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3599 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3600 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3604 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3605 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3606 BT_DBG("%s bad parameters", hdev->name);
3610 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3613 for (i = 0; i < ev->num_hndl; i++) {
3614 struct hci_comp_blocks_info *info = &ev->handles[i];
3615 struct hci_conn *conn = NULL;
3616 __u16 handle, block_count;
3618 handle = __le16_to_cpu(info->handle);
3619 block_count = __le16_to_cpu(info->blocks);
3621 conn = __hci_conn_lookup_handle(hdev, handle);
3625 conn->sent -= block_count;
3627 switch (conn->type) {
3630 hdev->block_cnt += block_count;
3631 if (hdev->block_cnt > hdev->num_blocks)
3632 hdev->block_cnt = hdev->num_blocks;
3636 bt_dev_err(hdev, "unknown type %d conn %p",
3642 queue_work(hdev->workqueue, &hdev->tx_work);
3645 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3647 struct hci_ev_mode_change *ev = (void *) skb->data;
3648 struct hci_conn *conn;
3650 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3654 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3656 conn->mode = ev->mode;
3658 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3660 if (conn->mode == HCI_CM_ACTIVE)
3661 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3663 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3666 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3667 hci_sco_setup(conn, ev->status);
3670 hci_dev_unlock(hdev);
3673 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3675 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3676 struct hci_conn *conn;
3678 BT_DBG("%s", hdev->name);
3682 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3686 if (conn->state == BT_CONNECTED) {
3687 hci_conn_hold(conn);
3688 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3689 hci_conn_drop(conn);
3692 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3693 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3694 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3695 sizeof(ev->bdaddr), &ev->bdaddr);
3696 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3699 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3704 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3708 hci_dev_unlock(hdev);
3711 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3713 if (key_type == HCI_LK_CHANGED_COMBINATION)
3716 conn->pin_length = pin_len;
3717 conn->key_type = key_type;
3720 case HCI_LK_LOCAL_UNIT:
3721 case HCI_LK_REMOTE_UNIT:
3722 case HCI_LK_DEBUG_COMBINATION:
3724 case HCI_LK_COMBINATION:
3726 conn->pending_sec_level = BT_SECURITY_HIGH;
3728 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3730 case HCI_LK_UNAUTH_COMBINATION_P192:
3731 case HCI_LK_UNAUTH_COMBINATION_P256:
3732 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3734 case HCI_LK_AUTH_COMBINATION_P192:
3735 conn->pending_sec_level = BT_SECURITY_HIGH;
3737 case HCI_LK_AUTH_COMBINATION_P256:
3738 conn->pending_sec_level = BT_SECURITY_FIPS;
3743 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3745 struct hci_ev_link_key_req *ev = (void *) skb->data;
3746 struct hci_cp_link_key_reply cp;
3747 struct hci_conn *conn;
3748 struct link_key *key;
3750 BT_DBG("%s", hdev->name);
3752 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3757 key = hci_find_link_key(hdev, &ev->bdaddr);
3759 BT_DBG("%s link key not found for %pMR", hdev->name,
3764 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3767 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3769 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3771 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3772 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3773 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3774 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3778 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3779 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3780 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3781 BT_DBG("%s ignoring key unauthenticated for high security",
3786 conn_set_key(conn, key->type, key->pin_len);
3789 bacpy(&cp.bdaddr, &ev->bdaddr);
3790 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3792 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3794 hci_dev_unlock(hdev);
3799 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3800 hci_dev_unlock(hdev);
3803 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3805 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3806 struct hci_conn *conn;
3807 struct link_key *key;
3811 BT_DBG("%s", hdev->name);
3815 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3819 hci_conn_hold(conn);
3820 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3821 hci_conn_drop(conn);
3823 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3824 conn_set_key(conn, ev->key_type, conn->pin_length);
3826 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3829 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3830 ev->key_type, pin_len, &persistent);
3834 /* Update connection information since adding the key will have
3835 * fixed up the type in the case of changed combination keys.
3837 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3838 conn_set_key(conn, key->type, key->pin_len);
3840 mgmt_new_link_key(hdev, key, persistent);
3842 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3843 * is set. If it's not set simply remove the key from the kernel
3844 * list (we've still notified user space about it but with
3845 * store_hint being 0).
3847 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3848 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3849 list_del_rcu(&key->list);
3850 kfree_rcu(key, rcu);
3855 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3857 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3860 hci_dev_unlock(hdev);
3863 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3865 struct hci_ev_clock_offset *ev = (void *) skb->data;
3866 struct hci_conn *conn;
3868 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3872 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3873 if (conn && !ev->status) {
3874 struct inquiry_entry *ie;
3876 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3878 ie->data.clock_offset = ev->clock_offset;
3879 ie->timestamp = jiffies;
3883 hci_dev_unlock(hdev);
3886 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3888 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3889 struct hci_conn *conn;
3891 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3895 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3896 if (conn && !ev->status)
3897 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3899 hci_dev_unlock(hdev);
3902 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3904 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3905 struct inquiry_entry *ie;
3907 BT_DBG("%s", hdev->name);
3911 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3913 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3914 ie->timestamp = jiffies;
3917 hci_dev_unlock(hdev);
3920 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3921 struct sk_buff *skb)
3923 struct inquiry_data data;
3924 int num_rsp = *((__u8 *) skb->data);
3926 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3931 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3936 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3937 struct inquiry_info_with_rssi_and_pscan_mode *info;
3938 info = (void *) (skb->data + 1);
3940 if (skb->len < num_rsp * sizeof(*info) + 1)
3943 for (; num_rsp; num_rsp--, info++) {
3946 bacpy(&data.bdaddr, &info->bdaddr);
3947 data.pscan_rep_mode = info->pscan_rep_mode;
3948 data.pscan_period_mode = info->pscan_period_mode;
3949 data.pscan_mode = info->pscan_mode;
3950 memcpy(data.dev_class, info->dev_class, 3);
3951 data.clock_offset = info->clock_offset;
3952 data.rssi = info->rssi;
3953 data.ssp_mode = 0x00;
3955 flags = hci_inquiry_cache_update(hdev, &data, false);
3957 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3958 info->dev_class, info->rssi,
3959 flags, NULL, 0, NULL, 0);
3962 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3964 if (skb->len < num_rsp * sizeof(*info) + 1)
3967 for (; num_rsp; num_rsp--, info++) {
3970 bacpy(&data.bdaddr, &info->bdaddr);
3971 data.pscan_rep_mode = info->pscan_rep_mode;
3972 data.pscan_period_mode = info->pscan_period_mode;
3973 data.pscan_mode = 0x00;
3974 memcpy(data.dev_class, info->dev_class, 3);
3975 data.clock_offset = info->clock_offset;
3976 data.rssi = info->rssi;
3977 data.ssp_mode = 0x00;
3979 flags = hci_inquiry_cache_update(hdev, &data, false);
3981 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3982 info->dev_class, info->rssi,
3983 flags, NULL, 0, NULL, 0);
3988 hci_dev_unlock(hdev);
3991 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3992 struct sk_buff *skb)
3994 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3995 struct hci_conn *conn;
3997 BT_DBG("%s", hdev->name);
4001 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4005 if (ev->page < HCI_MAX_PAGES)
4006 memcpy(conn->features[ev->page], ev->features, 8);
4008 if (!ev->status && ev->page == 0x01) {
4009 struct inquiry_entry *ie;
4011 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4013 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4015 if (ev->features[0] & LMP_HOST_SSP) {
4016 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4018 /* It is mandatory by the Bluetooth specification that
4019 * Extended Inquiry Results are only used when Secure
4020 * Simple Pairing is enabled, but some devices violate
4023 * To make these devices work, the internal SSP
4024 * enabled flag needs to be cleared if the remote host
4025 * features do not indicate SSP support */
4026 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4029 if (ev->features[0] & LMP_HOST_SC)
4030 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4033 if (conn->state != BT_CONFIG)
4036 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4037 struct hci_cp_remote_name_req cp;
4038 memset(&cp, 0, sizeof(cp));
4039 bacpy(&cp.bdaddr, &conn->dst);
4040 cp.pscan_rep_mode = 0x02;
4041 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4042 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4043 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4045 if (!hci_outgoing_auth_needed(hdev, conn)) {
4046 conn->state = BT_CONNECTED;
4047 hci_connect_cfm(conn, ev->status);
4048 hci_conn_drop(conn);
4052 hci_dev_unlock(hdev);
4055 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4056 struct sk_buff *skb)
4058 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4059 struct hci_conn *conn;
4061 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4065 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4067 if (ev->link_type == ESCO_LINK)
4070 /* When the link type in the event indicates SCO connection
4071 * and lookup of the connection object fails, then check
4072 * if an eSCO connection object exists.
4074 * The core limits the synchronous connections to either
4075 * SCO or eSCO. The eSCO connection is preferred and tried
4076 * to be setup first and until successfully established,
4077 * the link type will be hinted as eSCO.
4079 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4084 switch (ev->status) {
4086 /* The synchronous connection complete event should only be
4087 * sent once per new connection. Receiving a successful
4088 * complete event when the connection status is already
4089 * BT_CONNECTED means that the device is misbehaving and sent
4090 * multiple complete event packets for the same new connection.
4092 * Registering the device more than once can corrupt kernel
4093 * memory, hence upon detecting this invalid event, we report
4094 * an error and ignore the packet.
4096 if (conn->state == BT_CONNECTED) {
4097 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4101 conn->handle = __le16_to_cpu(ev->handle);
4102 conn->state = BT_CONNECTED;
4103 conn->type = ev->link_type;
4105 hci_debugfs_create_conn(conn);
4106 hci_conn_add_sysfs(conn);
4109 case 0x10: /* Connection Accept Timeout */
4110 case 0x0d: /* Connection Rejected due to Limited Resources */
4111 case 0x11: /* Unsupported Feature or Parameter Value */
4112 case 0x1c: /* SCO interval rejected */
4113 case 0x1a: /* Unsupported Remote Feature */
4114 case 0x1e: /* Invalid LMP Parameters */
4115 case 0x1f: /* Unspecified error */
4116 case 0x20: /* Unsupported LMP Parameter value */
4118 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4119 (hdev->esco_type & EDR_ESCO_MASK);
4120 if (hci_setup_sync(conn, conn->link->handle))
4126 conn->state = BT_CLOSED;
4130 hci_connect_cfm(conn, ev->status);
4135 hci_dev_unlock(hdev);
4138 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4142 while (parsed < eir_len) {
4143 u8 field_len = eir[0];
4148 parsed += field_len + 1;
4149 eir += field_len + 1;
4155 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4156 struct sk_buff *skb)
4158 struct inquiry_data data;
4159 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4160 int num_rsp = *((__u8 *) skb->data);
4163 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4165 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4168 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4173 for (; num_rsp; num_rsp--, info++) {
4177 bacpy(&data.bdaddr, &info->bdaddr);
4178 data.pscan_rep_mode = info->pscan_rep_mode;
4179 data.pscan_period_mode = info->pscan_period_mode;
4180 data.pscan_mode = 0x00;
4181 memcpy(data.dev_class, info->dev_class, 3);
4182 data.clock_offset = info->clock_offset;
4183 data.rssi = info->rssi;
4184 data.ssp_mode = 0x01;
4186 if (hci_dev_test_flag(hdev, HCI_MGMT))
4187 name_known = eir_get_data(info->data,
4189 EIR_NAME_COMPLETE, NULL);
4193 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4195 eir_len = eir_get_length(info->data, sizeof(info->data));
4197 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4198 info->dev_class, info->rssi,
4199 flags, info->data, eir_len, NULL, 0);
4202 hci_dev_unlock(hdev);
4205 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4206 struct sk_buff *skb)
4208 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4209 struct hci_conn *conn;
4211 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4212 __le16_to_cpu(ev->handle));
4216 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4220 /* For BR/EDR the necessary steps are taken through the
4221 * auth_complete event.
4223 if (conn->type != LE_LINK)
4227 conn->sec_level = conn->pending_sec_level;
4229 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4231 if (ev->status && conn->state == BT_CONNECTED) {
4232 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4233 hci_conn_drop(conn);
4237 if (conn->state == BT_CONFIG) {
4239 conn->state = BT_CONNECTED;
4241 hci_connect_cfm(conn, ev->status);
4242 hci_conn_drop(conn);
4244 hci_auth_cfm(conn, ev->status);
4246 hci_conn_hold(conn);
4247 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4248 hci_conn_drop(conn);
4252 hci_dev_unlock(hdev);
4255 static u8 hci_get_auth_req(struct hci_conn *conn)
4257 /* If remote requests no-bonding follow that lead */
4258 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4259 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4260 return conn->remote_auth | (conn->auth_type & 0x01);
4262 /* If both remote and local have enough IO capabilities, require
4265 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4266 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4267 return conn->remote_auth | 0x01;
4269 /* No MITM protection possible so ignore remote requirement */
4270 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4273 static u8 bredr_oob_data_present(struct hci_conn *conn)
4275 struct hci_dev *hdev = conn->hdev;
4276 struct oob_data *data;
4278 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4282 if (bredr_sc_enabled(hdev)) {
4283 /* When Secure Connections is enabled, then just
4284 * return the present value stored with the OOB
4285 * data. The stored value contains the right present
4286 * information. However it can only be trusted when
4287 * not in Secure Connection Only mode.
4289 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4290 return data->present;
4292 /* When Secure Connections Only mode is enabled, then
4293 * the P-256 values are required. If they are not
4294 * available, then do not declare that OOB data is
4297 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4298 !memcmp(data->hash256, ZERO_KEY, 16))
4304 /* When Secure Connections is not enabled or actually
4305 * not supported by the hardware, then check that if
4306 * P-192 data values are present.
4308 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4309 !memcmp(data->hash192, ZERO_KEY, 16))
4315 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4317 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4318 struct hci_conn *conn;
4320 BT_DBG("%s", hdev->name);
4324 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4328 hci_conn_hold(conn);
4330 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4333 /* Allow pairing if we're pairable, the initiators of the
4334 * pairing or if the remote is not requesting bonding.
4336 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4337 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4338 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4339 struct hci_cp_io_capability_reply cp;
4341 bacpy(&cp.bdaddr, &ev->bdaddr);
4342 /* Change the IO capability from KeyboardDisplay
4343 * to DisplayYesNo as it is not supported by BT spec. */
4344 cp.capability = (conn->io_capability == 0x04) ?
4345 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4347 /* If we are initiators, there is no remote information yet */
4348 if (conn->remote_auth == 0xff) {
4349 /* Request MITM protection if our IO caps allow it
4350 * except for the no-bonding case.
4352 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4353 conn->auth_type != HCI_AT_NO_BONDING)
4354 conn->auth_type |= 0x01;
4356 conn->auth_type = hci_get_auth_req(conn);
4359 /* If we're not bondable, force one of the non-bondable
4360 * authentication requirement values.
4362 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4363 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4365 cp.authentication = conn->auth_type;
4366 cp.oob_data = bredr_oob_data_present(conn);
4368 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4371 struct hci_cp_io_capability_neg_reply cp;
4373 bacpy(&cp.bdaddr, &ev->bdaddr);
4374 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4376 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4381 hci_dev_unlock(hdev);
/* Handle the HCI IO Capability Response event: cache the remote
 * device's IO capability and authentication requirements on the ACL
 * connection so the later user-confirm / simple-pairing handlers can
 * consult them.
 * NOTE(review): extraction is lossy here (missing braces/NULL checks);
 * comments describe only the visible statements.
 */
4384 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4386 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4387 struct hci_conn *conn;
4389 BT_DBG("%s", hdev->name);
4393 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Remember what the peer told us for the duration of the pairing */
4397 conn->remote_cap = ev->capability;
4398 conn->remote_auth = ev->authentication;
4401 hci_dev_unlock(hdev);
/* Handle the HCI User Confirmation Request event (numeric comparison
 * step of Secure Simple Pairing). Depending on local/remote MITM
 * requirements and IO capabilities, either reject, auto-accept
 * (possibly after hdev->auto_accept_delay ms), or forward the request
 * to user space via mgmt_user_confirm_request().
 * NOTE(review): extraction is lossy (missing gotos/braces); comments
 * describe only the visible statements.
 */
4404 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4405 struct sk_buff *skb)
4407 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4408 int loc_mitm, rem_mitm, confirm_hint = 0;
4409 struct hci_conn *conn;
4411 BT_DBG("%s", hdev->name);
/* Without the management interface there is nobody to ask */
4415 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4418 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the authentication requirement encodes MITM protection */
4422 loc_mitm = (conn->auth_type & 0x01);
4423 rem_mitm = (conn->remote_auth & 0x01);
4425 /* If we require MITM but the remote device can't provide that
4426 * (it has NoInputNoOutput) then reject the confirmation
4427 * request. We check the security level here since it doesn't
4428 * necessarily match conn->auth_type.
4430 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4431 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4432 BT_DBG("Rejecting request: remote device can't provide MITM");
4433 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4434 sizeof(ev->bdaddr), &ev->bdaddr);
4438 /* If no side requires MITM protection; auto-accept */
4439 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4440 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4442 /* If we're not the initiators request authorization to
4443 * proceed from user space (mgmt_user_confirm with
4444 * confirm_hint set to 1). The exception is if neither
4445 * side had MITM or if the local IO capability is
4446 * NoInputNoOutput, in which case we do auto-accept
4448 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4449 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4450 (loc_mitm || rem_mitm)) {
4451 BT_DBG("Confirming auto-accept as acceptor");
4456 BT_DBG("Auto-accept of user confirmation with %ums delay",
4457 hdev->auto_accept_delay)ums;
4459 if (hdev->auto_accept_delay > 0) {
/* Defer the accept so a fast "yes" doesn't race the remote side */
4460 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4461 queue_delayed_work(conn->hdev->workqueue,
4462 &conn->auto_accept_work, delay);
4466 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4467 sizeof(ev->bdaddr), &ev->bdaddr);
/* Otherwise hand the numeric-comparison decision to user space */
4472 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4473 le32_to_cpu(ev->passkey), confirm_hint);
4476 hci_dev_unlock(hdev);
/* Handle the HCI User Passkey Request event: when the management
 * interface is active, ask user space to supply the passkey for the
 * given remote address.
 */
4479 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4480 struct sk_buff *skb)
4482 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4484 BT_DBG("%s", hdev->name);
4486 if (hci_dev_test_flag(hdev, HCI_MGMT))
4487 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle the HCI User Passkey Notification event: record the passkey
 * to display and reset the entered-digit counter, then notify user
 * space so it can show the passkey to the user.
 * NOTE(review): extraction is lossy (missing NULL check on conn);
 * comments describe only the visible statements.
 */
4490 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4491 struct sk_buff *skb)
4493 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4494 struct hci_conn *conn;
4496 BT_DBG("%s", hdev->name);
4498 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4502 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4503 conn->passkey_entered = 0;
4505 if (hci_dev_test_flag(hdev, HCI_MGMT))
4506 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4507 conn->dst_type, conn->passkey_notify,
4508 conn->passkey_entered);
/* Handle the HCI Keypress Notification event: track how many passkey
 * digits the remote user has entered/erased so far and forward the
 * updated count to user space for display.
 * NOTE(review): extraction is lossy (switch header and break lines
 * missing); comments describe only the visible statements.
 */
4511 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4513 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4514 struct hci_conn *conn;
4516 BT_DBG("%s", hdev->name);
4518 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4523 case HCI_KEYPRESS_STARTED:
4524 conn->passkey_entered = 0;
4527 case HCI_KEYPRESS_ENTERED:
4528 conn->passkey_entered++;
4531 case HCI_KEYPRESS_ERASED:
4532 conn->passkey_entered--;
4535 case HCI_KEYPRESS_CLEARED:
4536 conn->passkey_entered = 0;
4539 case HCI_KEYPRESS_COMPLETED:
4543 if (hci_dev_test_flag(hdev, HCI_MGMT))
4544 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4545 conn->dst_type, conn->passkey_notify,
4546 conn->passkey_entered);
/* Handle the HCI Simple Pairing Complete event: reset the cached
 * remote authentication requirement and, on failure, report
 * auth_failed to user space unless a pending local authentication
 * will generate the same report.
 */
4549 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4550 struct sk_buff *skb)
4552 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4553 struct hci_conn *conn;
4555 BT_DBG("%s", hdev->name);
4559 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4563 /* Reset the authentication requirement to unknown */
4564 conn->remote_auth = 0xff;
4566 /* To avoid duplicate auth_failed events to user space we check
4567 * the HCI_CONN_AUTH_PEND flag which will be set if we
4568 * initiated the authentication. A traditional auth_complete
4569 * event gets always produced as initiator and is also mapped to
4570 * the mgmt_auth_failed event */
4571 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4572 mgmt_auth_failed(conn, ev->status);
/* Release the reference taken when pairing started */
4574 hci_conn_drop(conn);
4577 hci_dev_unlock(hdev);
/* Handle the HCI Remote Host Supported Features event: store the
 * remote host feature page (page 1) on the connection and update the
 * inquiry cache entry's SSP mode flag.
 * NOTE(review): extraction is lossy (NULL checks for conn/ie missing);
 * comments describe only the visible statements.
 */
4580 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4581 struct sk_buff *skb)
4583 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4584 struct inquiry_entry *ie;
4585 struct hci_conn *conn;
4587 BT_DBG("%s", hdev->name);
4591 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4593 memcpy(conn->features[1], ev->features, 8);
4595 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4597 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4599 hci_dev_unlock(hdev);
/* Handle the HCI Remote OOB Data Request event: look up locally stored
 * out-of-band pairing data for the remote address. If none is found,
 * send a negative reply; otherwise reply with either the extended
 * (P-192 + P-256) or legacy (P-192 only) OOB data depending on whether
 * BR/EDR Secure Connections is enabled.
 * NOTE(review): extraction is lossy (missing else branches/gotos);
 * comments describe only the visible statements.
 */
4602 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4603 struct sk_buff *skb)
4605 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4606 struct oob_data *data;
4608 BT_DBG("%s", hdev->name);
4612 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4615 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4617 struct hci_cp_remote_oob_data_neg_reply cp;
4619 bacpy(&cp.bdaddr, &ev->bdaddr);
4620 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4625 if (bredr_sc_enabled(hdev)) {
4626 struct hci_cp_remote_oob_ext_data_reply cp;
4628 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-only mode the P-192 values are zeroed out so only the
 * stronger P-256 values are usable */
4629 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4630 memset(cp.hash192, 0, sizeof(cp.hash192));
4631 memset(cp.rand192, 0, sizeof(cp.rand192));
4633 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4634 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4636 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4637 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4639 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4642 struct hci_cp_remote_oob_data_reply cp;
4644 bacpy(&cp.bdaddr, &ev->bdaddr);
4645 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4646 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4648 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4653 hci_dev_unlock(hdev);
4656 #if IS_ENABLED(CONFIG_BT_HS)
/* Handle the AMP Channel Selected event (CONFIG_BT_HS): look up the
 * physical link by handle and continue the AMP association by reading
 * the local assoc data.
 */
4657 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4659 struct hci_ev_channel_selected *ev = (void *)skb->data;
4660 struct hci_conn *hcon;
4662 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4664 skb_pull(skb, sizeof(*ev));
4666 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4670 amp_read_loc_assoc_final_data(hdev, hcon);
/* Handle the AMP Physical Link Complete event: on success, mark the
 * AMP connection as connected, copy the peer address from the
 * underlying BR/EDR connection, register it in debugfs/sysfs and
 * confirm the physical link to the AMP manager.
 * NOTE(review): extraction is lossy (missing status/NULL branches);
 * comments describe only the visible statements.
 */
4673 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4674 struct sk_buff *skb)
4676 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4677 struct hci_conn *hcon, *bredr_hcon;
4679 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4684 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4686 hci_dev_unlock(hdev);
/* A physical link without an AMP manager cannot be completed */
4690 if (!hcon->amp_mgr) {
4691 hci_dev_unlock(hdev);
4697 hci_dev_unlock(hdev);
4701 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4703 hcon->state = BT_CONNECTED;
4704 bacpy(&hcon->dst, &bredr_hcon->dst);
4706 hci_conn_hold(hcon);
4707 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4708 hci_conn_drop(hcon);
4710 hci_debugfs_create_conn(hcon);
4711 hci_conn_add_sysfs(hcon);
4713 amp_physical_cfm(bredr_hcon, hcon);
4715 hci_dev_unlock(hdev);
/* Handle the AMP Logical Link Complete event: create an hci_chan for
 * the new logical link on the physical connection and, if an L2CAP
 * channel is waiting on the AMP manager, confirm the logical link and
 * switch its MTU to the controller's block MTU.
 * NOTE(review): extraction is lossy (missing NULL checks); comments
 * describe only the visible statements.
 */
4718 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4720 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4721 struct hci_conn *hcon;
4722 struct hci_chan *hchan;
4723 struct amp_mgr *mgr;
4725 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4726 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4729 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4733 /* Create AMP hchan */
4734 hchan = hci_chan_create(hcon);
4738 hchan->handle = le16_to_cpu(ev->handle);
4741 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4743 mgr = hcon->amp_mgr;
4744 if (mgr && mgr->bredr_chan) {
4745 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4747 l2cap_chan_lock(bredr_chan);
/* AMP data paths use the block MTU rather than the ACL MTU */
4749 bredr_chan->conn->mtu = hdev->block_mtu;
4750 l2cap_logical_cfm(bredr_chan, hchan, 0);
4751 hci_conn_hold(hcon);
4753 l2cap_chan_unlock(bredr_chan);
/* Handle the AMP Disconnection Logical Link Complete event: find the
 * AMP channel by logical handle and tear down the logical link,
 * passing the disconnect reason up to the AMP layer.
 */
4757 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4758 struct sk_buff *skb)
4760 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4761 struct hci_chan *hchan;
4763 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4764 le16_to_cpu(ev->handle), ev->status);
4771 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
/* Only AMP channels are handled here */
4772 if (!hchan || !hchan->amp)
4775 amp_destroy_logical_link(hchan, ev->reason);
4778 hci_dev_unlock(hdev);
/* Handle the AMP Disconnection Physical Link Complete event: mark the
 * physical link connection as closed.
 * NOTE(review): extraction is lossy (missing status/NULL checks);
 * comments describe only the visible statements.
 */
4781 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4782 struct sk_buff *skb)
4784 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4785 struct hci_conn *hcon;
4787 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4794 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4796 hcon->state = BT_CLOSED;
4800 hci_dev_unlock(hdev);
/* Fill in the initiator/responder address pair on an LE connection.
 * The responder side gets the peer address from the event; the
 * initiator side is reconstructed from the controller-provided local
 * RPA when present, from hdev->rpa when privacy is enabled, or from
 * the identity address otherwise. The mirrored second half handles the
 * case where we are the responder (advertiser).
 * NOTE(review): extraction is lossy (the role branch header is
 * missing); comments describe only the visible statements.
 */
4804 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
4805 u8 bdaddr_type, bdaddr_t *local_rpa)
4808 conn->dst_type = bdaddr_type;
4809 conn->resp_addr_type = bdaddr_type;
4810 bacpy(&conn->resp_addr, bdaddr);
4812 /* Check if the controller has set a Local RPA then it must be
4813 * used instead or hdev->rpa.
4815 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
4816 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4817 bacpy(&conn->init_addr, local_rpa);
4818 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
4819 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4820 bacpy(&conn->init_addr, &conn->hdev->rpa);
4822 hci_copy_identity_address(conn->hdev, &conn->init_addr,
4823 &conn->init_addr_type);
4826 conn->resp_addr_type = conn->hdev->adv_addr_type;
4827 /* Check if the controller has set a Local RPA then it must be
4828 * used instead or hdev->rpa.
4830 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
4831 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
4832 bacpy(&conn->resp_addr, local_rpa);
4833 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
4834 /* In case of ext adv, resp_addr will be updated in
4835 * Adv Terminated event.
4837 if (!ext_adv_capable(conn->hdev))
4838 bacpy(&conn->resp_addr,
4839 &conn->hdev->random_addr);
4841 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
4844 conn->init_addr_type = bdaddr_type;
4845 bacpy(&conn->init_addr, bdaddr);
4847 /* For incoming connections, set the default minimum
4848 * and maximum connection interval. They will be used
4849 * to check if the parameters are in range and if not
4850 * trigger the connection update procedure.
4852 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
4853 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete: find or create the hci_conn, resolve the peer's identity
 * via the IRK store, reject blocked devices, record the connection
 * parameters, notify the management interface, optionally kick off the
 * remote-features exchange, and clean up any pending connection
 * parameters that triggered the connect.
 * NOTE(review): extraction is lossy (missing lock/unlock pairs, error
 * branches and gotos); comments describe only the visible statements.
 */
4857 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
4858 bdaddr_t *bdaddr, u8 bdaddr_type,
4859 bdaddr_t *local_rpa, u8 role, u16 handle,
4860 u16 interval, u16 latency,
4861 u16 supervision_timeout)
4863 struct hci_conn_params *params;
4864 struct hci_conn *conn;
4865 struct smp_irk *irk;
4870 /* All controllers implicitly stop advertising in the event of a
4871 * connection, so ensure that the state bit is cleared.
4873 hci_dev_clear_flag(hdev, HCI_LE_ADV);
4875 conn = hci_lookup_le_connect(hdev);
4877 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
4879 bt_dev_err(hdev, "no memory for new connection");
4883 conn->dst_type = bdaddr_type;
4885 /* If we didn't have a hci_conn object previously
4886 * but we're in master role this must be something
4887 * initiated using a white list. Since white list based
4888 * connections are not "first class citizens" we don't
4889 * have full tracking of them. Therefore, we go ahead
4890 * with a "best effort" approach of determining the
4891 * initiator address based on the HCI_PRIVACY flag.
4894 conn->resp_addr_type = bdaddr_type;
4895 bacpy(&conn->resp_addr, bdaddr);
4896 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4897 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4898 bacpy(&conn->init_addr, &hdev->rpa);
4900 hci_copy_identity_address(hdev,
4902 &conn->init_addr_type);
/* A completed (or failed) connection ends the connect timeout */
4906 cancel_delayed_work(&conn->le_conn_timeout);
4909 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
4911 /* Lookup the identity address from the stored connection
4912 * address and address type.
4914 * When establishing connections to an identity address, the
4915 * connection procedure will store the resolvable random
4916 * address first. Now if it can be converted back into the
4917 * identity address, start using the identity address from
4920 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4922 bacpy(&conn->dst, &irk->bdaddr);
4923 conn->dst_type = irk->addr_type;
4927 hci_le_conn_failed(conn, status);
4931 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4932 addr_type = BDADDR_LE_PUBLIC;
4934 addr_type = BDADDR_LE_RANDOM;
4936 /* Drop the connection if the device is blocked */
4937 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4938 hci_conn_drop(conn);
4942 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4943 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4945 conn->sec_level = BT_SECURITY_LOW;
4946 conn->handle = handle;
4947 conn->state = BT_CONFIG;
4949 conn->le_conn_interval = interval;
4950 conn->le_conn_latency = latency;
4951 conn->le_supv_timeout = supervision_timeout;
4953 hci_debugfs_create_conn(conn);
4954 hci_conn_add_sysfs(conn);
4957 /* The remote features procedure is defined for master
4958 * role only. So only in case of an initiated connection
4959 * request the remote features.
4961 * If the local controller supports slave-initiated features
4962 * exchange, then requesting the remote features in slave
4963 * role is possible. Otherwise just transition into the
4964 * connected state without requesting the remote features.
4967 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
4968 struct hci_cp_le_read_remote_features cp;
4970 cp.handle = __cpu_to_le16(conn->handle);
4972 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
4975 hci_conn_hold(conn);
4977 conn->state = BT_CONNECTED;
4978 hci_connect_cfm(conn, status);
4981 hci_connect_cfm(conn, status);
/* Drop the hold the pending-connection params took on this conn */
4984 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4987 list_del_init(&params->action);
4989 hci_conn_drop(params->conn);
4990 hci_conn_put(params->conn);
4991 params->conn = NULL;
4996 hci_update_background_scan(hdev);
4997 hci_dev_unlock(hdev);
/* Handle the LE Connection Complete event by delegating to the common
 * le_conn_complete_evt() helper (no local RPA in the legacy event, so
 * NULL is passed).
 */
5000 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5002 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5004 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5006 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5007 NULL, ev->role, le16_to_cpu(ev->handle),
5008 le16_to_cpu(ev->interval),
5009 le16_to_cpu(ev->latency),
5010 le16_to_cpu(ev->supervision_timeout));
/* Handle the LE Enhanced Connection Complete event by delegating to
 * le_conn_complete_evt(); unlike the legacy event this one carries the
 * controller-generated local RPA.
 */
5013 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5014 struct sk_buff *skb)
5016 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5018 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5020 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5021 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5022 le16_to_cpu(ev->interval),
5023 le16_to_cpu(ev->latency),
5024 le16_to_cpu(ev->supervision_timeout));
/* Handle the LE Extended Advertising Set Terminated event: remove
 * non-persistent advertising instances, and when the termination was
 * caused by an incoming connection, back-fill the connection's
 * responder address with the random address of the advertising set
 * that produced it.
 * NOTE(review): extraction is lossy (missing status/NULL branches);
 * comments describe only the visible statements.
 */
5027 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5029 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5030 struct hci_conn *conn;
5032 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5035 struct adv_info *adv;
5037 adv = hci_find_adv_instance(hdev, ev->handle);
5041 /* Remove advertising as it has been terminated */
5042 hci_remove_adv_instance(hdev, ev->handle);
5043 mgmt_advertising_removed(NULL, hdev, ev->handle);
5048 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5050 struct adv_info *adv_instance;
/* Only fix up resp_addr when it was left unset and we advertise
 * with a random address */
5052 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5053 bacmp(&conn->resp_addr, BDADDR_ANY))
5056 if (!hdev->cur_adv_instance) {
5057 bacpy(&conn->resp_addr, &hdev->random_addr);
5061 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5063 bacpy(&conn->resp_addr, &adv_instance->random_addr);
/* Handle the LE Connection Update Complete event: record the newly
 * negotiated interval, latency and supervision timeout on the
 * connection.
 * NOTE(review): extraction is lossy (missing status/NULL checks);
 * comments describe only the visible statements.
 */
5067 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5068 struct sk_buff *skb)
5070 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5071 struct hci_conn *conn;
5073 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5080 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5082 conn->le_conn_interval = le16_to_cpu(ev->interval);
5083 conn->le_conn_latency = le16_to_cpu(ev->latency);
5084 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5087 hci_dev_unlock(hdev);
5090 /* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertising report should trigger an LE
 * connection attempt: the report must be connectable, the device must
 * not be blocked, we must not already be in slave role, and a matching
 * entry must exist in pend_le_conns with a suitable auto_connect
 * policy. On success the connection is initiated via hci_connect_le()
 * and, for non-explicit connects, a reference to the new conn is
 * stored in the params so it can be aborted later.
 * NOTE(review): extraction is lossy (missing returns and braces);
 * comments describe only the visible statements.
 */
5091 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5093 u8 addr_type, u8 adv_type,
5094 bdaddr_t *direct_rpa)
5096 struct hci_conn *conn;
5097 struct hci_conn_params *params;
5099 /* If the event is not connectable don't proceed further */
5100 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5103 /* Ignore if the device is blocked */
5104 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
5107 /* Most controller will fail if we try to create new connections
5108 * while we have an existing one in slave role.
5110 if (hdev->conn_hash.le_num_slave > 0)
5113 /* If we're not connectable only connect devices that we have in
5114 * our pend_le_conns list.
5116 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5121 if (!params->explicit_connect) {
5122 switch (params->auto_connect) {
5123 case HCI_AUTO_CONN_DIRECT:
5124 /* Only devices advertising with ADV_DIRECT_IND are
5125 * triggering a connection attempt. This is allowing
5126 * incoming connections from slave devices.
5128 if (adv_type != LE_ADV_DIRECT_IND)
5131 case HCI_AUTO_CONN_ALWAYS:
5132 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5133 * are triggering a connection attempt. This means
5134 * that incoming connectioms from slave device are
5135 * accepted and also outgoing connections to slave
5136 * devices are established when found.
5144 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5145 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
5147 if (!IS_ERR(conn)) {
5148 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5149 * by higher layer that tried to connect, if no then
5150 * store the pointer since we don't really have any
5151 * other owner of the object besides the params that
5152 * triggered it. This way we can abort the connection if
5153 * the parameters get removed and keep the reference
5154 * count consistent once the connection is established.
5157 if (!params->explicit_connect)
5158 params->conn = hci_conn_get(conn);
5163 switch (PTR_ERR(conn)) {
5165 /* If hci_connect() returns -EBUSY it means there is already
5166 * an LE connection attempt going on. Since controllers don't
5167 * support more than one connection attempt at the time, we
5168 * don't consider this an error case.
5172 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core LE advertising-report processing shared by the legacy, extended
 * and direct advertising report handlers. Validates the PDU type and
 * length, trims zero padding, filters directed reports that are not
 * addressed to us, resolves the advertiser's identity address, checks
 * for a pending connection, and finally emits (possibly merged with a
 * pending SCAN_RSP) mgmt device_found events.
 * NOTE(review): extraction is lossy (switch header, early returns and
 * several branch lines are missing); comments describe only the
 * visible statements.
 */
5179 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5180 u8 bdaddr_type, bdaddr_t *direct_addr,
5181 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5184 struct discovery_state *d = &hdev->discovery;
5185 struct smp_irk *irk;
5186 struct hci_conn *conn;
5193 case LE_ADV_DIRECT_IND:
5194 case LE_ADV_SCAN_IND:
5195 case LE_ADV_NONCONN_IND:
5196 case LE_ADV_SCAN_RSP:
5199 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5200 "type: 0x%02x", type);
/* Legacy advertising data is limited to 31 bytes by the spec */
5204 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5205 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5209 /* Find the end of the data in case the report contains padded zero
5210 * bytes at the end causing an invalid length value.
5212 * When data is NULL, len is 0 so there is no need for extra ptr
5213 * check as 'ptr < data + 0' is already false in such case.
5215 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5216 if (ptr + 1 + *ptr > data + len)
5220 real_len = ptr - data;
5222 /* Adjust for actual length */
5223 if (len != real_len) {
5224 bt_dev_err_ratelimited(hdev, "advertising data len corrected");
5228 /* If the direct address is present, then this report is from
5229 * a LE Direct Advertising Report event. In that case it is
5230 * important to see if the address is matching the local
5231 * controller address.
5234 /* Only resolvable random addresses are valid for these
5235 * kind of reports and others can be ignored.
5237 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5240 /* If the controller is not using resolvable random
5241 * addresses, then this report can be ignored.
5243 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5246 /* If the local IRK of the controller does not match
5247 * with the resolvable random address provided, then
5248 * this report can be ignored.
5250 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5254 /* Check if we need to convert to identity address */
5255 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5257 bdaddr = &irk->bdaddr;
5258 bdaddr_type = irk->addr_type;
5261 /* Check if we have been requested to connect to this device.
5263 * direct_addr is set only for directed advertising reports (it is NULL
5264 * for advertising reports) and is already verified to be RPA above.
5266 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5268 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5269 /* Store report for later inclusion by
5270 * mgmt_device_connected
5272 memcpy(conn->le_adv_data, data, len);
5273 conn->le_adv_data_len = len;
5276 /* Passive scanning shouldn't trigger any device found events,
5277 * except for devices marked as CONN_REPORT for which we do send
5278 * device found events.
5280 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5281 if (type == LE_ADV_DIRECT_IND)
5284 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5285 bdaddr, bdaddr_type))
5288 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5289 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5292 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5293 rssi, flags, data, len, NULL, 0);
5297 /* When receiving non-connectable or scannable undirected
5298 * advertising reports, this means that the remote device is
5299 * not connectable and then clearly indicate this in the
5300 * device found event.
5302 * When receiving a scan response, then there is no way to
5303 * know if the remote device is connectable or not. However
5304 * since scan responses are merged with a previously seen
5305 * advertising report, the flags field from that report
5308 * In the really unlikely case that a controller get confused
5309 * and just sends a scan response event, then it is marked as
5310 * not connectable as well.
5312 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5313 type == LE_ADV_SCAN_RSP)
5314 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5318 /* If there's nothing pending either store the data from this
5319 * event or send an immediate device found event if the data
5320 * should not be stored for later.
5322 if (!ext_adv && !has_pending_adv_report(hdev)) {
5323 /* If the report will trigger a SCAN_REQ store it for
5326 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5327 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5328 rssi, flags, data, len);
5332 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5333 rssi, flags, data, len, NULL, 0);
5337 /* Check if the pending report is for the same device as the new one */
5338 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5339 bdaddr_type == d->last_adv_addr_type);
5341 /* If the pending data doesn't match this report or this isn't a
5342 * scan response (e.g. we got a duplicate ADV_IND) then force
5343 * sending of the pending data.
5345 if (type != LE_ADV_SCAN_RSP || !match) {
5346 /* Send out whatever is in the cache, but skip duplicates */
5348 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5349 d->last_adv_addr_type, NULL,
5350 d->last_adv_rssi, d->last_adv_flags,
5352 d->last_adv_data_len, NULL, 0);
5354 /* If the new report will trigger a SCAN_REQ store it for
5357 if (!ext_adv && (type == LE_ADV_IND ||
5358 type == LE_ADV_SCAN_IND)) {
5359 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5360 rssi, flags, data, len);
5364 /* The advertising reports cannot be merged, so clear
5365 * the pending report and send out a device found event.
5367 clear_pending_adv_report(hdev);
5368 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5369 rssi, flags, data, len, NULL, 0);
5373 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5374 * the new event is a SCAN_RSP. We can therefore proceed with
5375 * sending a merged device found event.
5377 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5378 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5379 d->last_adv_data, d->last_adv_data_len, data, len);
5380 clear_pending_adv_report(hdev);
/* Handle the LE Advertising Report event: iterate the packed reports,
 * pick up the RSSI byte that follows each report's data, and feed each
 * one to process_adv_report() (ext_adv = false).
 */
5383 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5385 u8 num_reports = skb->data[0];
5386 void *ptr = &skb->data[1];
5390 while (num_reports--) {
5391 struct hci_ev_le_advertising_info *ev = ptr;
/* The RSSI is appended as one extra byte after the AD data */
5394 if (ev->length <= HCI_MAX_AD_LENGTH) {
5395 rssi = ev->data[ev->length];
5396 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5397 ev->bdaddr_type, NULL, 0, rssi,
5398 ev->data, ev->length, false);
5400 bt_dev_err(hdev, "Dropping invalid advertising data");
5403 ptr += sizeof(*ev) + ev->length + 1;
5406 hci_dev_unlock(hdev);
/* Map an extended advertising event type (bit field) to the closest
 * legacy PDU type so the common process_adv_report() path can be
 * reused. Returns LE_ADV_INVALID for unrecognized combinations.
 * NOTE(review): extraction is lossy (switch header and some returns
 * missing); comments describe only the visible statements.
 */
5409 static u8 ext_evt_type_to_legacy(u16 evt_type)
/* Legacy PDUs carry the exact legacy type in the low bits */
5411 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5413 case LE_LEGACY_ADV_IND:
5415 case LE_LEGACY_ADV_DIRECT_IND:
5416 return LE_ADV_DIRECT_IND;
5417 case LE_LEGACY_ADV_SCAN_IND:
5418 return LE_ADV_SCAN_IND;
5419 case LE_LEGACY_NONCONN_IND:
5420 return LE_ADV_NONCONN_IND;
5421 case LE_LEGACY_SCAN_RSP_ADV:
5422 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5423 return LE_ADV_SCAN_RSP;
5426 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5429 return LE_ADV_INVALID;
/* True extended PDUs: derive the legacy type from the property bits */
5432 if (evt_type & LE_EXT_ADV_CONN_IND) {
5433 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5434 return LE_ADV_DIRECT_IND;
5439 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5440 return LE_ADV_SCAN_RSP;
5442 if (evt_type & LE_EXT_ADV_SCAN_IND)
5443 return LE_ADV_SCAN_IND;
5445 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5446 evt_type & LE_EXT_ADV_DIRECT_IND)
5447 return LE_ADV_NONCONN_IND;
5449 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5452 return LE_ADV_INVALID;
/* Handle the LE Extended Advertising Report event: translate each
 * report's extended event type to a legacy type and hand it to
 * process_adv_report(), flagging non-legacy PDUs as ext_adv so the
 * 31-byte legacy length limit is not applied.
 */
5455 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5457 u8 num_reports = skb->data[0];
5458 void *ptr = &skb->data[1];
5462 while (num_reports--) {
5463 struct hci_ev_le_ext_adv_report *ev = ptr;
5467 evt_type = __le16_to_cpu(ev->evt_type);
5468 legacy_evt_type = ext_evt_type_to_legacy(evt_type);
5469 if (legacy_evt_type != LE_ADV_INVALID) {
5470 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5471 ev->bdaddr_type, NULL, 0, ev->rssi,
5472 ev->data, ev->length,
5473 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5476 ptr += sizeof(*ev) + ev->length + 1;
5479 hci_dev_unlock(hdev);
/* Handle the LE Read Remote Features Complete event: store the remote
 * LE feature page and, if the connection was waiting in BT_CONFIG,
 * transition it to BT_CONNECTED. Error 0x1a (unsupported remote
 * feature) from a slave-initiated exchange is tolerated as success.
 */
5482 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5483 struct sk_buff *skb)
5485 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5486 struct hci_conn *conn;
5488 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5492 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5495 memcpy(conn->features[0], ev->features, 8);
5497 if (conn->state == BT_CONFIG) {
5500 /* If the local controller supports slave-initiated
5501 * features exchange, but the remote controller does
5502 * not, then it is possible that the error code 0x1a
5503 * for unsupported remote feature gets returned.
5505 * In this specific case, allow the connection to
5506 * transition into connected state and mark it as
5509 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5510 !conn->out && ev->status == 0x1a)
5513 status = ev->status;
5515 conn->state = BT_CONNECTED;
5516 hci_connect_cfm(conn, status);
5517 hci_conn_drop(conn);
5521 hci_dev_unlock(hdev);
/* Handle the LE Long Term Key Request event: look up the LTK for the
 * connection's peer, verify EDiv/Rand (must be zero for SC keys, must
 * match for legacy keys), reply with the key and record the resulting
 * security level. STKs are one-shot and are deleted after use. Falls
 * through to a negative reply when no usable key exists.
 * NOTE(review): extraction is lossy (missing NULL checks and the
 * not_found label); comments describe only the visible statements.
 */
5524 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5526 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5527 struct hci_cp_le_ltk_reply cp;
5528 struct hci_cp_le_ltk_neg_reply neg;
5529 struct hci_conn *conn;
5530 struct smp_ltk *ltk;
5532 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5536 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5540 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5544 if (smp_ltk_is_sc(ltk)) {
5545 /* With SC both EDiv and Rand are set to zero */
5546 if (ev->ediv || ev->rand)
5549 /* For non-SC keys check that EDiv and Rand match */
5550 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Zero-pad the key up to 16 bytes if the negotiated size is smaller */
5554 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5555 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5556 cp.handle = cpu_to_le16(conn->handle);
5558 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5560 conn->enc_key_size = ltk->enc_size;
5562 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5564 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5565 * temporary key used to encrypt a connection following
5566 * pairing. It is used during the Encrypted Session Setup to
5567 * distribute the keys. Later, security can be re-established
5568 * using a distributed LTK.
5570 if (ltk->type == SMP_STK) {
5571 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5572 list_del_rcu(&ltk->list);
5573 kfree_rcu(ltk, rcu);
5575 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5578 hci_dev_unlock(hdev);
/* No (matching) key: tell the controller to reject encryption */
5583 neg.handle = ev->handle;
5584 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5585 hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request Negative Reply for
 * the given connection handle with the supplied rejection reason.
 */
5588 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5591 struct hci_cp_le_conn_param_req_neg_reply cp;
5593 cp.handle = cpu_to_le16(handle);
5596 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle the LE Remote Connection Parameter Request event: reject if
 * the connection is unknown/not connected or the parameters are out of
 * range; otherwise, as master, remember the requested parameters (in
 * stored params or via mgmt_new_conn_param) and accept the request.
 * NOTE(review): extraction is lossy (store_hint assignment lines are
 * missing); comments describe only the visible statements.
 */
5600 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5601 struct sk_buff *skb)
5603 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5604 struct hci_cp_le_conn_param_req_reply cp;
5605 struct hci_conn *hcon;
5606 u16 handle, min, max, latency, timeout;
5608 handle = le16_to_cpu(ev->handle);
5609 min = le16_to_cpu(ev->interval_min);
5610 max = le16_to_cpu(ev->interval_max);
5611 latency = le16_to_cpu(ev->latency);
5612 timeout = le16_to_cpu(ev->timeout);
5614 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5615 if (!hcon || hcon->state != BT_CONNECTED)
5616 return send_conn_param_neg_reply(hdev, handle,
5617 HCI_ERROR_UNKNOWN_CONN_ID);
5619 if (hci_check_conn_params(min, max, latency, timeout))
5620 return send_conn_param_neg_reply(hdev, handle,
5621 HCI_ERROR_INVALID_LL_PARAMS);
5623 if (hcon->role == HCI_ROLE_MASTER) {
5624 struct hci_conn_params *params;
5629 params = hci_conn_params_lookup(hdev, &hcon->dst,
/* Persist the peer's preferred parameters for future connects */
5632 params->conn_min_interval = min;
5633 params->conn_max_interval = max;
5634 params->conn_latency = latency;
5635 params->supervision_timeout = timeout;
5641 hci_dev_unlock(hdev);
5643 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5644 store_hint, min, max, latency, timeout);
/* Accept the request by echoing the parameters back verbatim */
5647 cp.handle = ev->handle;
5648 cp.interval_min = ev->interval_min;
5649 cp.interval_max = ev->interval_max;
5650 cp.latency = ev->latency;
5651 cp.timeout = ev->timeout;
5655 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5658 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5659 struct sk_buff *skb)
5661 u8 num_reports = skb->data[0];
5662 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
5664 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
5669 for (; num_reports; num_reports--, ev++)
5670 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5671 ev->bdaddr_type, &ev->direct_addr,
5672 ev->direct_addr_type, ev->rssi, NULL, 0,
5675 hci_dev_unlock(hdev);
5678 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5680 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5682 skb_pull(skb, sizeof(*le_ev));
5684 switch (le_ev->subevent) {
5685 case HCI_EV_LE_CONN_COMPLETE:
5686 hci_le_conn_complete_evt(hdev, skb);
5689 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5690 hci_le_conn_update_complete_evt(hdev, skb);
5693 case HCI_EV_LE_ADVERTISING_REPORT:
5694 hci_le_adv_report_evt(hdev, skb);
5697 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5698 hci_le_remote_feat_complete_evt(hdev, skb);
5701 case HCI_EV_LE_LTK_REQ:
5702 hci_le_ltk_request_evt(hdev, skb);
5705 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5706 hci_le_remote_conn_param_req_evt(hdev, skb);
5709 case HCI_EV_LE_DIRECT_ADV_REPORT:
5710 hci_le_direct_adv_report_evt(hdev, skb);
5713 case HCI_EV_LE_EXT_ADV_REPORT:
5714 hci_le_ext_adv_report_evt(hdev, skb);
5717 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
5718 hci_le_enh_conn_complete_evt(hdev, skb);
5721 case HCI_EV_LE_EXT_ADV_SET_TERM:
5722 hci_le_ext_adv_term_evt(hdev, skb);
5730 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5731 u8 event, struct sk_buff *skb)
5733 struct hci_ev_cmd_complete *ev;
5734 struct hci_event_hdr *hdr;
5739 if (skb->len < sizeof(*hdr)) {
5740 bt_dev_err(hdev, "too short HCI event");
5744 hdr = (void *) skb->data;
5745 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5748 if (hdr->evt != event)
5753 /* Check if request ended in Command Status - no way to retreive
5754 * any extra parameters in this case.
5756 if (hdr->evt == HCI_EV_CMD_STATUS)
5759 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5760 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
5765 if (skb->len < sizeof(*ev)) {
5766 bt_dev_err(hdev, "too short cmd_complete event");
5770 ev = (void *) skb->data;
5771 skb_pull(skb, sizeof(*ev));
5773 if (opcode != __le16_to_cpu(ev->opcode)) {
5774 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5775 __le16_to_cpu(ev->opcode));
5782 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5784 struct hci_event_hdr *hdr = (void *) skb->data;
5785 hci_req_complete_t req_complete = NULL;
5786 hci_req_complete_skb_t req_complete_skb = NULL;
5787 struct sk_buff *orig_skb = NULL;
5788 u8 status = 0, event = hdr->evt, req_evt = 0;
5789 u16 opcode = HCI_OP_NOP;
5792 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
5796 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5797 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5798 opcode = __le16_to_cpu(cmd_hdr->opcode);
5799 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5804 /* If it looks like we might end up having to call
5805 * req_complete_skb, store a pristine copy of the skb since the
5806 * various handlers may modify the original one through
5807 * skb_pull() calls, etc.
5809 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5810 event == HCI_EV_CMD_COMPLETE)
5811 orig_skb = skb_clone(skb, GFP_KERNEL);
5813 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5816 case HCI_EV_INQUIRY_COMPLETE:
5817 hci_inquiry_complete_evt(hdev, skb);
5820 case HCI_EV_INQUIRY_RESULT:
5821 hci_inquiry_result_evt(hdev, skb);
5824 case HCI_EV_CONN_COMPLETE:
5825 hci_conn_complete_evt(hdev, skb);
5828 case HCI_EV_CONN_REQUEST:
5829 hci_conn_request_evt(hdev, skb);
5832 case HCI_EV_DISCONN_COMPLETE:
5833 hci_disconn_complete_evt(hdev, skb);
5836 case HCI_EV_AUTH_COMPLETE:
5837 hci_auth_complete_evt(hdev, skb);
5840 case HCI_EV_REMOTE_NAME:
5841 hci_remote_name_evt(hdev, skb);
5844 case HCI_EV_ENCRYPT_CHANGE:
5845 hci_encrypt_change_evt(hdev, skb);
5848 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5849 hci_change_link_key_complete_evt(hdev, skb);
5852 case HCI_EV_REMOTE_FEATURES:
5853 hci_remote_features_evt(hdev, skb);
5856 case HCI_EV_CMD_COMPLETE:
5857 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5858 &req_complete, &req_complete_skb);
5861 case HCI_EV_CMD_STATUS:
5862 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5866 case HCI_EV_HARDWARE_ERROR:
5867 hci_hardware_error_evt(hdev, skb);
5870 case HCI_EV_ROLE_CHANGE:
5871 hci_role_change_evt(hdev, skb);
5874 case HCI_EV_NUM_COMP_PKTS:
5875 hci_num_comp_pkts_evt(hdev, skb);
5878 case HCI_EV_MODE_CHANGE:
5879 hci_mode_change_evt(hdev, skb);
5882 case HCI_EV_PIN_CODE_REQ:
5883 hci_pin_code_request_evt(hdev, skb);
5886 case HCI_EV_LINK_KEY_REQ:
5887 hci_link_key_request_evt(hdev, skb);
5890 case HCI_EV_LINK_KEY_NOTIFY:
5891 hci_link_key_notify_evt(hdev, skb);
5894 case HCI_EV_CLOCK_OFFSET:
5895 hci_clock_offset_evt(hdev, skb);
5898 case HCI_EV_PKT_TYPE_CHANGE:
5899 hci_pkt_type_change_evt(hdev, skb);
5902 case HCI_EV_PSCAN_REP_MODE:
5903 hci_pscan_rep_mode_evt(hdev, skb);
5906 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5907 hci_inquiry_result_with_rssi_evt(hdev, skb);
5910 case HCI_EV_REMOTE_EXT_FEATURES:
5911 hci_remote_ext_features_evt(hdev, skb);
5914 case HCI_EV_SYNC_CONN_COMPLETE:
5915 hci_sync_conn_complete_evt(hdev, skb);
5918 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5919 hci_extended_inquiry_result_evt(hdev, skb);
5922 case HCI_EV_KEY_REFRESH_COMPLETE:
5923 hci_key_refresh_complete_evt(hdev, skb);
5926 case HCI_EV_IO_CAPA_REQUEST:
5927 hci_io_capa_request_evt(hdev, skb);
5930 case HCI_EV_IO_CAPA_REPLY:
5931 hci_io_capa_reply_evt(hdev, skb);
5934 case HCI_EV_USER_CONFIRM_REQUEST:
5935 hci_user_confirm_request_evt(hdev, skb);
5938 case HCI_EV_USER_PASSKEY_REQUEST:
5939 hci_user_passkey_request_evt(hdev, skb);
5942 case HCI_EV_USER_PASSKEY_NOTIFY:
5943 hci_user_passkey_notify_evt(hdev, skb);
5946 case HCI_EV_KEYPRESS_NOTIFY:
5947 hci_keypress_notify_evt(hdev, skb);
5950 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5951 hci_simple_pair_complete_evt(hdev, skb);
5954 case HCI_EV_REMOTE_HOST_FEATURES:
5955 hci_remote_host_features_evt(hdev, skb);
5958 case HCI_EV_LE_META:
5959 hci_le_meta_evt(hdev, skb);
5962 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5963 hci_remote_oob_data_request_evt(hdev, skb);
5966 #if IS_ENABLED(CONFIG_BT_HS)
5967 case HCI_EV_CHANNEL_SELECTED:
5968 hci_chan_selected_evt(hdev, skb);
5971 case HCI_EV_PHY_LINK_COMPLETE:
5972 hci_phy_link_complete_evt(hdev, skb);
5975 case HCI_EV_LOGICAL_LINK_COMPLETE:
5976 hci_loglink_complete_evt(hdev, skb);
5979 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5980 hci_disconn_loglink_complete_evt(hdev, skb);
5983 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5984 hci_disconn_phylink_complete_evt(hdev, skb);
5988 case HCI_EV_NUM_COMP_BLOCKS:
5989 hci_num_comp_blocks_evt(hdev, skb);
5993 BT_DBG("%s event 0x%2.2x", hdev->name, event);
5998 req_complete(hdev, status, opcode);
5999 } else if (req_complete_skb) {
6000 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6001 kfree_skb(orig_skb);
6004 req_complete_skb(hdev, status, opcode, orig_skb);
6008 kfree_skb(orig_skb);
6010 hdev->stat.evt_rx++;