2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI connection handling. */
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
37 #include "hci_request.h"
47 struct conn_handle_t {
48 struct hci_conn *conn;
52 static const struct sco_param esco_param_cvsd[] = {
53 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
54 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
55 { EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */
56 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */
57 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */
60 static const struct sco_param sco_param_cvsd[] = {
61 { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0xff }, /* D1 */
62 { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0xff }, /* D0 */
65 static const struct sco_param esco_param_msbc[] = {
66 { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
67 { EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */
70 /* This function requires the caller holds hdev->lock */
71 static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
73 struct hci_conn_params *params;
74 struct hci_dev *hdev = conn->hdev;
80 bdaddr_type = conn->dst_type;
82 /* Check if we need to convert to identity address */
83 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
85 bdaddr = &irk->bdaddr;
86 bdaddr_type = irk->addr_type;
89 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
95 hci_conn_drop(params->conn);
96 hci_conn_put(params->conn);
100 if (!params->explicit_connect)
103 /* If the status indicates successful cancellation of
104 * the attempt (i.e. Unknown Connection Id) there's no point of
105 * notifying failure since we'll go back to keep trying to
106 * connect. The only exception is explicit connect requests
107 * where a timeout + cancel does indicate an actual failure.
109 if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
110 mgmt_connect_failed(hdev, &conn->dst, conn->type,
111 conn->dst_type, status);
113 /* The connection attempt was doing scan for new RPA, and is
114 * in scan phase. If params are not associated with any other
115 * autoconnect action, remove them completely. If they are, just unmark
116 * them as waiting for connection, by clearing explicit_connect field.
118 params->explicit_connect = false;
120 hci_pend_le_list_del_init(params);
122 switch (params->auto_connect) {
123 case HCI_AUTO_CONN_EXPLICIT:
124 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
125 /* return instead of break to avoid duplicate scan update */
127 case HCI_AUTO_CONN_DIRECT:
128 case HCI_AUTO_CONN_ALWAYS:
129 hci_pend_le_list_add(params, &hdev->pend_le_conns);
131 case HCI_AUTO_CONN_REPORT:
132 hci_pend_le_list_add(params, &hdev->pend_le_reports);
138 hci_update_passive_scan(hdev);
141 static void hci_conn_cleanup(struct hci_conn *conn)
143 struct hci_dev *hdev = conn->hdev;
145 if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
146 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
148 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
149 hci_remove_link_key(hdev, &conn->dst);
151 hci_chan_list_flush(conn);
153 hci_conn_hash_del(hdev, conn);
155 if (HCI_CONN_HANDLE_UNSET(conn->handle))
156 ida_free(&hdev->unset_handle_ida, conn->handle);
161 if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
162 switch (conn->setting & SCO_AIRMODE_MASK) {
163 case SCO_AIRMODE_CVSD:
164 case SCO_AIRMODE_TRANSP:
166 hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
171 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
174 debugfs_remove_recursive(conn->debugfs);
176 hci_conn_del_sysfs(conn);
181 static void hci_acl_create_connection(struct hci_conn *conn)
183 struct hci_dev *hdev = conn->hdev;
184 struct inquiry_entry *ie;
185 struct hci_cp_create_conn cp;
187 BT_DBG("hcon %p", conn);
189 /* Many controllers disallow HCI Create Connection while it is doing
190 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
191 * Connection. This may cause the MGMT discovering state to become false
192 * without user space's request but it is okay since the MGMT Discovery
193 * APIs do not promise that discovery should be done forever. Instead,
194 * the user space monitors the status of MGMT discovering and it may
195 * request for discovery again when this flag becomes false.
197 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
198 /* Put this connection to "pending" state so that it will be
199 * executed after the inquiry cancel command complete event.
201 conn->state = BT_CONNECT2;
202 hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
206 conn->state = BT_CONNECT;
208 conn->role = HCI_ROLE_MASTER;
212 conn->link_policy = hdev->link_policy;
214 memset(&cp, 0, sizeof(cp));
215 bacpy(&cp.bdaddr, &conn->dst);
216 cp.pscan_rep_mode = 0x02;
218 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
220 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
221 cp.pscan_rep_mode = ie->data.pscan_rep_mode;
222 cp.pscan_mode = ie->data.pscan_mode;
223 cp.clock_offset = ie->data.clock_offset |
227 memcpy(conn->dev_class, ie->data.dev_class, 3);
230 cp.pkt_type = cpu_to_le16(conn->pkt_type);
231 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
232 cp.role_switch = 0x01;
234 cp.role_switch = 0x00;
236 hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
239 int hci_disconnect(struct hci_conn *conn, __u8 reason)
241 BT_DBG("hcon %p", conn);
243 /* When we are central of an established connection and it enters
244 * the disconnect timeout, then go ahead and try to read the
245 * current clock offset. Processing of the result is done
246 * within the event handling and hci_clock_offset_evt function.
248 if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
249 (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
250 struct hci_dev *hdev = conn->hdev;
251 struct hci_cp_read_clock_offset clkoff_cp;
253 clkoff_cp.handle = cpu_to_le16(conn->handle);
254 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
258 return hci_abort_conn(conn, reason);
261 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
263 struct hci_dev *hdev = conn->hdev;
264 struct hci_cp_add_sco cp;
266 BT_DBG("hcon %p", conn);
268 conn->state = BT_CONNECT;
273 cp.handle = cpu_to_le16(handle);
274 cp.pkt_type = cpu_to_le16(conn->pkt_type);
276 hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
279 static bool find_next_esco_param(struct hci_conn *conn,
280 const struct sco_param *esco_param, int size)
285 for (; conn->attempt <= size; conn->attempt++) {
286 if (lmp_esco_2m_capable(conn->parent) ||
287 (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
289 BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
290 conn, conn->attempt);
293 return conn->attempt <= size;
296 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
299 __u8 vnd_len, *vnd_data = NULL;
300 struct hci_op_configure_data_path *cmd = NULL;
302 if (!codec->data_path || !hdev->get_codec_config_data)
305 /* Do not take me as error */
306 if (!hdev->get_codec_config_data)
309 err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
314 cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
320 err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
324 cmd->vnd_len = vnd_len;
325 memcpy(cmd->vnd_data, vnd_data, vnd_len);
327 cmd->direction = 0x00;
328 __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
329 sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
331 cmd->direction = 0x01;
332 err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
333 sizeof(*cmd) + vnd_len, cmd,
342 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
344 struct conn_handle_t *conn_handle = data;
345 struct hci_conn *conn = conn_handle->conn;
346 __u16 handle = conn_handle->handle;
347 struct hci_cp_enhanced_setup_sync_conn cp;
348 const struct sco_param *param;
352 bt_dev_dbg(hdev, "hcon %p", conn);
354 configure_datapath_sync(hdev, &conn->codec);
356 conn->state = BT_CONNECT;
361 memset(&cp, 0x00, sizeof(cp));
363 cp.handle = cpu_to_le16(handle);
365 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
366 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
368 switch (conn->codec.id) {
370 if (!find_next_esco_param(conn, esco_param_msbc,
371 ARRAY_SIZE(esco_param_msbc)))
374 param = &esco_param_msbc[conn->attempt - 1];
375 cp.tx_coding_format.id = 0x05;
376 cp.rx_coding_format.id = 0x05;
377 cp.tx_codec_frame_size = __cpu_to_le16(60);
378 cp.rx_codec_frame_size = __cpu_to_le16(60);
379 cp.in_bandwidth = __cpu_to_le32(32000);
380 cp.out_bandwidth = __cpu_to_le32(32000);
381 cp.in_coding_format.id = 0x04;
382 cp.out_coding_format.id = 0x04;
383 cp.in_coded_data_size = __cpu_to_le16(16);
384 cp.out_coded_data_size = __cpu_to_le16(16);
385 cp.in_pcm_data_format = 2;
386 cp.out_pcm_data_format = 2;
387 cp.in_pcm_sample_payload_msb_pos = 0;
388 cp.out_pcm_sample_payload_msb_pos = 0;
389 cp.in_data_path = conn->codec.data_path;
390 cp.out_data_path = conn->codec.data_path;
391 cp.in_transport_unit_size = 1;
392 cp.out_transport_unit_size = 1;
395 case BT_CODEC_TRANSPARENT:
396 if (!find_next_esco_param(conn, esco_param_msbc,
397 ARRAY_SIZE(esco_param_msbc)))
399 param = &esco_param_msbc[conn->attempt - 1];
400 cp.tx_coding_format.id = 0x03;
401 cp.rx_coding_format.id = 0x03;
402 cp.tx_codec_frame_size = __cpu_to_le16(60);
403 cp.rx_codec_frame_size = __cpu_to_le16(60);
404 cp.in_bandwidth = __cpu_to_le32(0x1f40);
405 cp.out_bandwidth = __cpu_to_le32(0x1f40);
406 cp.in_coding_format.id = 0x03;
407 cp.out_coding_format.id = 0x03;
408 cp.in_coded_data_size = __cpu_to_le16(16);
409 cp.out_coded_data_size = __cpu_to_le16(16);
410 cp.in_pcm_data_format = 2;
411 cp.out_pcm_data_format = 2;
412 cp.in_pcm_sample_payload_msb_pos = 0;
413 cp.out_pcm_sample_payload_msb_pos = 0;
414 cp.in_data_path = conn->codec.data_path;
415 cp.out_data_path = conn->codec.data_path;
416 cp.in_transport_unit_size = 1;
417 cp.out_transport_unit_size = 1;
421 if (conn->parent && lmp_esco_capable(conn->parent)) {
422 if (!find_next_esco_param(conn, esco_param_cvsd,
423 ARRAY_SIZE(esco_param_cvsd)))
425 param = &esco_param_cvsd[conn->attempt - 1];
427 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
429 param = &sco_param_cvsd[conn->attempt - 1];
431 cp.tx_coding_format.id = 2;
432 cp.rx_coding_format.id = 2;
433 cp.tx_codec_frame_size = __cpu_to_le16(60);
434 cp.rx_codec_frame_size = __cpu_to_le16(60);
435 cp.in_bandwidth = __cpu_to_le32(16000);
436 cp.out_bandwidth = __cpu_to_le32(16000);
437 cp.in_coding_format.id = 4;
438 cp.out_coding_format.id = 4;
439 cp.in_coded_data_size = __cpu_to_le16(16);
440 cp.out_coded_data_size = __cpu_to_le16(16);
441 cp.in_pcm_data_format = 2;
442 cp.out_pcm_data_format = 2;
443 cp.in_pcm_sample_payload_msb_pos = 0;
444 cp.out_pcm_sample_payload_msb_pos = 0;
445 cp.in_data_path = conn->codec.data_path;
446 cp.out_data_path = conn->codec.data_path;
447 cp.in_transport_unit_size = 16;
448 cp.out_transport_unit_size = 16;
454 cp.retrans_effort = param->retrans_effort;
455 cp.pkt_type = __cpu_to_le16(param->pkt_type);
456 cp.max_latency = __cpu_to_le16(param->max_latency);
458 if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
464 static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
466 struct hci_dev *hdev = conn->hdev;
467 struct hci_cp_setup_sync_conn cp;
468 const struct sco_param *param;
470 bt_dev_dbg(hdev, "hcon %p", conn);
472 conn->state = BT_CONNECT;
477 cp.handle = cpu_to_le16(handle);
479 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
480 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
481 cp.voice_setting = cpu_to_le16(conn->setting);
483 switch (conn->setting & SCO_AIRMODE_MASK) {
484 case SCO_AIRMODE_TRANSP:
485 if (!find_next_esco_param(conn, esco_param_msbc,
486 ARRAY_SIZE(esco_param_msbc)))
488 param = &esco_param_msbc[conn->attempt - 1];
490 case SCO_AIRMODE_CVSD:
491 if (conn->parent && lmp_esco_capable(conn->parent)) {
492 if (!find_next_esco_param(conn, esco_param_cvsd,
493 ARRAY_SIZE(esco_param_cvsd)))
495 param = &esco_param_cvsd[conn->attempt - 1];
497 if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
499 param = &sco_param_cvsd[conn->attempt - 1];
506 cp.retrans_effort = param->retrans_effort;
507 cp.pkt_type = __cpu_to_le16(param->pkt_type);
508 cp.max_latency = __cpu_to_le16(param->max_latency);
510 if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
516 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
519 struct conn_handle_t *conn_handle;
521 if (enhanced_sync_conn_capable(conn->hdev)) {
522 conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
527 conn_handle->conn = conn;
528 conn_handle->handle = handle;
529 result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
537 return hci_setup_sync_conn(conn, handle);
540 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
543 struct hci_dev *hdev = conn->hdev;
544 struct hci_conn_params *params;
545 struct hci_cp_le_conn_update cp;
549 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
551 params->conn_min_interval = min;
552 params->conn_max_interval = max;
553 params->conn_latency = latency;
554 params->supervision_timeout = to_multiplier;
557 hci_dev_unlock(hdev);
559 memset(&cp, 0, sizeof(cp));
560 cp.handle = cpu_to_le16(conn->handle);
561 cp.conn_interval_min = cpu_to_le16(min);
562 cp.conn_interval_max = cpu_to_le16(max);
563 cp.conn_latency = cpu_to_le16(latency);
564 cp.supervision_timeout = cpu_to_le16(to_multiplier);
565 cp.min_ce_len = cpu_to_le16(0x0000);
566 cp.max_ce_len = cpu_to_le16(0x0000);
568 hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
576 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
577 __u8 ltk[16], __u8 key_size)
579 struct hci_dev *hdev = conn->hdev;
580 struct hci_cp_le_start_enc cp;
582 BT_DBG("hcon %p", conn);
584 memset(&cp, 0, sizeof(cp));
586 cp.handle = cpu_to_le16(conn->handle);
589 memcpy(cp.ltk, ltk, key_size);
591 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
594 /* Device _must_ be locked */
595 void hci_sco_setup(struct hci_conn *conn, __u8 status)
597 struct hci_link *link;
599 link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
600 if (!link || !link->conn)
603 BT_DBG("hcon %p", conn);
606 if (lmp_esco_capable(conn->hdev))
607 hci_setup_sync(link->conn, conn->handle);
609 hci_add_sco(link->conn, conn->handle);
611 hci_connect_cfm(link->conn, status);
612 hci_conn_del(link->conn);
616 static void hci_conn_timeout(struct work_struct *work)
618 struct hci_conn *conn = container_of(work, struct hci_conn,
620 int refcnt = atomic_read(&conn->refcnt);
622 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
626 /* FIXME: It was observed that in pairing failed scenario, refcnt
627 * drops below 0. Probably this is because l2cap_conn_del calls
628 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
629 * dropped. After that loop hci_chan_del is called which also drops
630 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
636 hci_abort_conn(conn, hci_proto_disconn_ind(conn));
639 /* Enter sniff mode */
640 static void hci_conn_idle(struct work_struct *work)
642 struct hci_conn *conn = container_of(work, struct hci_conn,
644 struct hci_dev *hdev = conn->hdev;
646 BT_DBG("hcon %p mode %d", conn, conn->mode);
648 if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
651 if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
654 if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
655 struct hci_cp_sniff_subrate cp;
656 cp.handle = cpu_to_le16(conn->handle);
657 cp.max_latency = cpu_to_le16(0);
658 cp.min_remote_timeout = cpu_to_le16(0);
659 cp.min_local_timeout = cpu_to_le16(0);
660 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
663 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
664 struct hci_cp_sniff_mode cp;
665 cp.handle = cpu_to_le16(conn->handle);
666 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
667 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
668 cp.attempt = cpu_to_le16(4);
669 cp.timeout = cpu_to_le16(1);
670 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
674 static void hci_conn_auto_accept(struct work_struct *work)
676 struct hci_conn *conn = container_of(work, struct hci_conn,
677 auto_accept_work.work);
679 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
683 static void le_disable_advertising(struct hci_dev *hdev)
685 if (ext_adv_capable(hdev)) {
686 struct hci_cp_le_set_ext_adv_enable cp;
689 cp.num_of_sets = 0x00;
691 hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
695 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
700 static void le_conn_timeout(struct work_struct *work)
702 struct hci_conn *conn = container_of(work, struct hci_conn,
703 le_conn_timeout.work);
704 struct hci_dev *hdev = conn->hdev;
708 /* We could end up here due to having done directed advertising,
709 * so clean up the state if necessary. This should however only
710 * happen with broken hardware or if low duty cycle was used
711 * (which doesn't have a timeout of its own).
713 if (conn->role == HCI_ROLE_SLAVE) {
714 /* Disable LE Advertising */
715 le_disable_advertising(hdev);
717 hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
718 hci_dev_unlock(hdev);
722 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
725 struct iso_cig_params {
726 struct hci_cp_le_set_cig_params cp;
727 struct hci_cis_params cis[0x1f];
730 struct iso_list_data {
746 static void bis_list(struct hci_conn *conn, void *data)
748 struct iso_list_data *d = data;
750 /* Skip if not broadcast/ANY address */
751 if (bacmp(&conn->dst, BDADDR_ANY))
754 if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
755 d->bis != conn->iso_qos.bcast.bis)
761 static int terminate_big_sync(struct hci_dev *hdev, void *data)
763 struct iso_list_data *d = data;
765 bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
767 hci_disable_per_advertising_sync(hdev, d->bis);
768 hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
770 /* Only terminate BIG if it has been created */
774 return hci_le_terminate_big_sync(hdev, d->big,
775 HCI_ERROR_LOCAL_HOST_TERM);
778 static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
783 static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
785 struct iso_list_data *d;
788 bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
789 conn->iso_qos.bcast.bis);
791 d = kzalloc(sizeof(*d), GFP_KERNEL);
795 d->big = conn->iso_qos.bcast.big;
796 d->bis = conn->iso_qos.bcast.bis;
797 d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);
799 ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
800 terminate_big_destroy);
807 static int big_terminate_sync(struct hci_dev *hdev, void *data)
809 struct iso_list_data *d = data;
811 bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
814 if (d->big_sync_term)
815 hci_le_big_terminate_sync(hdev, d->big);
818 return hci_le_pa_terminate_sync(hdev, d->sync_handle);
823 static void find_bis(struct hci_conn *conn, void *data)
825 struct iso_list_data *d = data;
827 /* Ignore if BIG doesn't match */
828 if (d->big != conn->iso_qos.bcast.big)
834 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
836 struct iso_list_data *d;
839 bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);
841 d = kzalloc(sizeof(*d), GFP_KERNEL);
845 memset(d, 0, sizeof(*d));
847 d->sync_handle = conn->sync_handle;
849 if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) {
850 hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
851 HCI_CONN_PA_SYNC, d);
854 d->pa_sync_term = true;
859 if (test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
860 hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
861 HCI_CONN_BIG_SYNC, d);
864 d->big_sync_term = true;
867 ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
868 terminate_big_destroy);
875 /* Cleanup BIS connection
877 * Detects if there any BIS left connected in a BIG
878 * broadcaster: Remove advertising instance and terminate BIG.
879 * broadcaster receiver: Teminate BIG sync and terminate PA sync.
881 static void bis_cleanup(struct hci_conn *conn)
883 struct hci_dev *hdev = conn->hdev;
884 struct hci_conn *bis;
886 bt_dev_dbg(hdev, "conn %p", conn);
888 if (conn->role == HCI_ROLE_MASTER) {
889 if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
892 /* Check if ISO connection is a BIS and terminate advertising
893 * set and BIG if there are no other connections using it.
895 bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
899 hci_le_terminate_big(hdev, conn);
901 hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
906 static int remove_cig_sync(struct hci_dev *hdev, void *data)
908 u8 handle = PTR_UINT(data);
910 return hci_le_remove_cig_sync(hdev, handle);
913 static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
915 bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
917 return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
921 static void find_cis(struct hci_conn *conn, void *data)
923 struct iso_list_data *d = data;
925 /* Ignore broadcast or if CIG don't match */
926 if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
932 /* Cleanup CIS connection:
934 * Detects if there any CIS left connected in a CIG and remove it.
936 static void cis_cleanup(struct hci_conn *conn)
938 struct hci_dev *hdev = conn->hdev;
939 struct iso_list_data d;
941 if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
944 memset(&d, 0, sizeof(d));
945 d.cig = conn->iso_qos.ucast.cig;
947 /* Check if ISO connection is a CIS and remove CIG if there are
948 * no other connections using it.
950 hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
951 hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
952 hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
956 hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
959 static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
961 return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
962 U16_MAX, GFP_ATOMIC);
965 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
968 struct hci_conn *conn;
970 bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
972 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
976 bacpy(&conn->dst, dst);
977 bacpy(&conn->src, &hdev->bdaddr);
978 conn->handle = handle;
982 conn->mode = HCI_CM_ACTIVE;
983 conn->state = BT_OPEN;
984 conn->auth_type = HCI_AT_GENERAL_BONDING;
985 conn->io_capability = hdev->io_capability;
986 conn->remote_auth = 0xff;
987 conn->key_type = 0xff;
988 conn->rssi = HCI_RSSI_INVALID;
989 conn->tx_power = HCI_TX_POWER_INVALID;
990 conn->max_tx_power = HCI_TX_POWER_INVALID;
991 conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
993 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
994 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
996 /* Set Default Authenticated payload timeout to 30s */
997 conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
999 if (conn->role == HCI_ROLE_MASTER)
1004 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
1007 /* conn->src should reflect the local identity address */
1008 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
1011 /* conn->src should reflect the local identity address */
1012 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
1014 /* set proper cleanup function */
1015 if (!bacmp(dst, BDADDR_ANY))
1016 conn->cleanup = bis_cleanup;
1017 else if (conn->role == HCI_ROLE_MASTER)
1018 conn->cleanup = cis_cleanup;
1022 if (lmp_esco_capable(hdev))
1023 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
1024 (hdev->esco_type & EDR_ESCO_MASK);
1026 conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
1029 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
1033 skb_queue_head_init(&conn->data_q);
1035 INIT_LIST_HEAD(&conn->chan_list);
1036 INIT_LIST_HEAD(&conn->link_list);
1038 INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
1039 INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
1040 INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
1041 INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
1043 atomic_set(&conn->refcnt, 0);
1047 hci_conn_hash_add(hdev, conn);
1049 /* The SCO and eSCO connections will only be notified when their
1050 * setup has been completed. This is different to ACL links which
1051 * can be notified right away.
1053 if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
1055 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
1058 hci_conn_init_sysfs(conn);
1063 struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
1064 bdaddr_t *dst, u8 role)
1068 bt_dev_dbg(hdev, "dst %pMR", dst);
1070 handle = hci_conn_hash_alloc_unset(hdev);
1071 if (unlikely(handle < 0))
1074 return hci_conn_add(hdev, type, dst, role, handle);
1077 static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
1080 reason = HCI_ERROR_REMOTE_USER_TERM;
1082 /* Due to race, SCO/ISO conn might be not established yet at this point,
1083 * and nothing else will clean it up. In other cases it is done via HCI
1086 switch (conn->type) {
1089 if (HCI_CONN_HANDLE_UNSET(conn->handle))
1090 hci_conn_failed(conn, reason);
1093 if ((conn->state != BT_CONNECTED &&
1094 !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) ||
1095 test_bit(HCI_CONN_BIG_CREATED, &conn->flags))
1096 hci_conn_failed(conn, reason);
1101 static void hci_conn_unlink(struct hci_conn *conn)
1103 struct hci_dev *hdev = conn->hdev;
1105 bt_dev_dbg(hdev, "hcon %p", conn);
1107 if (!conn->parent) {
1108 struct hci_link *link, *t;
1110 list_for_each_entry_safe(link, t, &conn->link_list, list) {
1111 struct hci_conn *child = link->conn;
1113 hci_conn_unlink(child);
1115 /* If hdev is down it means
1116 * hci_dev_close_sync/hci_conn_hash_flush is in progress
1117 * and links don't need to be cleanup as all connections
1120 if (!test_bit(HCI_UP, &hdev->flags))
1123 hci_conn_cleanup_child(child, conn->abort_reason);
1132 list_del_rcu(&conn->link->list);
1135 hci_conn_drop(conn->parent);
1136 hci_conn_put(conn->parent);
1137 conn->parent = NULL;
1143 void hci_conn_del(struct hci_conn *conn)
1145 struct hci_dev *hdev = conn->hdev;
1147 BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1149 hci_conn_unlink(conn);
1151 cancel_delayed_work_sync(&conn->disc_work);
1152 cancel_delayed_work_sync(&conn->auto_accept_work);
1153 cancel_delayed_work_sync(&conn->idle_work);
1155 if (conn->type == ACL_LINK) {
1156 /* Unacked frames */
1157 hdev->acl_cnt += conn->sent;
1158 } else if (conn->type == LE_LINK) {
1159 cancel_delayed_work(&conn->le_conn_timeout);
1162 hdev->le_cnt += conn->sent;
1164 hdev->acl_cnt += conn->sent;
1166 /* Unacked ISO frames */
1167 if (conn->type == ISO_LINK) {
1169 hdev->iso_cnt += conn->sent;
1170 else if (hdev->le_pkts)
1171 hdev->le_cnt += conn->sent;
1173 hdev->acl_cnt += conn->sent;
1177 skb_queue_purge(&conn->data_q);
1179 /* Remove the connection from the list and cleanup its remaining
1180 * state. This is a separate function since for some cases like
1181 * BT_CONNECT_SCAN we *only* want the cleanup part without the
1182 * rest of hci_conn_del.
1184 hci_conn_cleanup(conn);
1187 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1189 int use_src = bacmp(src, BDADDR_ANY);
1190 struct hci_dev *hdev = NULL, *d;
1192 BT_DBG("%pMR -> %pMR", src, dst);
1194 read_lock(&hci_dev_list_lock);
1196 list_for_each_entry(d, &hci_dev_list, list) {
1197 if (!test_bit(HCI_UP, &d->flags) ||
1198 hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
1199 d->dev_type != HCI_PRIMARY)
1203 * No source address - find interface with bdaddr != dst
1204 * Source address - find interface with bdaddr == src
1211 if (src_type == BDADDR_BREDR) {
1212 if (!lmp_bredr_capable(d))
1214 bacpy(&id_addr, &d->bdaddr);
1215 id_addr_type = BDADDR_BREDR;
1217 if (!lmp_le_capable(d))
1220 hci_copy_identity_address(d, &id_addr,
1223 /* Convert from HCI to three-value type */
1224 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1225 id_addr_type = BDADDR_LE_PUBLIC;
1227 id_addr_type = BDADDR_LE_RANDOM;
1230 if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1234 if (bacmp(&d->bdaddr, dst)) {
1241 hdev = hci_dev_hold(hdev);
1243 read_unlock(&hci_dev_list_lock);
1246 EXPORT_SYMBOL(hci_get_route);
1248 /* This function requires the caller holds hdev->lock */
1249 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1251 struct hci_dev *hdev = conn->hdev;
1253 hci_connect_le_scan_cleanup(conn, status);
1255 /* Enable advertising in case this was a failed connection
1256 * attempt as a peripheral.
1258 hci_enable_advertising(hdev);
1261 /* This function requires the caller holds hdev->lock */
1262 void hci_conn_failed(struct hci_conn *conn, u8 status)
1264 struct hci_dev *hdev = conn->hdev;
1266 bt_dev_dbg(hdev, "status 0x%2.2x", status);
1268 switch (conn->type) {
1270 hci_le_conn_failed(conn, status);
1273 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1274 conn->dst_type, status);
1278 /* In case of BIG/PA sync failed, clear conn flags so that
1279 * the conns will be correctly cleaned up by ISO layer
1281 test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
1282 test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);
1284 conn->state = BT_CLOSED;
1285 hci_connect_cfm(conn, status);
1289 /* This function requires the caller holds hdev->lock */
1290 u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
1292 struct hci_dev *hdev = conn->hdev;
1294 bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);
1296 if (conn->handle == handle)
1299 if (handle > HCI_CONN_HANDLE_MAX) {
1300 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
1301 handle, HCI_CONN_HANDLE_MAX);
1302 return HCI_ERROR_INVALID_PARAMETERS;
1305 /* If abort_reason has been sent it means the connection is being
1306 * aborted and the handle shall not be changed.
1308 if (conn->abort_reason)
1309 return conn->abort_reason;
1311 if (HCI_CONN_HANDLE_UNSET(conn->handle))
1312 ida_free(&hdev->unset_handle_ida, conn->handle);
1314 conn->handle = handle;
1319 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
1321 struct hci_conn *conn;
1322 u16 handle = PTR_UINT(data);
1324 conn = hci_conn_hash_lookup_handle(hdev, handle);
1328 bt_dev_dbg(hdev, "err %d", err);
1333 hci_connect_le_scan_cleanup(conn, 0x00);
1337 /* Check if connection is still pending */
1338 if (conn != hci_lookup_le_connect(hdev))
1341 /* Flush to make sure we send create conn cancel command if needed */
1342 flush_delayed_work(&conn->le_conn_timeout);
1343 hci_conn_failed(conn, bt_status(err));
1346 hci_dev_unlock(hdev);
1349 static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
1351 struct hci_conn *conn;
1352 u16 handle = PTR_UINT(data);
1354 conn = hci_conn_hash_lookup_handle(hdev, handle);
1358 bt_dev_dbg(hdev, "conn %p", conn);
1360 clear_bit(HCI_CONN_SCANNING, &conn->flags);
1361 conn->state = BT_CONNECT;
1363 return hci_le_create_conn_sync(hdev, conn);
1366 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1367 u8 dst_type, bool dst_resolved, u8 sec_level,
1368 u16 conn_timeout, u8 role)
1370 struct hci_conn *conn;
1371 struct smp_irk *irk;
1374 /* Let's make sure that le is enabled.*/
1375 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1376 if (lmp_le_capable(hdev))
1377 return ERR_PTR(-ECONNREFUSED);
1379 return ERR_PTR(-EOPNOTSUPP);
1382 /* Since the controller supports only one LE connection attempt at a
1383 * time, we return -EBUSY if there is any connection attempt running.
1385 if (hci_lookup_le_connect(hdev))
1386 return ERR_PTR(-EBUSY);
1388 /* If there's already a connection object but it's not in
1389 * scanning state it means it must already be established, in
1390 * which case we can't do anything else except report a failure
1393 conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1394 if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1395 return ERR_PTR(-EBUSY);
1398 /* Check if the destination address has been resolved by the controller
1399 * since if it did then the identity address shall be used.
1401 if (!dst_resolved) {
1402 /* When given an identity address with existing identity
1403 * resolving key, the connection needs to be established
1404 * to a resolvable random address.
1406 * Storing the resolvable random address is required here
1407 * to handle connection failures. The address will later
1408 * be resolved back into the original identity address
1409 * from the connect request.
1411 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1412 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1414 dst_type = ADDR_LE_DEV_RANDOM;
1419 bacpy(&conn->dst, dst);
1421 conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
1423 return ERR_PTR(-ENOMEM);
1424 hci_conn_hold(conn);
1425 conn->pending_sec_level = sec_level;
1428 conn->dst_type = dst_type;
1429 conn->sec_level = BT_SECURITY_LOW;
1430 conn->conn_timeout = conn_timeout;
1432 err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
1433 UINT_PTR(conn->handle),
1434 create_le_conn_complete);
1437 return ERR_PTR(err);
1443 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1445 struct hci_conn *conn;
1447 conn = hci_conn_hash_lookup_le(hdev, addr, type);
1451 if (conn->state != BT_CONNECTED)
1457 /* This function requires the caller holds hdev->lock */
1458 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1459 bdaddr_t *addr, u8 addr_type)
1461 struct hci_conn_params *params;
1463 if (is_connected(hdev, addr, addr_type))
1466 params = hci_conn_params_lookup(hdev, addr, addr_type);
1468 params = hci_conn_params_add(hdev, addr, addr_type);
1472 /* If we created new params, mark them to be deleted in
1473 * hci_connect_le_scan_cleanup. It's different case than
1474 * existing disabled params, those will stay after cleanup.
1476 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1479 /* We're trying to connect, so make sure params are at pend_le_conns */
1480 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1481 params->auto_connect == HCI_AUTO_CONN_REPORT ||
1482 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1483 hci_pend_le_list_del_init(params);
1484 hci_pend_le_list_add(params, &hdev->pend_le_conns);
1487 params->explicit_connect = true;
1489 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1490 params->auto_connect);
1495 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1497 struct hci_conn *conn;
1500 /* Allocate a BIG if not set */
1501 if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1502 for (big = 0x00; big < 0xef; big++) {
1504 conn = hci_conn_hash_lookup_big(hdev, big);
1510 return -EADDRNOTAVAIL;
1513 qos->bcast.big = big;
1519 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1521 struct hci_conn *conn;
1524 /* Allocate BIS if not set */
1525 if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1526 if (qos->bcast.big != BT_ISO_QOS_BIG_UNSET) {
1527 conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1530 /* If the BIG handle is already matched to an advertising
1531 * handle, do not allocate a new one.
1533 qos->bcast.bis = conn->iso_qos.bcast.bis;
1538 /* Find an unused adv set to advertise BIS, skip instance 0x00
1539 * since it is reserved as general purpose set.
1541 for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1544 conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1549 if (bis == hdev->le_num_of_adv_sets)
1550 return -EADDRNOTAVAIL;
1553 qos->bcast.bis = bis;
1559 /* This function requires the caller holds hdev->lock */
1560 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1561 struct bt_iso_qos *qos, __u8 base_len,
1564 struct hci_conn *conn;
1567 /* Let's make sure that le is enabled.*/
1568 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1569 if (lmp_le_capable(hdev))
1570 return ERR_PTR(-ECONNREFUSED);
1571 return ERR_PTR(-EOPNOTSUPP);
1574 err = qos_set_big(hdev, qos);
1576 return ERR_PTR(err);
1578 err = qos_set_bis(hdev, qos);
1580 return ERR_PTR(err);
1582 /* Check if the LE Create BIG command has already been sent */
1583 conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
1586 return ERR_PTR(-EADDRINUSE);
1588 /* Check BIS settings against other bound BISes, since all
1589 * BISes in a BIG must have the same value for all parameters
1591 conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1593 if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
1594 base_len != conn->le_per_adv_data_len ||
1595 memcmp(conn->le_per_adv_data, base, base_len)))
1596 return ERR_PTR(-EADDRINUSE);
1598 conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1600 return ERR_PTR(-ENOMEM);
1602 conn->state = BT_CONNECT;
1604 hci_conn_hold(conn);
1608 /* This function requires the caller holds hdev->lock */
1609 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1610 u8 dst_type, u8 sec_level,
1612 enum conn_reasons conn_reason)
1614 struct hci_conn *conn;
1616 /* Let's make sure that le is enabled.*/
1617 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1618 if (lmp_le_capable(hdev))
1619 return ERR_PTR(-ECONNREFUSED);
1621 return ERR_PTR(-EOPNOTSUPP);
1624 /* Some devices send ATT messages as soon as the physical link is
1625 * established. To be able to handle these ATT messages, the user-
1626 * space first establishes the connection and then starts the pairing
1629 * So if a hci_conn object already exists for the following connection
1630 * attempt, we simply update pending_sec_level and auth_type fields
1631 * and return the object found.
1633 conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1635 if (conn->pending_sec_level < sec_level)
1636 conn->pending_sec_level = sec_level;
1640 BT_DBG("requesting refresh of dst_addr");
1642 conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1644 return ERR_PTR(-ENOMEM);
1646 if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1648 return ERR_PTR(-EBUSY);
1651 conn->state = BT_CONNECT;
1652 set_bit(HCI_CONN_SCANNING, &conn->flags);
1653 conn->dst_type = dst_type;
1654 conn->sec_level = BT_SECURITY_LOW;
1655 conn->pending_sec_level = sec_level;
1656 conn->conn_timeout = conn_timeout;
1657 conn->conn_reason = conn_reason;
1659 hci_update_passive_scan(hdev);
1662 hci_conn_hold(conn);
1666 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1667 u8 sec_level, u8 auth_type,
1668 enum conn_reasons conn_reason)
1670 struct hci_conn *acl;
1672 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1673 if (lmp_bredr_capable(hdev))
1674 return ERR_PTR(-ECONNREFUSED);
1676 return ERR_PTR(-EOPNOTSUPP);
1679 /* Reject outgoing connection to device with same BD ADDR against
1682 if (!bacmp(&hdev->bdaddr, dst)) {
1683 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
1685 return ERR_PTR(-ECONNREFUSED);
1688 acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1690 acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1692 return ERR_PTR(-ENOMEM);
1697 acl->conn_reason = conn_reason;
1698 if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1699 acl->sec_level = BT_SECURITY_LOW;
1700 acl->pending_sec_level = sec_level;
1701 acl->auth_type = auth_type;
1702 hci_acl_create_connection(acl);
1708 static struct hci_link *hci_conn_link(struct hci_conn *parent,
1709 struct hci_conn *conn)
1711 struct hci_dev *hdev = parent->hdev;
1712 struct hci_link *link;
1714 bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);
1722 link = kzalloc(sizeof(*link), GFP_KERNEL);
1726 link->conn = hci_conn_hold(conn);
1728 conn->parent = hci_conn_get(parent);
1730 /* Use list_add_tail_rcu append to the list */
1731 list_add_tail_rcu(&link->list, &parent->link_list);
1736 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1737 __u16 setting, struct bt_codec *codec)
1739 struct hci_conn *acl;
1740 struct hci_conn *sco;
1741 struct hci_link *link;
1743 acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1744 CONN_REASON_SCO_CONNECT);
1748 sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1750 sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
1753 return ERR_PTR(-ENOMEM);
1757 link = hci_conn_link(acl, sco);
1761 return ERR_PTR(-ENOLINK);
1764 sco->setting = setting;
1765 sco->codec = *codec;
1767 if (acl->state == BT_CONNECTED &&
1768 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1769 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1770 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1772 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1773 /* defer SCO setup until mode change completed */
1774 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1778 hci_sco_setup(acl, 0x00);
1784 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1786 struct hci_dev *hdev = conn->hdev;
1787 struct hci_cp_le_create_big cp;
1788 struct iso_list_data data;
1790 memset(&cp, 0, sizeof(cp));
1792 data.big = qos->bcast.big;
1793 data.bis = qos->bcast.bis;
1796 /* Create a BIS for each bound connection */
1797 hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1800 cp.handle = qos->bcast.big;
1801 cp.adv_handle = qos->bcast.bis;
1802 cp.num_bis = data.count;
1803 hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
1804 cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
1805 cp.bis.latency = cpu_to_le16(qos->bcast.out.latency);
1806 cp.bis.rtn = qos->bcast.out.rtn;
1807 cp.bis.phy = qos->bcast.out.phy;
1808 cp.bis.packing = qos->bcast.packing;
1809 cp.bis.framing = qos->bcast.framing;
1810 cp.bis.encryption = qos->bcast.encryption;
1811 memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));
1813 return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
1816 static int set_cig_params_sync(struct hci_dev *hdev, void *data)
1818 u8 cig_id = PTR_UINT(data);
1819 struct hci_conn *conn;
1820 struct bt_iso_qos *qos;
1821 struct iso_cig_params pdu;
1824 conn = hci_conn_hash_lookup_cig(hdev, cig_id);
1828 memset(&pdu, 0, sizeof(pdu));
1830 qos = &conn->iso_qos;
1831 pdu.cp.cig_id = cig_id;
1832 hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
1833 hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
1834 pdu.cp.sca = qos->ucast.sca;
1835 pdu.cp.packing = qos->ucast.packing;
1836 pdu.cp.framing = qos->ucast.framing;
1837 pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
1838 pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
1840 /* Reprogram all CIS(s) with the same CIG, valid range are:
1841 * num_cis: 0x00 to 0x1F
1842 * cis_id: 0x00 to 0xEF
1844 for (cis_id = 0x00; cis_id < 0xf0 &&
1845 pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
1846 struct hci_cis_params *cis;
1848 conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
1852 qos = &conn->iso_qos;
1854 cis = &pdu.cis[pdu.cp.num_cis++];
1855 cis->cis_id = cis_id;
1856 cis->c_sdu = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
1857 cis->p_sdu = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
1858 cis->c_phy = qos->ucast.out.phy ? qos->ucast.out.phy :
1860 cis->p_phy = qos->ucast.in.phy ? qos->ucast.in.phy :
1862 cis->c_rtn = qos->ucast.out.rtn;
1863 cis->p_rtn = qos->ucast.in.rtn;
1866 if (!pdu.cp.num_cis)
1869 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1871 pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
1875 static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
1877 struct hci_dev *hdev = conn->hdev;
1878 struct iso_list_data data;
1880 memset(&data, 0, sizeof(data));
1882 /* Allocate first still reconfigurable CIG if not set */
1883 if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
1884 for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
1887 hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1892 hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1893 BT_CONNECTED, &data);
1898 if (data.cig == 0xf0)
1902 qos->ucast.cig = data.cig;
1905 if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
1906 if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
1912 /* Allocate first available CIS if not set */
1913 for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
1915 if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
1918 qos->ucast.cis = data.cis;
1923 if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
1927 if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
1928 UINT_PTR(qos->ucast.cig), NULL) < 0)
1934 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1935 __u8 dst_type, struct bt_iso_qos *qos)
1937 struct hci_conn *cis;
1939 cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
1942 cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1944 return ERR_PTR(-ENOMEM);
1945 cis->cleanup = cis_cleanup;
1946 cis->dst_type = dst_type;
1947 cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
1948 cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
1951 if (cis->state == BT_CONNECTED)
1954 /* Check if CIS has been set and the settings matches */
1955 if (cis->state == BT_BOUND &&
1956 !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
1959 /* Update LINK PHYs according to QoS preference */
1960 cis->le_tx_phy = qos->ucast.out.phy;
1961 cis->le_rx_phy = qos->ucast.in.phy;
1963 /* If output interval is not set use the input interval as it cannot be
1966 if (!qos->ucast.out.interval)
1967 qos->ucast.out.interval = qos->ucast.in.interval;
1969 /* If input interval is not set use the output interval as it cannot be
1972 if (!qos->ucast.in.interval)
1973 qos->ucast.in.interval = qos->ucast.out.interval;
1975 /* If output latency is not set use the input latency as it cannot be
1978 if (!qos->ucast.out.latency)
1979 qos->ucast.out.latency = qos->ucast.in.latency;
1981 /* If input latency is not set use the output latency as it cannot be
1984 if (!qos->ucast.in.latency)
1985 qos->ucast.in.latency = qos->ucast.out.latency;
1987 if (!hci_le_set_cig_params(cis, qos)) {
1989 return ERR_PTR(-EINVAL);
1994 cis->iso_qos = *qos;
1995 cis->state = BT_BOUND;
2000 bool hci_iso_setup_path(struct hci_conn *conn)
2002 struct hci_dev *hdev = conn->hdev;
2003 struct hci_cp_le_setup_iso_path cmd;
2005 memset(&cmd, 0, sizeof(cmd));
2007 if (conn->iso_qos.ucast.out.sdu) {
2008 cmd.handle = cpu_to_le16(conn->handle);
2009 cmd.direction = 0x00; /* Input (Host to Controller) */
2010 cmd.path = 0x00; /* HCI path if enabled */
2011 cmd.codec = 0x03; /* Transparent Data */
2013 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
2018 if (conn->iso_qos.ucast.in.sdu) {
2019 cmd.handle = cpu_to_le16(conn->handle);
2020 cmd.direction = 0x01; /* Output (Controller to Host) */
2021 cmd.path = 0x00; /* HCI path if enabled */
2022 cmd.codec = 0x03; /* Transparent Data */
2024 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
2032 int hci_conn_check_create_cis(struct hci_conn *conn)
2034 if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
2037 if (!conn->parent || conn->parent->state != BT_CONNECTED ||
2038 conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
2044 static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
2046 return hci_le_create_cis_sync(hdev);
2049 int hci_le_create_cis_pending(struct hci_dev *hdev)
2051 struct hci_conn *conn;
2052 bool pending = false;
2056 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
2057 if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
2062 if (!hci_conn_check_create_cis(conn))
2071 /* Queue Create CIS */
2072 return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
2075 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
2076 struct bt_iso_io_qos *qos, __u8 phy)
2078 /* Only set MTU if PHY is enabled */
2079 if (!qos->sdu && qos->phy) {
2080 if (hdev->iso_mtu > 0)
2081 qos->sdu = hdev->iso_mtu;
2082 else if (hdev->le_mtu > 0)
2083 qos->sdu = hdev->le_mtu;
2085 qos->sdu = hdev->acl_mtu;
2088 /* Use the same PHY as ACL if set to any */
2089 if (qos->phy == BT_ISO_PHY_ANY)
2092 /* Use LE ACL connection interval if not set */
2094 /* ACL interval unit in 1.25 ms to us */
2095 qos->interval = conn->le_conn_interval * 1250;
2097 /* Use LE ACL connection latency if not set */
2099 qos->latency = conn->le_conn_latency;
2102 static int create_big_sync(struct hci_dev *hdev, void *data)
2104 struct hci_conn *conn = data;
2105 struct bt_iso_qos *qos = &conn->iso_qos;
2106 u16 interval, sync_interval = 0;
2110 if (qos->bcast.out.phy == 0x02)
2111 flags |= MGMT_ADV_FLAG_SEC_2M;
2113 /* Align intervals */
2114 interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;
2117 sync_interval = interval * 4;
2119 err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
2120 conn->le_per_adv_data, flags, interval,
2121 interval, sync_interval);
2125 return hci_le_create_big(conn, &conn->iso_qos);
2128 static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
2130 struct hci_cp_le_pa_create_sync *cp = data;
2132 bt_dev_dbg(hdev, "");
2135 bt_dev_err(hdev, "Unable to create PA: %d", err);
2140 static int create_pa_sync(struct hci_dev *hdev, void *data)
2142 struct hci_cp_le_pa_create_sync *cp = data;
2145 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2146 sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2148 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2152 return hci_update_passive_scan_sync(hdev);
2155 int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
2156 __u8 sid, struct bt_iso_qos *qos)
2158 struct hci_cp_le_pa_create_sync *cp;
2160 if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
2163 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2165 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2169 cp->options = qos->bcast.options;
2171 cp->addr_type = dst_type;
2172 bacpy(&cp->addr, dst);
2173 cp->skip = cpu_to_le16(qos->bcast.skip);
2174 cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
2175 cp->sync_cte_type = qos->bcast.sync_cte_type;
2177 /* Queue start pa_create_sync and scan */
2178 return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
2181 int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
2182 struct bt_iso_qos *qos,
2183 __u16 sync_handle, __u8 num_bis, __u8 bis[])
2186 struct hci_cp_le_big_create_sync cp;
2191 if (num_bis < 0x01 || num_bis > sizeof(pdu.bis))
2194 err = qos_set_big(hdev, qos);
2199 hcon->iso_qos.bcast.big = qos->bcast.big;
2201 memset(&pdu, 0, sizeof(pdu));
2202 pdu.cp.handle = qos->bcast.big;
2203 pdu.cp.sync_handle = cpu_to_le16(sync_handle);
2204 pdu.cp.encryption = qos->bcast.encryption;
2205 memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
2206 pdu.cp.mse = qos->bcast.mse;
2207 pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
2208 pdu.cp.num_bis = num_bis;
2209 memcpy(pdu.bis, bis, num_bis);
2211 return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2212 sizeof(pdu.cp) + num_bis, &pdu);
2215 static void create_big_complete(struct hci_dev *hdev, void *data, int err)
2217 struct hci_conn *conn = data;
2219 bt_dev_dbg(hdev, "conn %p", conn);
2222 bt_dev_err(hdev, "Unable to create BIG: %d", err);
2223 hci_connect_cfm(conn, err);
2228 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
2229 struct bt_iso_qos *qos,
2230 __u8 base_len, __u8 *base)
2232 struct hci_conn *conn;
2233 struct hci_conn *parent;
2234 __u8 eir[HCI_MAX_PER_AD_LENGTH];
2235 struct hci_link *link;
2237 /* Look for any BIS that is open for rebinding */
2238 conn = hci_conn_hash_lookup_big_state(hdev, qos->bcast.big, BT_OPEN);
2240 memcpy(qos, &conn->iso_qos, sizeof(*qos));
2241 conn->state = BT_CONNECTED;
2245 if (base_len && base)
2246 base_len = eir_append_service_data(eir, 0, 0x1851,
2249 /* We need hci_conn object using the BDADDR_ANY as dst */
2250 conn = hci_add_bis(hdev, dst, qos, base_len, eir);
2254 /* Update LINK PHYs according to QoS preference */
2255 conn->le_tx_phy = qos->bcast.out.phy;
2256 conn->le_tx_phy = qos->bcast.out.phy;
2258 /* Add Basic Announcement into Peridic Adv Data if BASE is set */
2259 if (base_len && base) {
2260 memcpy(conn->le_per_adv_data, eir, sizeof(eir));
2261 conn->le_per_adv_data_len = base_len;
2264 hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2265 conn->le_tx_phy ? conn->le_tx_phy :
2266 hdev->le_tx_def_phys);
2268 conn->iso_qos = *qos;
2269 conn->state = BT_BOUND;
2271 /* Link BISes together */
2272 parent = hci_conn_hash_lookup_big(hdev,
2273 conn->iso_qos.bcast.big);
2274 if (parent && parent != conn) {
2275 link = hci_conn_link(parent, conn);
2277 hci_conn_drop(conn);
2278 return ERR_PTR(-ENOLINK);
2281 /* Link takes the refcount */
2282 hci_conn_drop(conn);
2288 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2290 struct iso_list_data *d = data;
2292 /* Skip if not broadcast/ANY address */
2293 if (bacmp(&conn->dst, BDADDR_ANY))
2296 if (d->big != conn->iso_qos.bcast.big ||
2297 d->bis == BT_ISO_QOS_BIS_UNSET ||
2298 d->bis != conn->iso_qos.bcast.bis)
2301 set_bit(HCI_CONN_PER_ADV, &conn->flags);
2304 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
2305 __u8 dst_type, struct bt_iso_qos *qos,
2306 __u8 base_len, __u8 *base)
2308 struct hci_conn *conn;
2310 struct iso_list_data data;
2312 conn = hci_bind_bis(hdev, dst, qos, base_len, base);
2316 if (conn->state == BT_CONNECTED)
2319 data.big = qos->bcast.big;
2320 data.bis = qos->bcast.bis;
2322 /* Set HCI_CONN_PER_ADV for all bound connections, to mark that
2323 * the start periodic advertising and create BIG commands have
2326 hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
2329 /* Queue start periodic advertising and create BIG */
2330 err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
2331 create_big_complete);
2333 hci_conn_drop(conn);
2334 return ERR_PTR(err);
2340 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
2341 __u8 dst_type, struct bt_iso_qos *qos)
2343 struct hci_conn *le;
2344 struct hci_conn *cis;
2345 struct hci_link *link;
2347 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2348 le = hci_connect_le(hdev, dst, dst_type, false,
2350 HCI_LE_CONN_TIMEOUT,
2353 le = hci_connect_le_scan(hdev, dst, dst_type,
2355 HCI_LE_CONN_TIMEOUT,
2356 CONN_REASON_ISO_CONNECT);
2360 hci_iso_qos_setup(hdev, le, &qos->ucast.out,
2361 le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
2362 hci_iso_qos_setup(hdev, le, &qos->ucast.in,
2363 le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
2365 cis = hci_bind_cis(hdev, dst, dst_type, qos);
2371 link = hci_conn_link(le, cis);
2375 return ERR_PTR(-ENOLINK);
2378 /* Link takes the refcount */
2381 cis->state = BT_CONNECT;
2383 hci_le_create_cis_pending(hdev);
2388 /* Check link security requirement */
2389 int hci_conn_check_link_mode(struct hci_conn *conn)
2391 BT_DBG("hcon %p", conn);
2393 /* In Secure Connections Only mode, it is required that Secure
2394 * Connections is used and the link is encrypted with AES-CCM
2395 * using a P-256 authenticated combination key.
2397 if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2398 if (!hci_conn_sc_enabled(conn) ||
2399 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2400 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2404 /* AES encryption is required for Level 4:
2406 * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2409 * 128-bit equivalent strength for link and encryption keys
2410 * required using FIPS approved algorithms (E0 not allowed,
2411 * SAFER+ not allowed, and P-192 not allowed; encryption key
2414 if (conn->sec_level == BT_SECURITY_FIPS &&
2415 !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2416 bt_dev_err(conn->hdev,
2417 "Invalid security: Missing AES-CCM usage");
2421 if (hci_conn_ssp_enabled(conn) &&
2422 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2428 /* Authenticate remote device */
2429 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
2431 BT_DBG("hcon %p", conn);
2433 if (conn->pending_sec_level > sec_level)
2434 sec_level = conn->pending_sec_level;
2436 if (sec_level > conn->sec_level)
2437 conn->pending_sec_level = sec_level;
2438 else if (test_bit(HCI_CONN_AUTH, &conn->flags))
2441 /* Make sure we preserve an existing MITM requirement*/
2442 auth_type |= (conn->auth_type & 0x01);
2444 conn->auth_type = auth_type;
2446 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2447 struct hci_cp_auth_requested cp;
2449 cp.handle = cpu_to_le16(conn->handle);
2450 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
2453 /* Set the ENCRYPT_PEND to trigger encryption after
2456 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2457 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2463 /* Encrypt the link */
2464 static void hci_conn_encrypt(struct hci_conn *conn)
2466 BT_DBG("hcon %p", conn);
2468 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2469 struct hci_cp_set_conn_encrypt cp;
2470 cp.handle = cpu_to_le16(conn->handle);
2472 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2477 /* Enable security */
2478 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
2481 BT_DBG("hcon %p", conn);
2483 if (conn->type == LE_LINK)
2484 return smp_conn_security(conn, sec_level);
2486 /* For sdp we don't need the link key. */
2487 if (sec_level == BT_SECURITY_SDP)
2490 /* For non 2.1 devices and low security level we don't need the link
2492 if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
2495 /* For other security levels we need the link key. */
2496 if (!test_bit(HCI_CONN_AUTH, &conn->flags))
2499 switch (conn->key_type) {
2500 case HCI_LK_AUTH_COMBINATION_P256:
2501 /* An authenticated FIPS approved combination key has
2502 * sufficient security for security level 4 or lower.
2504 if (sec_level <= BT_SECURITY_FIPS)
2507 case HCI_LK_AUTH_COMBINATION_P192:
2508 /* An authenticated combination key has sufficient security for
2509 * security level 3 or lower.
2511 if (sec_level <= BT_SECURITY_HIGH)
2514 case HCI_LK_UNAUTH_COMBINATION_P192:
2515 case HCI_LK_UNAUTH_COMBINATION_P256:
2516 /* An unauthenticated combination key has sufficient security
2517 * for security level 2 or lower.
2519 if (sec_level <= BT_SECURITY_MEDIUM)
2522 case HCI_LK_COMBINATION:
2523 /* A combination key has always sufficient security for the
2524 * security levels 2 or lower. High security level requires the
2525 * combination key is generated using maximum PIN code length
2526 * (16). For pre 2.1 units.
2528 if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
2536 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
2540 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2542 if (!hci_conn_auth(conn, sec_level, auth_type))
2546 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2547 /* Ensure that the encryption key size has been read,
2548 * otherwise stall the upper layer responses.
2550 if (!conn->enc_key_size)
2553 /* Nothing else needed, all requirements are met */
2557 hci_conn_encrypt(conn);
2560 EXPORT_SYMBOL(hci_conn_security);
2562 /* Check secure link requirement */
2563 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2565 BT_DBG("hcon %p", conn);
2567 /* Accept if non-secure or higher security level is required */
2568 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2571 /* Accept if secure or higher security level is already present */
2572 if (conn->sec_level == BT_SECURITY_HIGH ||
2573 conn->sec_level == BT_SECURITY_FIPS)
2576 /* Reject not secure link */
2579 EXPORT_SYMBOL(hci_conn_check_secure);
2582 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2584 BT_DBG("hcon %p", conn);
2586 if (role == conn->role)
2589 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2590 struct hci_cp_switch_role cp;
2591 bacpy(&cp.bdaddr, &conn->dst);
2593 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2598 EXPORT_SYMBOL(hci_conn_switch_role);
2600 /* Enter active mode */
2601 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2603 struct hci_dev *hdev = conn->hdev;
2605 BT_DBG("hcon %p mode %d", conn, conn->mode);
2607 if (conn->mode != HCI_CM_SNIFF)
2610 if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2613 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2614 struct hci_cp_exit_sniff_mode cp;
2615 cp.handle = cpu_to_le16(conn->handle);
2616 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2620 if (hdev->idle_timeout > 0)
2621 queue_delayed_work(hdev->workqueue, &conn->idle_work,
2622 msecs_to_jiffies(hdev->idle_timeout));
2625 /* Drop all connection on the device */
2626 void hci_conn_hash_flush(struct hci_dev *hdev)
2628 struct list_head *head = &hdev->conn_hash.list;
2629 struct hci_conn *conn;
2631 BT_DBG("hdev %s", hdev->name);
2633 /* We should not traverse the list here, because hci_conn_del
2634 * can remove extra links, which may cause the list traversal
2635 * to hit items that have already been released.
2637 while ((conn = list_first_entry_or_null(head,
2640 conn->state = BT_CLOSED;
2641 hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
2646 /* Check pending connect attempts */
2647 void hci_conn_check_pending(struct hci_dev *hdev)
2649 struct hci_conn *conn;
2651 BT_DBG("hdev %s", hdev->name);
2655 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
2657 hci_acl_create_connection(conn);
2659 hci_dev_unlock(hdev);
2662 static u32 get_link_mode(struct hci_conn *conn)
2666 if (conn->role == HCI_ROLE_MASTER)
2667 link_mode |= HCI_LM_MASTER;
2669 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2670 link_mode |= HCI_LM_ENCRYPT;
2672 if (test_bit(HCI_CONN_AUTH, &conn->flags))
2673 link_mode |= HCI_LM_AUTH;
2675 if (test_bit(HCI_CONN_SECURE, &conn->flags))
2676 link_mode |= HCI_LM_SECURE;
2678 if (test_bit(HCI_CONN_FIPS, &conn->flags))
2679 link_mode |= HCI_LM_FIPS;
2684 int hci_get_conn_list(void __user *arg)
2687 struct hci_conn_list_req req, *cl;
2688 struct hci_conn_info *ci;
2689 struct hci_dev *hdev;
2690 int n = 0, size, err;
2692 if (copy_from_user(&req, arg, sizeof(req)))
2695 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2698 size = sizeof(req) + req.conn_num * sizeof(*ci);
2700 cl = kmalloc(size, GFP_KERNEL);
2704 hdev = hci_dev_get(req.dev_id);
2713 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2714 bacpy(&(ci + n)->bdaddr, &c->dst);
2715 (ci + n)->handle = c->handle;
2716 (ci + n)->type = c->type;
2717 (ci + n)->out = c->out;
2718 (ci + n)->state = c->state;
2719 (ci + n)->link_mode = get_link_mode(c);
2720 if (++n >= req.conn_num)
2723 hci_dev_unlock(hdev);
2725 cl->dev_id = hdev->id;
2727 size = sizeof(req) + n * sizeof(*ci);
2731 err = copy_to_user(arg, cl, size);
2734 return err ? -EFAULT : 0;
2737 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2739 struct hci_conn_info_req req;
2740 struct hci_conn_info ci;
2741 struct hci_conn *conn;
2742 char __user *ptr = arg + sizeof(req);
2744 if (copy_from_user(&req, arg, sizeof(req)))
2748 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2750 bacpy(&ci.bdaddr, &conn->dst);
2751 ci.handle = conn->handle;
2752 ci.type = conn->type;
2754 ci.state = conn->state;
2755 ci.link_mode = get_link_mode(conn);
2757 hci_dev_unlock(hdev);
2762 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2765 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2767 struct hci_auth_info_req req;
2768 struct hci_conn *conn;
2770 if (copy_from_user(&req, arg, sizeof(req)))
2774 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2776 req.type = conn->auth_type;
2777 hci_dev_unlock(hdev);
2782 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2785 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2787 struct hci_dev *hdev = conn->hdev;
2788 struct hci_chan *chan;
2790 BT_DBG("%s hcon %p", hdev->name, conn);
2792 if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2793 BT_DBG("Refusing to create new hci_chan");
2797 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2801 chan->conn = hci_conn_get(conn);
2802 skb_queue_head_init(&chan->data_q);
2803 chan->state = BT_CONNECTED;
2805 list_add_rcu(&chan->list, &conn->chan_list);
2810 void hci_chan_del(struct hci_chan *chan)
2812 struct hci_conn *conn = chan->conn;
2813 struct hci_dev *hdev = conn->hdev;
2815 BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
2817 list_del_rcu(&chan->list);
2821 /* Prevent new hci_chan's to be created for this hci_conn */
2822 set_bit(HCI_CONN_DROP, &conn->flags);
2826 skb_queue_purge(&chan->data_q);
2830 void hci_chan_list_flush(struct hci_conn *conn)
2832 struct hci_chan *chan, *n;
2834 BT_DBG("hcon %p", conn);
2836 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2840 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2843 struct hci_chan *hchan;
2845 list_for_each_entry(hchan, &hcon->chan_list, list) {
2846 if (hchan->handle == handle)
2853 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2855 struct hci_conn_hash *h = &hdev->conn_hash;
2856 struct hci_conn *hcon;
2857 struct hci_chan *hchan = NULL;
2861 list_for_each_entry_rcu(hcon, &h->list, list) {
2862 hchan = __hci_chan_lookup_handle(hcon, handle);
2872 u32 hci_conn_get_phy(struct hci_conn *conn)
2876 /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2877 * Table 6.2: Packets defined for synchronous, asynchronous, and
2878 * CPB logical transport types.
2880 switch (conn->type) {
2882 /* SCO logical transport (1 Mb/s):
2883 * HV1, HV2, HV3 and DV.
2885 phys |= BT_PHY_BR_1M_1SLOT;
2890 /* ACL logical transport (1 Mb/s) ptt=0:
2891 * DH1, DM3, DH3, DM5 and DH5.
2893 phys |= BT_PHY_BR_1M_1SLOT;
2895 if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2896 phys |= BT_PHY_BR_1M_3SLOT;
2898 if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2899 phys |= BT_PHY_BR_1M_5SLOT;
2901 /* ACL logical transport (2 Mb/s) ptt=1:
2902 * 2-DH1, 2-DH3 and 2-DH5.
2904 if (!(conn->pkt_type & HCI_2DH1))
2905 phys |= BT_PHY_EDR_2M_1SLOT;
2907 if (!(conn->pkt_type & HCI_2DH3))
2908 phys |= BT_PHY_EDR_2M_3SLOT;
2910 if (!(conn->pkt_type & HCI_2DH5))
2911 phys |= BT_PHY_EDR_2M_5SLOT;
2913 /* ACL logical transport (3 Mb/s) ptt=1:
2914 * 3-DH1, 3-DH3 and 3-DH5.
2916 if (!(conn->pkt_type & HCI_3DH1))
2917 phys |= BT_PHY_EDR_3M_1SLOT;
2919 if (!(conn->pkt_type & HCI_3DH3))
2920 phys |= BT_PHY_EDR_3M_3SLOT;
2922 if (!(conn->pkt_type & HCI_3DH5))
2923 phys |= BT_PHY_EDR_3M_5SLOT;
2928 /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2929 phys |= BT_PHY_BR_1M_1SLOT;
2931 if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2932 phys |= BT_PHY_BR_1M_3SLOT;
2934 /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2935 if (!(conn->pkt_type & ESCO_2EV3))
2936 phys |= BT_PHY_EDR_2M_1SLOT;
2938 if (!(conn->pkt_type & ESCO_2EV5))
2939 phys |= BT_PHY_EDR_2M_3SLOT;
2941 /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2942 if (!(conn->pkt_type & ESCO_3EV3))
2943 phys |= BT_PHY_EDR_3M_1SLOT;
2945 if (!(conn->pkt_type & ESCO_3EV5))
2946 phys |= BT_PHY_EDR_3M_3SLOT;
2951 if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2952 phys |= BT_PHY_LE_1M_TX;
2954 if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2955 phys |= BT_PHY_LE_1M_RX;
2957 if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2958 phys |= BT_PHY_LE_2M_TX;
2960 if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
2961 phys |= BT_PHY_LE_2M_RX;
2963 if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
2964 phys |= BT_PHY_LE_CODED_TX;
2966 if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
2967 phys |= BT_PHY_LE_CODED_RX;
2975 static int abort_conn_sync(struct hci_dev *hdev, void *data)
2977 struct hci_conn *conn;
2978 u16 handle = PTR_UINT(data);
2980 conn = hci_conn_hash_lookup_handle(hdev, handle);
2984 return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
2987 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2989 struct hci_dev *hdev = conn->hdev;
2991 /* If abort_reason has already been set it means the connection is
2992 * already being aborted so don't attempt to overwrite it.
2994 if (conn->abort_reason)
2997 bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
2999 conn->abort_reason = reason;
3001 /* If the connection is pending check the command opcode since that
3002 * might be blocking on hci_cmd_sync_work while waiting its respective
3003 * event so we need to hci_cmd_sync_cancel to cancel it.
3005 * hci_connect_le serializes the connection attempts so only one
3006 * connection can be in BT_CONNECT at time.
3008 if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
3009 switch (hci_skb_event(hdev->sent_cmd)) {
3010 case HCI_EV_LE_CONN_COMPLETE:
3011 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
3012 case HCI_EVT_LE_CIS_ESTABLISHED:
3013 hci_cmd_sync_cancel(hdev, ECANCELED);
3018 return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),