net/bluetooth/hci_conn.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4    Copyright 2023 NXP
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI connection handling. */
27
28 #include <linux/export.h>
29 #include <linux/debugfs.h>
30
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/iso.h>
35 #include <net/bluetooth/mgmt.h>
36
37 #include "hci_request.h"
38 #include "smp.h"
39 #include "eir.h"
40
41 struct sco_param {
42         u16 pkt_type;
43         u16 max_latency;
44         u8  retrans_effort;
45 };
46
47 struct conn_handle_t {
48         struct hci_conn *conn;
49         __u16 handle;
50 };
51
52 static const struct sco_param esco_param_cvsd[] = {
53         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,   0x01 }, /* S3 */
54         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,   0x01 }, /* S2 */
55         { EDR_ESCO_MASK | ESCO_EV3,   0x0007,   0x01 }, /* S1 */
56         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0x01 }, /* D1 */
57         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0x01 }, /* D0 */
58 };
59
60 static const struct sco_param sco_param_cvsd[] = {
61         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0xff }, /* D1 */
62         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0xff }, /* D0 */
63 };
64
65 static const struct sco_param esco_param_msbc[] = {
66         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,   0x02 }, /* T2 */
67         { EDR_ESCO_MASK | ESCO_EV3,   0x0008,   0x02 }, /* T1 */
68 };
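
/* Note on the tables above (illustrative): conn->attempt is used as a
 * 1-based index, so every retry falls back to a more conservative setting,
 * e.g. for CVSD over eSCO:
 *
 *        param = &esco_param_cvsd[conn->attempt - 1];    (attempt 1 selects S3)
 *
 * max_latency is in milliseconds and retrans_effort uses the HCI
 * retransmission effort encoding (0x01 = optimize for power,
 * 0x02 = optimize for link quality, 0xff = don't care).
 */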
69
70 /* This function requires the caller holds hdev->lock */
71 void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
72 {
73         struct hci_conn_params *params;
74         struct hci_dev *hdev = conn->hdev;
75         struct smp_irk *irk;
76         bdaddr_t *bdaddr;
77         u8 bdaddr_type;
78
79         bdaddr = &conn->dst;
80         bdaddr_type = conn->dst_type;
81
82         /* Check if we need to convert to identity address */
83         irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
84         if (irk) {
85                 bdaddr = &irk->bdaddr;
86                 bdaddr_type = irk->addr_type;
87         }
88
89         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
90                                            bdaddr_type);
91         if (!params)
92                 return;
93
94         if (params->conn) {
95                 hci_conn_drop(params->conn);
96                 hci_conn_put(params->conn);
97                 params->conn = NULL;
98         }
99
100         if (!params->explicit_connect)
101                 return;
102
103         /* If the status indicates successful cancellation of
104          * the attempt (i.e. Unknown Connection Id) there's no point in
105          * notifying failure since we'll go back to keep trying to
106          * connect. The only exception is explicit connect requests
107          * where a timeout + cancel does indicate an actual failure.
108          */
109         if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
110                 mgmt_connect_failed(hdev, &conn->dst, conn->type,
111                                     conn->dst_type, status);
112
113         /* The connection attempt was still scanning for a new RPA, i.e. in
114          * the scan phase. If the params are not associated with any other
115          * autoconnect action, remove them completely. If they are, just unmark
116          * them as waiting for a connection by clearing the explicit_connect field.
117          */
118         params->explicit_connect = false;
119
120         hci_pend_le_list_del_init(params);
121
122         switch (params->auto_connect) {
123         case HCI_AUTO_CONN_EXPLICIT:
124                 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
125                 /* return instead of break to avoid duplicate scan update */
126                 return;
127         case HCI_AUTO_CONN_DIRECT:
128         case HCI_AUTO_CONN_ALWAYS:
129                 hci_pend_le_list_add(params, &hdev->pend_le_conns);
130                 break;
131         case HCI_AUTO_CONN_REPORT:
132                 hci_pend_le_list_add(params, &hdev->pend_le_reports);
133                 break;
134         default:
135                 break;
136         }
137
138         hci_update_passive_scan(hdev);
139 }
140
141 static void hci_conn_cleanup(struct hci_conn *conn)
142 {
143         struct hci_dev *hdev = conn->hdev;
144
145         if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
146                 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
147
148         if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
149                 hci_remove_link_key(hdev, &conn->dst);
150
151         hci_chan_list_flush(conn);
152
153         hci_conn_hash_del(hdev, conn);
154
155         if (HCI_CONN_HANDLE_UNSET(conn->handle))
156                 ida_free(&hdev->unset_handle_ida, conn->handle);
157
158         if (conn->cleanup)
159                 conn->cleanup(conn);
160
161         if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
162                 switch (conn->setting & SCO_AIRMODE_MASK) {
163                 case SCO_AIRMODE_CVSD:
164                 case SCO_AIRMODE_TRANSP:
165                         if (hdev->notify)
166                                 hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
167                         break;
168                 }
169         } else {
170                 if (hdev->notify)
171                         hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
172         }
173
174         debugfs_remove_recursive(conn->debugfs);
175
176         hci_conn_del_sysfs(conn);
177
178         hci_dev_put(hdev);
179 }
180
181 int hci_disconnect(struct hci_conn *conn, __u8 reason)
182 {
183         BT_DBG("hcon %p", conn);
184
185         /* When we are the central of an established connection and it enters
186          * the disconnect timeout, then go ahead and try to read the
187          * current clock offset.  Processing of the result is done
188          * within the event handling and hci_clock_offset_evt function.
189          */
190         if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
191             (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
192                 struct hci_dev *hdev = conn->hdev;
193                 struct hci_cp_read_clock_offset clkoff_cp;
194
195                 clkoff_cp.handle = cpu_to_le16(conn->handle);
196                 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
197                              &clkoff_cp);
198         }
199
200         return hci_abort_conn(conn, reason);
201 }
202
203 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
204 {
205         struct hci_dev *hdev = conn->hdev;
206         struct hci_cp_add_sco cp;
207
208         BT_DBG("hcon %p", conn);
209
210         conn->state = BT_CONNECT;
211         conn->out = true;
212
213         conn->attempt++;
214
215         cp.handle   = cpu_to_le16(handle);
216         cp.pkt_type = cpu_to_le16(conn->pkt_type);
217
218         hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
219 }
220
221 static bool find_next_esco_param(struct hci_conn *conn,
222                                  const struct sco_param *esco_param, int size)
223 {
224         if (!conn->parent)
225                 return false;
226
227         for (; conn->attempt <= size; conn->attempt++) {
228                 if (lmp_esco_2m_capable(conn->parent) ||
229                     (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
230                         break;
231                 BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
232                        conn, conn->attempt);
233         }
234
235         return conn->attempt <= size;
236 }
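
/* Note: find_next_esco_param() advances conn->attempt past table entries
 * that require eSCO 2M (EDR 2-EV3) support the remote side lacks; a true
 * return means esco_param[conn->attempt - 1] is usable, false means the
 * table is exhausted or the parent ACL is gone.
 */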
237
238 static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
239 {
240         int err;
241         __u8 vnd_len, *vnd_data = NULL;
242         struct hci_op_configure_data_path *cmd = NULL;
243
244         if (!codec->data_path || !hdev->get_codec_config_data)
245                 return 0;
246
247         /* Not having the data path id callback is not an error either */
248         if (!hdev->get_data_path_id)
249                 return 0;
250
251         err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
252                                           &vnd_data);
253         if (err < 0)
254                 goto error;
255
256         cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
257         if (!cmd) {
258                 err = -ENOMEM;
259                 goto error;
260         }
261
262         err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
263         if (err < 0)
264                 goto error;
265
266         cmd->vnd_len = vnd_len;
267         memcpy(cmd->vnd_data, vnd_data, vnd_len);
268
269         cmd->direction = 0x00;
270         __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
271                               sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
272
273         cmd->direction = 0x01;
274         err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
275                                     sizeof(*cmd) + vnd_len, cmd,
276                                     HCI_CMD_TIMEOUT);
277 error:
278
279         kfree(cmd);
280         kfree(vnd_data);
281         return err;
282 }
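
/* Illustrative sketch of the driver side (hypothetical foo_ names, only to
 * show how the optional hooks used above are expected to behave): a
 * controller that supports offloaded codecs provides both callbacks,
 * roughly along these lines:
 *
 *        static int foo_get_data_path_id(struct hci_dev *hdev, __u8 *id)
 *        {
 *                *id = 1;        (vendor specific data transport path)
 *                return 0;
 *        }
 *
 * while get_codec_config_data() fills vnd_len/vnd_data with a vendor
 * specific blob that is sent verbatim, once per direction, in the
 * HCI_CONFIGURE_DATA_PATH command built above.
 */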
283
284 static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
285 {
286         struct conn_handle_t *conn_handle = data;
287         struct hci_conn *conn = conn_handle->conn;
288         __u16 handle = conn_handle->handle;
289         struct hci_cp_enhanced_setup_sync_conn cp;
290         const struct sco_param *param;
291
292         kfree(conn_handle);
293
294         bt_dev_dbg(hdev, "hcon %p", conn);
295
296         configure_datapath_sync(hdev, &conn->codec);
297
298         conn->state = BT_CONNECT;
299         conn->out = true;
300
301         conn->attempt++;
302
303         memset(&cp, 0x00, sizeof(cp));
304
305         cp.handle   = cpu_to_le16(handle);
306
307         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
308         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
309
310         switch (conn->codec.id) {
311         case BT_CODEC_MSBC:
312                 if (!find_next_esco_param(conn, esco_param_msbc,
313                                           ARRAY_SIZE(esco_param_msbc)))
314                         return -EINVAL;
315
316                 param = &esco_param_msbc[conn->attempt - 1];
317                 cp.tx_coding_format.id = 0x05;
318                 cp.rx_coding_format.id = 0x05;
319                 cp.tx_codec_frame_size = __cpu_to_le16(60);
320                 cp.rx_codec_frame_size = __cpu_to_le16(60);
321                 cp.in_bandwidth = __cpu_to_le32(32000);
322                 cp.out_bandwidth = __cpu_to_le32(32000);
323                 cp.in_coding_format.id = 0x04;
324                 cp.out_coding_format.id = 0x04;
325                 cp.in_coded_data_size = __cpu_to_le16(16);
326                 cp.out_coded_data_size = __cpu_to_le16(16);
327                 cp.in_pcm_data_format = 2;
328                 cp.out_pcm_data_format = 2;
329                 cp.in_pcm_sample_payload_msb_pos = 0;
330                 cp.out_pcm_sample_payload_msb_pos = 0;
331                 cp.in_data_path = conn->codec.data_path;
332                 cp.out_data_path = conn->codec.data_path;
333                 cp.in_transport_unit_size = 1;
334                 cp.out_transport_unit_size = 1;
335                 break;
336
337         case BT_CODEC_TRANSPARENT:
338                 if (!find_next_esco_param(conn, esco_param_msbc,
339                                           ARRAY_SIZE(esco_param_msbc)))
340                         return -EINVAL;
341                 param = &esco_param_msbc[conn->attempt - 1];
342                 cp.tx_coding_format.id = 0x03;
343                 cp.rx_coding_format.id = 0x03;
344                 cp.tx_codec_frame_size = __cpu_to_le16(60);
345                 cp.rx_codec_frame_size = __cpu_to_le16(60);
346                 cp.in_bandwidth = __cpu_to_le32(0x1f40);
347                 cp.out_bandwidth = __cpu_to_le32(0x1f40);
348                 cp.in_coding_format.id = 0x03;
349                 cp.out_coding_format.id = 0x03;
350                 cp.in_coded_data_size = __cpu_to_le16(16);
351                 cp.out_coded_data_size = __cpu_to_le16(16);
352                 cp.in_pcm_data_format = 2;
353                 cp.out_pcm_data_format = 2;
354                 cp.in_pcm_sample_payload_msb_pos = 0;
355                 cp.out_pcm_sample_payload_msb_pos = 0;
356                 cp.in_data_path = conn->codec.data_path;
357                 cp.out_data_path = conn->codec.data_path;
358                 cp.in_transport_unit_size = 1;
359                 cp.out_transport_unit_size = 1;
360                 break;
361
362         case BT_CODEC_CVSD:
363                 if (conn->parent && lmp_esco_capable(conn->parent)) {
364                         if (!find_next_esco_param(conn, esco_param_cvsd,
365                                                   ARRAY_SIZE(esco_param_cvsd)))
366                                 return -EINVAL;
367                         param = &esco_param_cvsd[conn->attempt - 1];
368                 } else {
369                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
370                                 return -EINVAL;
371                         param = &sco_param_cvsd[conn->attempt - 1];
372                 }
373                 cp.tx_coding_format.id = 2;
374                 cp.rx_coding_format.id = 2;
375                 cp.tx_codec_frame_size = __cpu_to_le16(60);
376                 cp.rx_codec_frame_size = __cpu_to_le16(60);
377                 cp.in_bandwidth = __cpu_to_le32(16000);
378                 cp.out_bandwidth = __cpu_to_le32(16000);
379                 cp.in_coding_format.id = 4;
380                 cp.out_coding_format.id = 4;
381                 cp.in_coded_data_size = __cpu_to_le16(16);
382                 cp.out_coded_data_size = __cpu_to_le16(16);
383                 cp.in_pcm_data_format = 2;
384                 cp.out_pcm_data_format = 2;
385                 cp.in_pcm_sample_payload_msb_pos = 0;
386                 cp.out_pcm_sample_payload_msb_pos = 0;
387                 cp.in_data_path = conn->codec.data_path;
388                 cp.out_data_path = conn->codec.data_path;
389                 cp.in_transport_unit_size = 16;
390                 cp.out_transport_unit_size = 16;
391                 break;
392         default:
393                 return -EINVAL;
394         }
395
396         cp.retrans_effort = param->retrans_effort;
397         cp.pkt_type = __cpu_to_le16(param->pkt_type);
398         cp.max_latency = __cpu_to_le16(param->max_latency);
399
400         if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
401                 return -EIO;
402
403         return 0;
404 }
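
/* Note: the coding format ids above follow the Bluetooth assigned numbers
 * (0x02 = CVSD, 0x03 = Transparent, 0x04 = Linear PCM, 0x05 = mSBC); the
 * tx/rx_coding_format fields describe the over-the-air format while
 * in/out_coding_format, bandwidth and the PCM fields describe the host to
 * controller transport.
 */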
405
406 static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
407 {
408         struct hci_dev *hdev = conn->hdev;
409         struct hci_cp_setup_sync_conn cp;
410         const struct sco_param *param;
411
412         bt_dev_dbg(hdev, "hcon %p", conn);
413
414         conn->state = BT_CONNECT;
415         conn->out = true;
416
417         conn->attempt++;
418
419         cp.handle   = cpu_to_le16(handle);
420
421         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
422         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
423         cp.voice_setting  = cpu_to_le16(conn->setting);
424
425         switch (conn->setting & SCO_AIRMODE_MASK) {
426         case SCO_AIRMODE_TRANSP:
427                 if (!find_next_esco_param(conn, esco_param_msbc,
428                                           ARRAY_SIZE(esco_param_msbc)))
429                         return false;
430                 param = &esco_param_msbc[conn->attempt - 1];
431                 break;
432         case SCO_AIRMODE_CVSD:
433                 if (conn->parent && lmp_esco_capable(conn->parent)) {
434                         if (!find_next_esco_param(conn, esco_param_cvsd,
435                                                   ARRAY_SIZE(esco_param_cvsd)))
436                                 return false;
437                         param = &esco_param_cvsd[conn->attempt - 1];
438                 } else {
439                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
440                                 return false;
441                         param = &sco_param_cvsd[conn->attempt - 1];
442                 }
443                 break;
444         default:
445                 return false;
446         }
447
448         cp.retrans_effort = param->retrans_effort;
449         cp.pkt_type = __cpu_to_le16(param->pkt_type);
450         cp.max_latency = __cpu_to_le16(param->max_latency);
451
452         if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
453                 return false;
454
455         return true;
456 }
457
458 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
459 {
460         int result;
461         struct conn_handle_t *conn_handle;
462
463         if (enhanced_sync_conn_capable(conn->hdev)) {
464                 conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
465
466                 if (!conn_handle)
467                         return false;
468
469                 conn_handle->conn = conn;
470                 conn_handle->handle = handle;
471                 result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
472                                             conn_handle, NULL);
473                 if (result < 0)
474                         kfree(conn_handle);
475
476                 return result == 0;
477         }
478
479         return hci_setup_sync_conn(conn, handle);
480 }
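
/* Usage sketch (illustrative; "acl" is the parent ACL hci_conn and "sco"
 * the SCO/eSCO hci_conn being set up, as in hci_sco_setup() below):
 *
 *        if (!hci_setup_sync(sco, acl->handle))
 *                ... report the failure via hci_connect_cfm() ...
 *
 * On the enhanced path the request is only queued via hci_cmd_sync_queue(),
 * so a true return means the command was queued, not that the connection
 * is established.
 */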
481
482 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
483                       u16 to_multiplier)
484 {
485         struct hci_dev *hdev = conn->hdev;
486         struct hci_conn_params *params;
487         struct hci_cp_le_conn_update cp;
488
489         hci_dev_lock(hdev);
490
491         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
492         if (params) {
493                 params->conn_min_interval = min;
494                 params->conn_max_interval = max;
495                 params->conn_latency = latency;
496                 params->supervision_timeout = to_multiplier;
497         }
498
499         hci_dev_unlock(hdev);
500
501         memset(&cp, 0, sizeof(cp));
502         cp.handle               = cpu_to_le16(conn->handle);
503         cp.conn_interval_min    = cpu_to_le16(min);
504         cp.conn_interval_max    = cpu_to_le16(max);
505         cp.conn_latency         = cpu_to_le16(latency);
506         cp.supervision_timeout  = cpu_to_le16(to_multiplier);
507         cp.min_ce_len           = cpu_to_le16(0x0000);
508         cp.max_ce_len           = cpu_to_le16(0x0000);
509
510         hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
511
512         if (params)
513                 return 0x01;
514
515         return 0x00;
516 }
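
/* Illustrative example: the parameters use the raw units of the HCI LE
 * Connection Update command, i.e. min/max interval in 1.25 ms steps,
 * latency in connection events and supervision timeout in 10 ms steps,
 * so a 30-50 ms interval with a 420 ms timeout would be:
 *
 *        hci_le_conn_update(conn, 0x0018, 0x0028, 0x0000, 0x002a);
 */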
517
518 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
519                       __u8 ltk[16], __u8 key_size)
520 {
521         struct hci_dev *hdev = conn->hdev;
522         struct hci_cp_le_start_enc cp;
523
524         BT_DBG("hcon %p", conn);
525
526         memset(&cp, 0, sizeof(cp));
527
528         cp.handle = cpu_to_le16(conn->handle);
529         cp.rand = rand;
530         cp.ediv = ediv;
531         memcpy(cp.ltk, ltk, key_size);
532
533         hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
534 }
535
536 /* Device _must_ be locked */
537 void hci_sco_setup(struct hci_conn *conn, __u8 status)
538 {
539         struct hci_link *link;
540
541         link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
542         if (!link || !link->conn)
543                 return;
544
545         BT_DBG("hcon %p", conn);
546
547         if (!status) {
548                 if (lmp_esco_capable(conn->hdev))
549                         hci_setup_sync(link->conn, conn->handle);
550                 else
551                         hci_add_sco(link->conn, conn->handle);
552         } else {
553                 hci_connect_cfm(link->conn, status);
554                 hci_conn_del(link->conn);
555         }
556 }
557
558 static void hci_conn_timeout(struct work_struct *work)
559 {
560         struct hci_conn *conn = container_of(work, struct hci_conn,
561                                              disc_work.work);
562         int refcnt = atomic_read(&conn->refcnt);
563
564         BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
565
566         WARN_ON(refcnt < 0);
567
568         /* FIXME: It was observed that in pairing failed scenario, refcnt
569          * drops below 0. Probably this is because l2cap_conn_del calls
570          * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
571          * dropped. After that loop hci_chan_del is called which also drops
572          * conn. For now make sure that ACL is alive if refcnt is higher than 0,
573          * otherwise drop it.
574          */
575         if (refcnt > 0)
576                 return;
577
578         hci_abort_conn(conn, hci_proto_disconn_ind(conn));
579 }
580
581 /* Enter sniff mode */
582 static void hci_conn_idle(struct work_struct *work)
583 {
584         struct hci_conn *conn = container_of(work, struct hci_conn,
585                                              idle_work.work);
586         struct hci_dev *hdev = conn->hdev;
587
588         BT_DBG("hcon %p mode %d", conn, conn->mode);
589
590         if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
591                 return;
592
593         if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
594                 return;
595
596         if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
597                 struct hci_cp_sniff_subrate cp;
598                 cp.handle             = cpu_to_le16(conn->handle);
599                 cp.max_latency        = cpu_to_le16(0);
600                 cp.min_remote_timeout = cpu_to_le16(0);
601                 cp.min_local_timeout  = cpu_to_le16(0);
602                 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
603         }
604
605         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
606                 struct hci_cp_sniff_mode cp;
607                 cp.handle       = cpu_to_le16(conn->handle);
608                 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
609                 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
610                 cp.attempt      = cpu_to_le16(4);
611                 cp.timeout      = cpu_to_le16(1);
612                 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
613         }
614 }
615
616 static void hci_conn_auto_accept(struct work_struct *work)
617 {
618         struct hci_conn *conn = container_of(work, struct hci_conn,
619                                              auto_accept_work.work);
620
621         hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
622                      &conn->dst);
623 }
624
625 static void le_disable_advertising(struct hci_dev *hdev)
626 {
627         if (ext_adv_capable(hdev)) {
628                 struct hci_cp_le_set_ext_adv_enable cp;
629
630                 cp.enable = 0x00;
631                 cp.num_of_sets = 0x00;
632
633                 hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
634                              &cp);
635         } else {
636                 u8 enable = 0x00;
637                 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
638                              &enable);
639         }
640 }
641
642 static void le_conn_timeout(struct work_struct *work)
643 {
644         struct hci_conn *conn = container_of(work, struct hci_conn,
645                                              le_conn_timeout.work);
646         struct hci_dev *hdev = conn->hdev;
647
648         BT_DBG("");
649
650         /* We could end up here due to having done directed advertising,
651          * so clean up the state if necessary. This should however only
652          * happen with broken hardware or if low duty cycle was used
653          * (which doesn't have a timeout of its own).
654          */
655         if (conn->role == HCI_ROLE_SLAVE) {
656                 /* Disable LE Advertising */
657                 le_disable_advertising(hdev);
658                 hci_dev_lock(hdev);
659                 hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
660                 hci_dev_unlock(hdev);
661                 return;
662         }
663
664         hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
665 }
666
667 struct iso_cig_params {
668         struct hci_cp_le_set_cig_params cp;
669         struct hci_cis_params cis[0x1f];
670 };
671
672 struct iso_list_data {
673         union {
674                 u8  cig;
675                 u8  big;
676         };
677         union {
678                 u8  cis;
679                 u8  bis;
680                 u16 sync_handle;
681         };
682         int count;
683         bool big_term;
684         bool pa_sync_term;
685         bool big_sync_term;
686 };
687
688 static void bis_list(struct hci_conn *conn, void *data)
689 {
690         struct iso_list_data *d = data;
691
692         /* Skip if not broadcast/ANY address */
693         if (bacmp(&conn->dst, BDADDR_ANY))
694                 return;
695
696         if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
697             d->bis != conn->iso_qos.bcast.bis)
698                 return;
699
700         d->count++;
701 }
702
703 static int terminate_big_sync(struct hci_dev *hdev, void *data)
704 {
705         struct iso_list_data *d = data;
706
707         bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
708
709         hci_disable_per_advertising_sync(hdev, d->bis);
710         hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
711
712         /* Only terminate BIG if it has been created */
713         if (!d->big_term)
714                 return 0;
715
716         return hci_le_terminate_big_sync(hdev, d->big,
717                                          HCI_ERROR_LOCAL_HOST_TERM);
718 }
719
720 static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
721 {
722         kfree(data);
723 }
724
725 static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
726 {
727         struct iso_list_data *d;
728         int ret;
729
730         bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
731                    conn->iso_qos.bcast.bis);
732
733         d = kzalloc(sizeof(*d), GFP_KERNEL);
734         if (!d)
735                 return -ENOMEM;
736
737         d->big = conn->iso_qos.bcast.big;
738         d->bis = conn->iso_qos.bcast.bis;
739         d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);
740
741         ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
742                                  terminate_big_destroy);
743         if (ret)
744                 kfree(d);
745
746         return ret;
747 }
748
749 static int big_terminate_sync(struct hci_dev *hdev, void *data)
750 {
751         struct iso_list_data *d = data;
752
753         bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
754                    d->sync_handle);
755
756         if (d->big_sync_term)
757                 hci_le_big_terminate_sync(hdev, d->big);
758
759         if (d->pa_sync_term)
760                 return hci_le_pa_terminate_sync(hdev, d->sync_handle);
761
762         return 0;
763 }
764
765 static void find_bis(struct hci_conn *conn, void *data)
766 {
767         struct iso_list_data *d = data;
768
769         /* Ignore if BIG doesn't match */
770         if (d->big != conn->iso_qos.bcast.big)
771                 return;
772
773         d->count++;
774 }
775
776 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
777 {
778         struct iso_list_data *d;
779         int ret;
780
781         bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);
782
783         d = kzalloc(sizeof(*d), GFP_KERNEL);
784         if (!d)
785                 return -ENOMEM;
786
788         d->big = big;
789         d->sync_handle = conn->sync_handle;
790
791         if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) {
792                 hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
793                                         HCI_CONN_PA_SYNC, d);
794
795                 if (!d->count)
796                         d->pa_sync_term = true;
797
798                 d->count = 0;
799         }
800
801         if (test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
802                 hci_conn_hash_list_flag(hdev, find_bis, ISO_LINK,
803                                         HCI_CONN_BIG_SYNC, d);
804
805                 if (!d->count)
806                         d->big_sync_term = true;
807         }
808
809         ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
810                                  terminate_big_destroy);
811         if (ret)
812                 kfree(d);
813
814         return ret;
815 }
816
817 /* Cleanup BIS connection
818  *
819  * Detects if there are any BIS left connected in a BIG
820  * broadcaster: Remove advertising instance and terminate BIG.
821  * broadcast receiver: Terminate BIG sync and terminate PA sync.
822  */
823 static void bis_cleanup(struct hci_conn *conn)
824 {
825         struct hci_dev *hdev = conn->hdev;
826         struct hci_conn *bis;
827
828         bt_dev_dbg(hdev, "conn %p", conn);
829
830         if (conn->role == HCI_ROLE_MASTER) {
831                 if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
832                         return;
833
834                 /* Check if ISO connection is a BIS and terminate advertising
835                  * set and BIG if there are no other connections using it.
836                  */
837                 bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
838                 if (bis)
839                         return;
840
841                 hci_le_terminate_big(hdev, conn);
842         } else {
843                 hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
844                                      conn);
845         }
846 }
847
848 static int remove_cig_sync(struct hci_dev *hdev, void *data)
849 {
850         u8 handle = PTR_UINT(data);
851
852         return hci_le_remove_cig_sync(hdev, handle);
853 }
854
855 static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
856 {
857         bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
858
859         return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
860                                   NULL);
861 }
862
863 static void find_cis(struct hci_conn *conn, void *data)
864 {
865         struct iso_list_data *d = data;
866
867         /* Ignore broadcast or if the CIG doesn't match */
868         if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
869                 return;
870
871         d->count++;
872 }
873
874 /* Cleanup CIS connection:
875  *
876  * Detects if there are any CIS left connected in a CIG and removes the CIG if not.
877  */
878 static void cis_cleanup(struct hci_conn *conn)
879 {
880         struct hci_dev *hdev = conn->hdev;
881         struct iso_list_data d;
882
883         if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
884                 return;
885
886         memset(&d, 0, sizeof(d));
887         d.cig = conn->iso_qos.ucast.cig;
888
889         /* Check if ISO connection is a CIS and remove CIG if there are
890          * no other connections using it.
891          */
892         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
893         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
894         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
895         if (d.count)
896                 return;
897
898         hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
899 }
900
901 static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
902 {
903         return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
904                                U16_MAX, GFP_ATOMIC);
905 }
906
907 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
908                               u8 role, u16 handle)
909 {
910         struct hci_conn *conn;
911
912         bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
913
914         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
915         if (!conn)
916                 return NULL;
917
918         bacpy(&conn->dst, dst);
919         bacpy(&conn->src, &hdev->bdaddr);
920         conn->handle = handle;
921         conn->hdev  = hdev;
922         conn->type  = type;
923         conn->role  = role;
924         conn->mode  = HCI_CM_ACTIVE;
925         conn->state = BT_OPEN;
926         conn->auth_type = HCI_AT_GENERAL_BONDING;
927         conn->io_capability = hdev->io_capability;
928         conn->remote_auth = 0xff;
929         conn->key_type = 0xff;
930         conn->rssi = HCI_RSSI_INVALID;
931         conn->tx_power = HCI_TX_POWER_INVALID;
932         conn->max_tx_power = HCI_TX_POWER_INVALID;
933         conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
934
935         set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
936         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
937
938         /* Set Default Authenticated payload timeout to 30s */
939         conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
940
941         if (conn->role == HCI_ROLE_MASTER)
942                 conn->out = true;
943
944         switch (type) {
945         case ACL_LINK:
946                 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
947                 break;
948         case LE_LINK:
949                 /* conn->src should reflect the local identity address */
950                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
951                 break;
952         case ISO_LINK:
953                 /* conn->src should reflect the local identity address */
954                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
955
956                 /* set proper cleanup function */
957                 if (!bacmp(dst, BDADDR_ANY))
958                         conn->cleanup = bis_cleanup;
959                 else if (conn->role == HCI_ROLE_MASTER)
960                         conn->cleanup = cis_cleanup;
961
962                 break;
963         case SCO_LINK:
964                 if (lmp_esco_capable(hdev))
965                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
966                                         (hdev->esco_type & EDR_ESCO_MASK);
967                 else
968                         conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
969                 break;
970         case ESCO_LINK:
971                 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
972                 break;
973         }
974
975         skb_queue_head_init(&conn->data_q);
976
977         INIT_LIST_HEAD(&conn->chan_list);
978         INIT_LIST_HEAD(&conn->link_list);
979
980         INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
981         INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
982         INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
983         INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
984
985         atomic_set(&conn->refcnt, 0);
986
987         hci_dev_hold(hdev);
988
989         hci_conn_hash_add(hdev, conn);
990
991         /* The SCO and eSCO connections will only be notified when their
992          * setup has been completed. This is different to ACL links which
993          * can be notified right away.
994          */
995         if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
996                 if (hdev->notify)
997                         hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
998         }
999
1000         hci_conn_init_sysfs(conn);
1001
1002         return conn;
1003 }
1004
1005 struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
1006                                     bdaddr_t *dst, u8 role)
1007 {
1008         int handle;
1009
1010         bt_dev_dbg(hdev, "dst %pMR", dst);
1011
1012         handle = hci_conn_hash_alloc_unset(hdev);
1013         if (unlikely(handle < 0))
1014                 return NULL;
1015
1016         return hci_conn_add(hdev, type, dst, role, handle);
1017 }
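
/* Note: the handle allocated here is only a placeholder above
 * HCI_CONN_HANDLE_MAX (see hci_conn_hash_alloc_unset()); it is replaced
 * with the real controller handle via hci_conn_set_handle() once the
 * corresponding connection complete event arrives.
 */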
1018
1019 static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
1020 {
1021         if (!reason)
1022                 reason = HCI_ERROR_REMOTE_USER_TERM;
1023
1024         /* Due to a race, the SCO/ISO conn might not be established yet at this point,
1025          * and nothing else will clean it up. In other cases it is done via HCI
1026          * events.
1027          */
1028         switch (conn->type) {
1029         case SCO_LINK:
1030         case ESCO_LINK:
1031                 if (HCI_CONN_HANDLE_UNSET(conn->handle))
1032                         hci_conn_failed(conn, reason);
1033                 break;
1034         case ISO_LINK:
1035                 if ((conn->state != BT_CONNECTED &&
1036                     !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) ||
1037                     test_bit(HCI_CONN_BIG_CREATED, &conn->flags))
1038                         hci_conn_failed(conn, reason);
1039                 break;
1040         }
1041 }
1042
1043 static void hci_conn_unlink(struct hci_conn *conn)
1044 {
1045         struct hci_dev *hdev = conn->hdev;
1046
1047         bt_dev_dbg(hdev, "hcon %p", conn);
1048
1049         if (!conn->parent) {
1050                 struct hci_link *link, *t;
1051
1052                 list_for_each_entry_safe(link, t, &conn->link_list, list) {
1053                         struct hci_conn *child = link->conn;
1054
1055                         hci_conn_unlink(child);
1056
1057                         /* If hdev is down it means
1058                          * hci_dev_close_sync/hci_conn_hash_flush is in progress
1059                          * and the links don't need to be cleaned up here as all
1060                          * connections will be cleaned up anyway.
1061                          */
1062                         if (!test_bit(HCI_UP, &hdev->flags))
1063                                 continue;
1064
1065                         hci_conn_cleanup_child(child, conn->abort_reason);
1066                 }
1067
1068                 return;
1069         }
1070
1071         if (!conn->link)
1072                 return;
1073
1074         list_del_rcu(&conn->link->list);
1075         synchronize_rcu();
1076
1077         hci_conn_drop(conn->parent);
1078         hci_conn_put(conn->parent);
1079         conn->parent = NULL;
1080
1081         kfree(conn->link);
1082         conn->link = NULL;
1083 }
1084
1085 void hci_conn_del(struct hci_conn *conn)
1086 {
1087         struct hci_dev *hdev = conn->hdev;
1088
1089         BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1090
1091         hci_conn_unlink(conn);
1092
1093         cancel_delayed_work_sync(&conn->disc_work);
1094         cancel_delayed_work_sync(&conn->auto_accept_work);
1095         cancel_delayed_work_sync(&conn->idle_work);
1096
1097         if (conn->type == ACL_LINK) {
1098                 /* Unacked frames */
1099                 hdev->acl_cnt += conn->sent;
1100         } else if (conn->type == LE_LINK) {
1101                 cancel_delayed_work(&conn->le_conn_timeout);
1102
1103                 if (hdev->le_pkts)
1104                         hdev->le_cnt += conn->sent;
1105                 else
1106                         hdev->acl_cnt += conn->sent;
1107         } else {
1108                 /* Unacked ISO frames */
1109                 if (conn->type == ISO_LINK) {
1110                         if (hdev->iso_pkts)
1111                                 hdev->iso_cnt += conn->sent;
1112                         else if (hdev->le_pkts)
1113                                 hdev->le_cnt += conn->sent;
1114                         else
1115                                 hdev->acl_cnt += conn->sent;
1116                 }
1117         }
1118
1119         skb_queue_purge(&conn->data_q);
1120
1121         /* Remove the connection from the list and cleanup its remaining
1122          * state. This is a separate function since for some cases like
1123          * BT_CONNECT_SCAN we *only* want the cleanup part without the
1124          * rest of hci_conn_del.
1125          */
1126         hci_conn_cleanup(conn);
1127
1128         /* Dequeue callbacks using connection pointer as data */
1129         hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
1130 }
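
/* Reference counting sketch: most users never call hci_conn_del() directly
 * but balance hci_conn_hold()/hci_conn_drop() and let hci_conn_timeout()
 * tear the link down once the last reference is dropped:
 *
 *        hci_conn_hold(conn);
 *        ... use the connection ...
 *        hci_conn_drop(conn);    (arms disc_work once refcnt reaches 0)
 */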
1131
1132 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1133 {
1134         int use_src = bacmp(src, BDADDR_ANY);
1135         struct hci_dev *hdev = NULL, *d;
1136
1137         BT_DBG("%pMR -> %pMR", src, dst);
1138
1139         read_lock(&hci_dev_list_lock);
1140
1141         list_for_each_entry(d, &hci_dev_list, list) {
1142                 if (!test_bit(HCI_UP, &d->flags) ||
1143                     hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
1144                     d->dev_type != HCI_PRIMARY)
1145                         continue;
1146
1147                 /* Simple routing:
1148                  *   No source address - find interface with bdaddr != dst
1149                  *   Source address    - find interface with bdaddr == src
1150                  */
1151
1152                 if (use_src) {
1153                         bdaddr_t id_addr;
1154                         u8 id_addr_type;
1155
1156                         if (src_type == BDADDR_BREDR) {
1157                                 if (!lmp_bredr_capable(d))
1158                                         continue;
1159                                 bacpy(&id_addr, &d->bdaddr);
1160                                 id_addr_type = BDADDR_BREDR;
1161                         } else {
1162                                 if (!lmp_le_capable(d))
1163                                         continue;
1164
1165                                 hci_copy_identity_address(d, &id_addr,
1166                                                           &id_addr_type);
1167
1168                                 /* Convert from HCI to three-value type */
1169                                 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1170                                         id_addr_type = BDADDR_LE_PUBLIC;
1171                                 else
1172                                         id_addr_type = BDADDR_LE_RANDOM;
1173                         }
1174
1175                         if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1176                                 hdev = d; break;
1177                         }
1178                 } else {
1179                         if (bacmp(&d->bdaddr, dst)) {
1180                                 hdev = d; break;
1181                         }
1182                 }
1183         }
1184
1185         if (hdev)
1186                 hdev = hci_dev_hold(hdev);
1187
1188         read_unlock(&hci_dev_list_lock);
1189         return hdev;
1190 }
1191 EXPORT_SYMBOL(hci_get_route);
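
/* Usage sketch: the returned hdev carries a reference that the caller must
 * drop again, e.g.:
 *
 *        struct hci_dev *hdev = hci_get_route(&dst, &src, BDADDR_BREDR);
 *
 *        if (!hdev)
 *                return -EHOSTUNREACH;
 *        ...
 *        hci_dev_put(hdev);
 */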
1192
1193 /* This function requires the caller holds hdev->lock */
1194 static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1195 {
1196         struct hci_dev *hdev = conn->hdev;
1197
1198         hci_connect_le_scan_cleanup(conn, status);
1199
1200         /* Enable advertising in case this was a failed connection
1201          * attempt as a peripheral.
1202          */
1203         hci_enable_advertising(hdev);
1204 }
1205
1206 /* This function requires the caller holds hdev->lock */
1207 void hci_conn_failed(struct hci_conn *conn, u8 status)
1208 {
1209         struct hci_dev *hdev = conn->hdev;
1210
1211         bt_dev_dbg(hdev, "status 0x%2.2x", status);
1212
1213         switch (conn->type) {
1214         case LE_LINK:
1215                 hci_le_conn_failed(conn, status);
1216                 break;
1217         case ACL_LINK:
1218                 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1219                                     conn->dst_type, status);
1220                 break;
1221         }
1222
1223         /* In case BIG/PA sync failed, clear the conn flags so that
1224          * the conns will be correctly cleaned up by the ISO layer
1225          */
1226         test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
1227         test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);
1228
1229         conn->state = BT_CLOSED;
1230         hci_connect_cfm(conn, status);
1231         hci_conn_del(conn);
1232 }
1233
1234 /* This function requires the caller holds hdev->lock */
1235 u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
1236 {
1237         struct hci_dev *hdev = conn->hdev;
1238
1239         bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);
1240
1241         if (conn->handle == handle)
1242                 return 0;
1243
1244         if (handle > HCI_CONN_HANDLE_MAX) {
1245                 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
1246                            handle, HCI_CONN_HANDLE_MAX);
1247                 return HCI_ERROR_INVALID_PARAMETERS;
1248         }
1249
1250         /* If abort_reason has been set it means the connection is being
1251          * aborted and the handle shall not be changed.
1252          */
1253         if (conn->abort_reason)
1254                 return conn->abort_reason;
1255
1256         if (HCI_CONN_HANDLE_UNSET(conn->handle))
1257                 ida_free(&hdev->unset_handle_ida, conn->handle);
1258
1259         conn->handle = handle;
1260
1261         return 0;
1262 }
1263
1264 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1265                                 u8 dst_type, bool dst_resolved, u8 sec_level,
1266                                 u16 conn_timeout, u8 role, u8 phy, u8 sec_phy)
1267 {
1268         struct hci_conn *conn;
1269         struct smp_irk *irk;
1270         int err;
1271
1272         /* Let's make sure that LE is enabled. */
1273         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1274                 if (lmp_le_capable(hdev))
1275                         return ERR_PTR(-ECONNREFUSED);
1276
1277                 return ERR_PTR(-EOPNOTSUPP);
1278         }
1279
1280         /* Since the controller supports only one LE connection attempt at a
1281          * time, we return -EBUSY if there is any connection attempt running.
1282          */
1283         if (hci_lookup_le_connect(hdev))
1284                 return ERR_PTR(-EBUSY);
1285
1286         /* If there's already a connection object but it's not in
1287          * scanning state it means it must already be established, in
1288          * which case we can't do anything else except report a failure
1289          * to connect.
1290          */
1291         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1292         if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1293                 return ERR_PTR(-EBUSY);
1294         }
1295
1296         /* Check if the destination address has been resolved by the controller
1297          * since if it did then the identity address shall be used.
1298          */
1299         if (!dst_resolved) {
1300                 /* When given an identity address with existing identity
1301                  * resolving key, the connection needs to be established
1302                  * to a resolvable random address.
1303                  *
1304                  * Storing the resolvable random address is required here
1305                  * to handle connection failures. The address will later
1306                  * be resolved back into the original identity address
1307                  * from the connect request.
1308                  */
1309                 irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1310                 if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1311                         dst = &irk->rpa;
1312                         dst_type = ADDR_LE_DEV_RANDOM;
1313                 }
1314         }
1315
1316         if (conn) {
1317                 bacpy(&conn->dst, dst);
1318         } else {
1319                 conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
1320                 if (!conn)
1321                         return ERR_PTR(-ENOMEM);
1322                 hci_conn_hold(conn);
1323                 conn->pending_sec_level = sec_level;
1324         }
1325
1326         conn->dst_type = dst_type;
1327         conn->sec_level = BT_SECURITY_LOW;
1328         conn->conn_timeout = conn_timeout;
1329         conn->le_adv_phy = phy;
1330         conn->le_adv_sec_phy = sec_phy;
1331
1332         err = hci_connect_le_sync(hdev, conn);
1333         if (err) {
1334                 hci_conn_del(conn);
1335                 return ERR_PTR(err);
1336         }
1337
1338         return conn;
1339 }
1340
1341 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1342 {
1343         struct hci_conn *conn;
1344
1345         conn = hci_conn_hash_lookup_le(hdev, addr, type);
1346         if (!conn)
1347                 return false;
1348
1349         if (conn->state != BT_CONNECTED)
1350                 return false;
1351
1352         return true;
1353 }
1354
1355 /* This function requires the caller holds hdev->lock */
1356 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1357                                         bdaddr_t *addr, u8 addr_type)
1358 {
1359         struct hci_conn_params *params;
1360
1361         if (is_connected(hdev, addr, addr_type))
1362                 return -EISCONN;
1363
1364         params = hci_conn_params_lookup(hdev, addr, addr_type);
1365         if (!params) {
1366                 params = hci_conn_params_add(hdev, addr, addr_type);
1367                 if (!params)
1368                         return -ENOMEM;
1369
1370                 /* If we created new params, mark them to be deleted in
1371                  * hci_connect_le_scan_cleanup. This is a different case from
1372                  * existing disabled params, which will stay after cleanup.
1373                  */
1374                 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1375         }
1376
1377         /* We're trying to connect, so make sure params are at pend_le_conns */
1378         if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1379             params->auto_connect == HCI_AUTO_CONN_REPORT ||
1380             params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1381                 hci_pend_le_list_del_init(params);
1382                 hci_pend_le_list_add(params, &hdev->pend_le_conns);
1383         }
1384
1385         params->explicit_connect = true;
1386
1387         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1388                params->auto_connect);
1389
1390         return 0;
1391 }
1392
1393 static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1394 {
1395         struct hci_conn *conn;
1396         u8  big;
1397
1398         /* Allocate a BIG if not set */
1399         if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1400                 for (big = 0x00; big < 0xef; big++) {
1401
1402                         conn = hci_conn_hash_lookup_big(hdev, big);
1403                         if (!conn)
1404                                 break;
1405                 }
1406
1407                 if (big == 0xef)
1408                         return -EADDRNOTAVAIL;
1409
1410                 /* Update BIG */
1411                 qos->bcast.big = big;
1412         }
1413
1414         return 0;
1415 }
1416
1417 static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1418 {
1419         struct hci_conn *conn;
1420         u8  bis;
1421
1422         /* Allocate BIS if not set */
1423         if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1424                 if (qos->bcast.big != BT_ISO_QOS_BIG_UNSET) {
1425                         conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1426
1427                         if (conn) {
1428                                 /* If the BIG handle is already matched to an advertising
1429                                  * handle, do not allocate a new one.
1430                                  */
1431                                 qos->bcast.bis = conn->iso_qos.bcast.bis;
1432                                 return 0;
1433                         }
1434                 }
1435
1436                 /* Find an unused adv set to advertise BIS, skip instance 0x00
1437                  * since it is reserved as general purpose set.
1438                  * since it is reserved as the general purpose set.
1439                 for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1440                      bis++) {
1441
1442                         conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1443                         if (!conn)
1444                                 break;
1445                 }
1446
1447                 if (bis == hdev->le_num_of_adv_sets)
1448                         return -EADDRNOTAVAIL;
1449
1450                 /* Update BIS */
1451                 qos->bcast.bis = bis;
1452         }
1453
1454         return 0;
1455 }
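
/* Note: qos_set_big()/qos_set_bis() only fill in values the caller left as
 * BT_ISO_QOS_BIG_UNSET/BT_ISO_QOS_BIS_UNSET; the chosen BIS number doubles
 * as the extended advertising instance carrying the BIG, which is why
 * instance 0x00 (the general purpose set) is skipped.
 */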
1456
1457 /* This function requires the caller holds hdev->lock */
1458 static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1459                                     struct bt_iso_qos *qos, __u8 base_len,
1460                                     __u8 *base)
1461 {
1462         struct hci_conn *conn;
1463         int err;
1464
1465         /* Let's make sure that LE is enabled. */
1466         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1467                 if (lmp_le_capable(hdev))
1468                         return ERR_PTR(-ECONNREFUSED);
1469                 return ERR_PTR(-EOPNOTSUPP);
1470         }
1471
1472         err = qos_set_big(hdev, qos);
1473         if (err)
1474                 return ERR_PTR(err);
1475
1476         err = qos_set_bis(hdev, qos);
1477         if (err)
1478                 return ERR_PTR(err);
1479
1480         /* Check if the LE Create BIG command has already been sent */
1481         conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
1482                                                 qos->bcast.big);
1483         if (conn)
1484                 return ERR_PTR(-EADDRINUSE);
1485
1486         /* Check BIS settings against other bound BISes, since all
1487          * BISes in a BIG must have the same value for all parameters
1488          */
1489         conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1490
1491         if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
1492                      base_len != conn->le_per_adv_data_len ||
1493                      memcmp(conn->le_per_adv_data, base, base_len)))
1494                 return ERR_PTR(-EADDRINUSE);
1495
1496         conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1497         if (!conn)
1498                 return ERR_PTR(-ENOMEM);
1499
1500         conn->state = BT_CONNECT;
1501
1502         hci_conn_hold(conn);
1503         return conn;
1504 }
1505
1506 /* This function requires the caller holds hdev->lock */
1507 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1508                                      u8 dst_type, u8 sec_level,
1509                                      u16 conn_timeout,
1510                                      enum conn_reasons conn_reason)
1511 {
1512         struct hci_conn *conn;
1513
1514         /* Let's make sure that LE is enabled. */
1515         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1516                 if (lmp_le_capable(hdev))
1517                         return ERR_PTR(-ECONNREFUSED);
1518
1519                 return ERR_PTR(-EOPNOTSUPP);
1520         }
1521
1522         /* Some devices send ATT messages as soon as the physical link is
1523          * established. To be able to handle these ATT messages, the user-
1524          * space first establishes the connection and then starts the pairing
1525          * process.
1526          *
1527          * So if a hci_conn object already exists for the following connection
1528          * attempt, we simply update the pending_sec_level field
1529          * and return the object found.
1530          */
1531         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1532         if (conn) {
1533                 if (conn->pending_sec_level < sec_level)
1534                         conn->pending_sec_level = sec_level;
1535                 goto done;
1536         }
1537
1538         BT_DBG("requesting refresh of dst_addr");
1539
1540         conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1541         if (!conn)
1542                 return ERR_PTR(-ENOMEM);
1543
1544         if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1545                 hci_conn_del(conn);
1546                 return ERR_PTR(-EBUSY);
1547         }
1548
1549         conn->state = BT_CONNECT;
1550         set_bit(HCI_CONN_SCANNING, &conn->flags);
1551         conn->dst_type = dst_type;
1552         conn->sec_level = BT_SECURITY_LOW;
1553         conn->pending_sec_level = sec_level;
1554         conn->conn_timeout = conn_timeout;
1555         conn->conn_reason = conn_reason;
1556
1557         hci_update_passive_scan(hdev);
1558
1559 done:
1560         hci_conn_hold(conn);
1561         return conn;
1562 }
1563
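/* Create (or reuse) an outgoing BR/EDR ACL connection to @dst.
 *
 * An existing ACL hci_conn is reused when present; otherwise one is added
 * in the unset-handle state. A reference is held on the returned
 * connection, and hci_connect_acl_sync() is queued only when the link is
 * not already being set up (BT_OPEN or BT_CLOSED). Connections to our own
 * BD_ADDR are rejected (CVE-2020-26555). Returns the connection or an
 * ERR_PTR() on failure.
 */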
1564 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1565                                  u8 sec_level, u8 auth_type,
1566                                  enum conn_reasons conn_reason, u16 timeout)
1567 {
1568         struct hci_conn *acl;
1569
1570         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1571                 if (lmp_bredr_capable(hdev))
1572                         return ERR_PTR(-ECONNREFUSED);
1573
1574                 return ERR_PTR(-EOPNOTSUPP);
1575         }
1576
1577         /* Reject outgoing connection to a device with the same BD_ADDR, to
1578          * mitigate CVE-2020-26555
1579          */
1580         if (!bacmp(&hdev->bdaddr, dst)) {
1581                 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
1582                            dst);
1583                 return ERR_PTR(-ECONNREFUSED);
1584         }
1585
1586         acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1587         if (!acl) {
1588                 acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1589                 if (!acl)
1590                         return ERR_PTR(-ENOMEM);
1591         }
1592
1593         hci_conn_hold(acl);
1594
1595         acl->conn_reason = conn_reason;
1596         if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1597                 int err;
1598
1599                 acl->sec_level = BT_SECURITY_LOW;
1600                 acl->pending_sec_level = sec_level;
1601                 acl->auth_type = auth_type;
1602                 acl->conn_timeout = timeout;
1603
1604                 err = hci_connect_acl_sync(hdev, acl);
1605                 if (err) {
1606                         hci_conn_del(acl);
1607                         return ERR_PTR(err);
1608                 }
1609         }
1610
1611         return acl;
1612 }
1613
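/* Attach @conn to @parent as a child link.
 *
 * The new hci_link holds its own reference on @conn and takes a reference
 * on @parent before being appended to parent->link_list under RCU.
 * Returns the existing link if @conn is already linked, or NULL if @conn
 * already has a parent or the allocation fails.
 */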
1614 static struct hci_link *hci_conn_link(struct hci_conn *parent,
1615                                       struct hci_conn *conn)
1616 {
1617         struct hci_dev *hdev = parent->hdev;
1618         struct hci_link *link;
1619
1620         bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);
1621
1622         if (conn->link)
1623                 return conn->link;
1624
1625         if (conn->parent)
1626                 return NULL;
1627
1628         link = kzalloc(sizeof(*link), GFP_KERNEL);
1629         if (!link)
1630                 return NULL;
1631
1632         link->conn = hci_conn_hold(conn);
1633         conn->link = link;
1634         conn->parent = hci_conn_get(parent);
1635
1636         /* Use list_add_tail_rcu to append to the list */
1637         list_add_tail_rcu(&link->list, &parent->link_list);
1638
1639         return link;
1640 }
1641
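/* Set up a SCO/eSCO connection to @dst on top of an ACL link (used by the
 * SCO socket layer).
 *
 * The ACL is created or reused via hci_connect_acl() and the synchronous
 * connection is linked to it. If the ACL is already connected, SCO setup
 * starts immediately unless a mode change is pending, in which case it is
 * deferred until the mode change completes.
 */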
1642 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1643                                  __u16 setting, struct bt_codec *codec,
1644                                  u16 timeout)
1645 {
1646         struct hci_conn *acl;
1647         struct hci_conn *sco;
1648         struct hci_link *link;
1649
1650         acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1651                               CONN_REASON_SCO_CONNECT, timeout);
1652         if (IS_ERR(acl))
1653                 return acl;
1654
1655         sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1656         if (!sco) {
1657                 sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
1658                 if (!sco) {
1659                         hci_conn_drop(acl);
1660                         return ERR_PTR(-ENOMEM);
1661                 }
1662         }
1663
1664         link = hci_conn_link(acl, sco);
1665         if (!link) {
1666                 hci_conn_drop(acl);
1667                 hci_conn_drop(sco);
1668                 return ERR_PTR(-ENOLINK);
1669         }
1670
1671         sco->setting = setting;
1672         sco->codec = *codec;
1673
1674         if (acl->state == BT_CONNECTED &&
1675             (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1676                 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1677                 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1678
1679                 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1680                         /* defer SCO setup until the mode change has completed */
1681                         set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1682                         return sco;
1683                 }
1684
1685                 hci_sco_setup(acl, 0x00);
1686         }
1687
1688         return sco;
1689 }
1690
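/* Send the HCI LE Create BIG command for @conn.
 *
 * The number of BISes is taken from the bound ISO connections gathered by
 * bis_list(), while the remaining parameters (SDU interval/size, latency,
 * RTN, PHY, packing, framing, encryption and broadcast code) come from the
 * broadcast part of @qos.
 */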
1691 static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1692 {
1693         struct hci_dev *hdev = conn->hdev;
1694         struct hci_cp_le_create_big cp;
1695         struct iso_list_data data;
1696
1697         memset(&cp, 0, sizeof(cp));
1698
1699         data.big = qos->bcast.big;
1700         data.bis = qos->bcast.bis;
1701         data.count = 0;
1702
1703         /* Create a BIS for each bound connection */
1704         hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1705                                  BT_BOUND, &data);
1706
1707         cp.handle = qos->bcast.big;
1708         cp.adv_handle = qos->bcast.bis;
1709         cp.num_bis  = data.count;
1710         hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
1711         cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
1712         cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
1713         cp.bis.rtn  = qos->bcast.out.rtn;
1714         cp.bis.phy  = qos->bcast.out.phy;
1715         cp.bis.packing = qos->bcast.packing;
1716         cp.bis.framing = qos->bcast.framing;
1717         cp.bis.encryption = qos->bcast.encryption;
1718         memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));
1719
1720         return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
1721 }
1722
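/* hci_cmd_sync callback that (re)programs a CIG.
 *
 * Every CIS currently associated with the CIG identified by @data is
 * collected into a single LE Set CIG Parameters command so the controller
 * always sees a consistent view of the whole group.
 */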
1723 static int set_cig_params_sync(struct hci_dev *hdev, void *data)
1724 {
1725         u8 cig_id = PTR_UINT(data);
1726         struct hci_conn *conn;
1727         struct bt_iso_qos *qos;
1728         struct iso_cig_params pdu;
1729         u8 cis_id;
1730
1731         conn = hci_conn_hash_lookup_cig(hdev, cig_id);
1732         if (!conn)
1733                 return 0;
1734
1735         memset(&pdu, 0, sizeof(pdu));
1736
1737         qos = &conn->iso_qos;
1738         pdu.cp.cig_id = cig_id;
1739         hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
1740         hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
1741         pdu.cp.sca = qos->ucast.sca;
1742         pdu.cp.packing = qos->ucast.packing;
1743         pdu.cp.framing = qos->ucast.framing;
1744         pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
1745         pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
1746
1747         /* Reprogram all CIS(s) with the same CIG, valid ranges are:
1748          * num_cis: 0x00 to 0x1F
1749          * cis_id: 0x00 to 0xEF
1750          */
1751         for (cis_id = 0x00; cis_id < 0xf0 &&
1752              pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
1753                 struct hci_cis_params *cis;
1754
1755                 conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
1756                 if (!conn)
1757                         continue;
1758
1759                 qos = &conn->iso_qos;
1760
1761                 cis = &pdu.cis[pdu.cp.num_cis++];
1762                 cis->cis_id = cis_id;
1763                 cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
1764                 cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
1765                 cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
1766                               qos->ucast.in.phy;
1767                 cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
1768                               qos->ucast.out.phy;
1769                 cis->c_rtn  = qos->ucast.out.rtn;
1770                 cis->p_rtn  = qos->ucast.in.rtn;
1771         }
1772
1773         if (!pdu.cp.num_cis)
1774                 return 0;
1775
1776         return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1777                                      sizeof(pdu.cp) +
1778                                      pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
1779                                      HCI_CMD_TIMEOUT);
1780 }
1781
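/* Pick CIG/CIS identifiers for @conn if still unset and queue
 * set_cig_params_sync() to program the controller.
 *
 * A CIG is only picked when find_cis() reports no CIS in BT_CONNECT or
 * BT_CONNECTED state for it, i.e. it can still be reconfigured. Returns
 * false if no identifier is available, the requested CIS already exists or
 * queueing the command fails.
 */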
1782 static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
1783 {
1784         struct hci_dev *hdev = conn->hdev;
1785         struct iso_list_data data;
1786
1787         memset(&data, 0, sizeof(data));
1788
1789         /* Allocate the first still-reconfigurable CIG if not set */
1790         if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
1791                 for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
1792                         data.count = 0;
1793
1794                         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1795                                                  BT_CONNECT, &data);
1796                         if (data.count)
1797                                 continue;
1798
1799                         hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1800                                                  BT_CONNECTED, &data);
1801                         if (!data.count)
1802                                 break;
1803                 }
1804
1805                 if (data.cig == 0xf0)
1806                         return false;
1807
1808                 /* Update CIG */
1809                 qos->ucast.cig = data.cig;
1810         }
1811
1812         if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
1813                 if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
1814                                              qos->ucast.cis))
1815                         return false;
1816                 goto done;
1817         }
1818
1819         /* Allocate first available CIS if not set */
1820         for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
1821              data.cis++) {
1822                 if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
1823                                               data.cis)) {
1824                         /* Update CIS */
1825                         qos->ucast.cis = data.cis;
1826                         break;
1827                 }
1828         }
1829
1830         if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
1831                 return false;
1832
1833 done:
1834         if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
1835                                UINT_PTR(qos->ucast.cig), NULL) < 0)
1836                 return false;
1837
1838         return true;
1839 }
1840
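/* Bind a CIS hci_conn for @dst with the given QoS without initiating the
 * connection yet.
 *
 * Unset in/out intervals and latencies are mirrored from the opposite
 * direction (they cannot be zero), the link PHYs are taken from the QoS
 * preference and the CIG parameters are programmed through
 * hci_le_set_cig_params(). The connection is left in BT_BOUND state.
 */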
1841 struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1842                               __u8 dst_type, struct bt_iso_qos *qos)
1843 {
1844         struct hci_conn *cis;
1845
1846         cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
1847                                        qos->ucast.cis);
1848         if (!cis) {
1849                 cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1850                 if (!cis)
1851                         return ERR_PTR(-ENOMEM);
1852                 cis->cleanup = cis_cleanup;
1853                 cis->dst_type = dst_type;
1854                 cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
1855                 cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
1856         }
1857
1858         if (cis->state == BT_CONNECTED)
1859                 return cis;
1860
1861         /* Check if the CIS has been set and the settings match */
1862         if (cis->state == BT_BOUND &&
1863             !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
1864                 return cis;
1865
1866         /* Update LINK PHYs according to QoS preference */
1867         cis->le_tx_phy = qos->ucast.out.phy;
1868         cis->le_rx_phy = qos->ucast.in.phy;
1869
1870         /* If output interval is not set use the input interval as it cannot be
1871          * 0x000000.
1872          */
1873         if (!qos->ucast.out.interval)
1874                 qos->ucast.out.interval = qos->ucast.in.interval;
1875
1876         /* If input interval is not set use the output interval as it cannot be
1877          * 0x000000.
1878          */
1879         if (!qos->ucast.in.interval)
1880                 qos->ucast.in.interval = qos->ucast.out.interval;
1881
1882         /* If output latency is not set use the input latency as it cannot be
1883          * 0x0000.
1884          */
1885         if (!qos->ucast.out.latency)
1886                 qos->ucast.out.latency = qos->ucast.in.latency;
1887
1888         /* If input latency is not set use the output latency as it cannot be
1889          * 0x0000.
1890          */
1891         if (!qos->ucast.in.latency)
1892                 qos->ucast.in.latency = qos->ucast.out.latency;
1893
1894         if (!hci_le_set_cig_params(cis, qos)) {
1895                 hci_conn_drop(cis);
1896                 return ERR_PTR(-EINVAL);
1897         }
1898
1899         hci_conn_hold(cis);
1900
1901         cis->iso_qos = *qos;
1902         cis->state = BT_BOUND;
1903
1904         return cis;
1905 }
1906
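/* Configure the ISO data path for @conn.
 *
 * An LE Setup ISO Data Path command is sent for each direction with a
 * non-zero SDU size, using the HCI transport (path 0x00) and the
 * Transparent Data codec (0x03). Returns false if sending either command
 * fails.
 */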
1907 bool hci_iso_setup_path(struct hci_conn *conn)
1908 {
1909         struct hci_dev *hdev = conn->hdev;
1910         struct hci_cp_le_setup_iso_path cmd;
1911
1912         memset(&cmd, 0, sizeof(cmd));
1913
1914         if (conn->iso_qos.ucast.out.sdu) {
1915                 cmd.handle = cpu_to_le16(conn->handle);
1916                 cmd.direction = 0x00; /* Input (Host to Controller) */
1917                 cmd.path = 0x00; /* HCI path if enabled */
1918                 cmd.codec = 0x03; /* Transparent Data */
1919
1920                 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1921                                  &cmd) < 0)
1922                         return false;
1923         }
1924
1925         if (conn->iso_qos.ucast.in.sdu) {
1926                 cmd.handle = cpu_to_le16(conn->handle);
1927                 cmd.direction = 0x01; /* Output (Controller to Host) */
1928                 cmd.path = 0x00; /* HCI path if enabled */
1929                 cmd.codec = 0x03; /* Transparent Data */
1930
1931                 if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1932                                  &cmd) < 0)
1933                         return false;
1934         }
1935
1936         return true;
1937 }
1938
1939 int hci_conn_check_create_cis(struct hci_conn *conn)
1940 {
1941         if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
1942                 return -EINVAL;
1943
1944         if (!conn->parent || conn->parent->state != BT_CONNECTED ||
1945             conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
1946                 return 1;
1947
1948         return 0;
1949 }
1950
1951 static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
1952 {
1953         return hci_le_create_cis_sync(hdev);
1954 }
1955
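/* Queue LE Create CIS if any bound CIS is ready to be created.
 *
 * Returns -EBUSY when a Create CIS is already in progress for some
 * connection, 0 when nothing is pending, otherwise the result of queueing
 * hci_create_cis_sync().
 */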
1956 int hci_le_create_cis_pending(struct hci_dev *hdev)
1957 {
1958         struct hci_conn *conn;
1959         bool pending = false;
1960
1961         rcu_read_lock();
1962
1963         list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
1964                 if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
1965                         rcu_read_unlock();
1966                         return -EBUSY;
1967                 }
1968
1969                 if (!hci_conn_check_create_cis(conn))
1970                         pending = true;
1971         }
1972
1973         rcu_read_unlock();
1974
1975         if (!pending)
1976                 return 0;
1977
1978         /* Queue Create CIS */
1979         return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
1980 }
1981
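/* Fill in unset ISO QoS fields with defaults.
 *
 * The SDU size falls back to the ISO, LE or ACL MTU (in that order), a PHY
 * of BT_ISO_PHY_ANY is replaced by @phy, and interval/latency default to
 * the values of the underlying LE ACL connection.
 */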
1982 static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
1983                               struct bt_iso_io_qos *qos, __u8 phy)
1984 {
1985         /* Only set MTU if PHY is enabled */
1986         if (!qos->sdu && qos->phy) {
1987                 if (hdev->iso_mtu > 0)
1988                         qos->sdu = hdev->iso_mtu;
1989                 else if (hdev->le_mtu > 0)
1990                         qos->sdu = hdev->le_mtu;
1991                 else
1992                         qos->sdu = hdev->acl_mtu;
1993         }
1994
1995         /* Use the same PHY as ACL if set to any */
1996         if (qos->phy == BT_ISO_PHY_ANY)
1997                 qos->phy = phy;
1998
1999         /* Use LE ACL connection interval if not set */
2000         if (!qos->interval)
2001                 /* Convert the ACL interval from 1.25 ms units to microseconds */
2002                 qos->interval = conn->le_conn_interval * 1250;
2003
2004         /* Use LE ACL connection latency if not set */
2005         if (!qos->latency)
2006                 qos->latency = conn->le_conn_latency;
2007 }
2008
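/* hci_cmd_sync callback that starts periodic advertising for the BIS and
 * then issues LE Create BIG using the connection's broadcast QoS.
 */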
2009 static int create_big_sync(struct hci_dev *hdev, void *data)
2010 {
2011         struct hci_conn *conn = data;
2012         struct bt_iso_qos *qos = &conn->iso_qos;
2013         u16 interval, sync_interval = 0;
2014         u32 flags = 0;
2015         int err;
2016
2017         if (qos->bcast.out.phy == 0x02)
2018                 flags |= MGMT_ADV_FLAG_SEC_2M;
2019
2020         /* Align intervals */
2021         interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;
2022
2023         if (qos->bcast.bis)
2024                 sync_interval = interval * 4;
2025
2026         err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
2027                                      conn->le_per_adv_data, flags, interval,
2028                                      interval, sync_interval);
2029         if (err)
2030                 return err;
2031
2032         return hci_le_create_big(conn, &conn->iso_qos);
2033 }
2034
2035 static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
2036 {
2037         struct hci_cp_le_pa_create_sync *cp = data;
2038
2039         bt_dev_dbg(hdev, "");
2040
2041         if (err)
2042                 bt_dev_err(hdev, "Unable to create PA: %d", err);
2043
2044         kfree(cp);
2045 }
2046
2047 static int create_pa_sync(struct hci_dev *hdev, void *data)
2048 {
2049         struct hci_cp_le_pa_create_sync *cp = data;
2050         int err;
2051
2052         err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2053                                     sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2054         if (err) {
2055                 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2056                 return err;
2057         }
2058
2059         return hci_update_passive_scan_sync(hdev);
2060 }
2061
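/* Start synchronizing to a periodic advertising train (used by the ISO
 * socket layer for broadcast sinks).
 *
 * Only one PA create sync may be pending at a time, guarded by the
 * HCI_PA_SYNC flag; the LE Periodic Advertising Create Sync command and a
 * passive scan update are queued on the cmd_sync work.
 */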
2062 int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
2063                        __u8 sid, struct bt_iso_qos *qos)
2064 {
2065         struct hci_cp_le_pa_create_sync *cp;
2066
2067         if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
2068                 return -EBUSY;
2069
2070         cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2071         if (!cp) {
2072                 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2073                 return -ENOMEM;
2074         }
2075
2076         cp->options = qos->bcast.options;
2077         cp->sid = sid;
2078         cp->addr_type = dst_type;
2079         bacpy(&cp->addr, dst);
2080         cp->skip = cpu_to_le16(qos->bcast.skip);
2081         cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
2082         cp->sync_cte_type = qos->bcast.sync_cte_type;
2083
2084         /* Queue the PA create sync command and a passive scan update */
2085         return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
2086 }
2087
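/* Synchronize to up to 0x11 BISes of a remote BIG received over the
 * periodic advertising train identified by @sync_handle. The BIG handle,
 * encryption parameters, MSE and timeout are taken from @qos.
 */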
2088 int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
2089                            struct bt_iso_qos *qos,
2090                            __u16 sync_handle, __u8 num_bis, __u8 bis[])
2091 {
2092         struct _packed {
2093                 struct hci_cp_le_big_create_sync cp;
2094                 __u8  bis[0x11];
2095         } pdu;
2096         int err;
2097
2098         if (num_bis < 0x01 || num_bis > sizeof(pdu.bis))
2099                 return -EINVAL;
2100
2101         err = qos_set_big(hdev, qos);
2102         if (err)
2103                 return err;
2104
2105         if (hcon)
2106                 hcon->iso_qos.bcast.big = qos->bcast.big;
2107
2108         memset(&pdu, 0, sizeof(pdu));
2109         pdu.cp.handle = qos->bcast.big;
2110         pdu.cp.sync_handle = cpu_to_le16(sync_handle);
2111         pdu.cp.encryption = qos->bcast.encryption;
2112         memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
2113         pdu.cp.mse = qos->bcast.mse;
2114         pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
2115         pdu.cp.num_bis = num_bis;
2116         memcpy(pdu.bis, bis, num_bis);
2117
2118         return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2119                             sizeof(pdu.cp) + num_bis, &pdu);
2120 }
2121
2122 static void create_big_complete(struct hci_dev *hdev, void *data, int err)
2123 {
2124         struct hci_conn *conn = data;
2125
2126         bt_dev_dbg(hdev, "conn %p", conn);
2127
2128         if (err) {
2129                 bt_dev_err(hdev, "Unable to create BIG: %d", err);
2130                 hci_connect_cfm(conn, err);
2131                 hci_conn_del(conn);
2132         }
2133 }
2134
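/* Bind a broadcast source (BIS) hci_conn without starting the BIG yet.
 *
 * An already open BIS of the same BIG is reused when available. Otherwise
 * a new connection is added, the BASE (if any) is wrapped in a Basic Audio
 * Announcement service data element and stored as periodic advertising
 * data, and the connection is linked to any existing BIS of the same BIG
 * before being left in BT_BOUND state.
 */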
2135 struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
2136                               struct bt_iso_qos *qos,
2137                               __u8 base_len, __u8 *base)
2138 {
2139         struct hci_conn *conn;
2140         struct hci_conn *parent;
2141         __u8 eir[HCI_MAX_PER_AD_LENGTH];
2142         struct hci_link *link;
2143
2144         /* Look for any BIS that is open for rebinding */
2145         conn = hci_conn_hash_lookup_big_state(hdev, qos->bcast.big, BT_OPEN);
2146         if (conn) {
2147                 memcpy(qos, &conn->iso_qos, sizeof(*qos));
2148                 conn->state = BT_CONNECTED;
2149                 return conn;
2150         }
2151
2152         if (base_len && base)
2153                 base_len = eir_append_service_data(eir, 0,  0x1851,
2154                                                    base, base_len);
2155
2156         /* We need a hci_conn object using BDADDR_ANY as dst */
2157         conn = hci_add_bis(hdev, dst, qos, base_len, eir);
2158         if (IS_ERR(conn))
2159                 return conn;
2160
2161         /* Update LINK PHYs according to QoS preference */
2162         conn->le_tx_phy = qos->bcast.out.phy;
2164
2165         /* Add Basic Announcement into Periodic Adv Data if BASE is set */
2166         if (base_len && base) {
2167                 memcpy(conn->le_per_adv_data,  eir, sizeof(eir));
2168                 conn->le_per_adv_data_len = base_len;
2169         }
2170
2171         hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2172                           conn->le_tx_phy ? conn->le_tx_phy :
2173                           hdev->le_tx_def_phys);
2174
2175         conn->iso_qos = *qos;
2176         conn->state = BT_BOUND;
2177
2178         /* Link BISes together */
2179         parent = hci_conn_hash_lookup_big(hdev,
2180                                           conn->iso_qos.bcast.big);
2181         if (parent && parent != conn) {
2182                 link = hci_conn_link(parent, conn);
2183                 if (!link) {
2184                         hci_conn_drop(conn);
2185                         return ERR_PTR(-ENOLINK);
2186                 }
2187
2188                 /* Link takes the refcount */
2189                 hci_conn_drop(conn);
2190         }
2191
2192         return conn;
2193 }
2194
2195 static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2196 {
2197         struct iso_list_data *d = data;
2198
2199         /* Skip if not broadcast/ANY address */
2200         if (bacmp(&conn->dst, BDADDR_ANY))
2201                 return;
2202
2203         if (d->big != conn->iso_qos.bcast.big ||
2204             d->bis == BT_ISO_QOS_BIS_UNSET ||
2205             d->bis != conn->iso_qos.bcast.bis)
2206                 return;
2207
2208         set_bit(HCI_CONN_PER_ADV, &conn->flags);
2209 }
2210
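/* Create a broadcast ISO (BIS) connection (used by the ISO socket layer
 * for broadcast sources).
 *
 * The BIS is bound via hci_bind_bis(); if it is not already connected,
 * create_big_sync() is queued to start periodic advertising and issue
 * LE Create BIG.
 */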
2211 struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
2212                                  __u8 dst_type, struct bt_iso_qos *qos,
2213                                  __u8 base_len, __u8 *base)
2214 {
2215         struct hci_conn *conn;
2216         int err;
2217         struct iso_list_data data;
2218
2219         conn = hci_bind_bis(hdev, dst, qos, base_len, base);
2220         if (IS_ERR(conn))
2221                 return conn;
2222
2223         if (conn->state == BT_CONNECTED)
2224                 return conn;
2225
2226         data.big = qos->bcast.big;
2227         data.bis = qos->bcast.bis;
2228
2229         /* Set HCI_CONN_PER_ADV for all bound connections, to mark that
2230          * the start periodic advertising and create BIG commands have
2231          * been queued
2232          */
2233         hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
2234                                  BT_BOUND, &data);
2235
2236         /* Queue start periodic advertising and create BIG */
2237         err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
2238                                  create_big_complete);
2239         if (err < 0) {
2240                 hci_conn_drop(conn);
2241                 return ERR_PTR(err);
2242         }
2243
2244         return conn;
2245 }
2246
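/* Create a unicast ISO (CIS) connection to @dst.
 *
 * An LE ACL to the peer is set up first, via hci_connect_le() when the
 * controller is advertising and hci_connect_le_scan() otherwise. The
 * unicast QoS is then completed from the ACL parameters, the CIS is bound
 * and linked to the ACL, and LE Create CIS is queued.
 */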
2247 struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
2248                                  __u8 dst_type, struct bt_iso_qos *qos)
2249 {
2250         struct hci_conn *le;
2251         struct hci_conn *cis;
2252         struct hci_link *link;
2253
2254         if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2255                 le = hci_connect_le(hdev, dst, dst_type, false,
2256                                     BT_SECURITY_LOW,
2257                                     HCI_LE_CONN_TIMEOUT,
2258                                     HCI_ROLE_SLAVE, 0, 0);
2259         else
2260                 le = hci_connect_le_scan(hdev, dst, dst_type,
2261                                          BT_SECURITY_LOW,
2262                                          HCI_LE_CONN_TIMEOUT,
2263                                          CONN_REASON_ISO_CONNECT);
2264         if (IS_ERR(le))
2265                 return le;
2266
2267         hci_iso_qos_setup(hdev, le, &qos->ucast.out,
2268                           le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
2269         hci_iso_qos_setup(hdev, le, &qos->ucast.in,
2270                           le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
2271
2272         cis = hci_bind_cis(hdev, dst, dst_type, qos);
2273         if (IS_ERR(cis)) {
2274                 hci_conn_drop(le);
2275                 return cis;
2276         }
2277
2278         link = hci_conn_link(le, cis);
2279         if (!link) {
2280                 hci_conn_drop(le);
2281                 hci_conn_drop(cis);
2282                 return ERR_PTR(-ENOLINK);
2283         }
2284
2285         /* Link takes the refcount */
2286         hci_conn_drop(cis);
2287
2288         cis->state = BT_CONNECT;
2289
2290         hci_le_create_cis_pending(hdev);
2291
2292         return cis;
2293 }
2294
2295 /* Check link security requirement */
2296 int hci_conn_check_link_mode(struct hci_conn *conn)
2297 {
2298         BT_DBG("hcon %p", conn);
2299
2300         /* In Secure Connections Only mode, it is required that Secure
2301          * Connections is used and the link is encrypted with AES-CCM
2302          * using a P-256 authenticated combination key.
2303          */
2304         if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2305                 if (!hci_conn_sc_enabled(conn) ||
2306                     !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2307                     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2308                         return 0;
2309         }
2310
2311         /* AES encryption is required for Level 4:
2312          *
2313          * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2314          * page 1319:
2315          *
2316          * 128-bit equivalent strength for link and encryption keys
2317          * required using FIPS approved algorithms (E0 not allowed,
2318          * SAFER+ not allowed, and P-192 not allowed; encryption key
2319          * not shortened)
2320          */
2321         if (conn->sec_level == BT_SECURITY_FIPS &&
2322             !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2323                 bt_dev_err(conn->hdev,
2324                            "Invalid security: Missing AES-CCM usage");
2325                 return 0;
2326         }
2327
2328         if (hci_conn_ssp_enabled(conn) &&
2329             !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2330                 return 0;
2331
2332         return 1;
2333 }
2334
2335 /* Authenticate remote device */
2336 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
2337 {
2338         BT_DBG("hcon %p", conn);
2339
2340         if (conn->pending_sec_level > sec_level)
2341                 sec_level = conn->pending_sec_level;
2342
2343         if (sec_level > conn->sec_level)
2344                 conn->pending_sec_level = sec_level;
2345         else if (test_bit(HCI_CONN_AUTH, &conn->flags))
2346                 return 1;
2347
2348         /* Make sure we preserve an existing MITM requirement */
2349         auth_type |= (conn->auth_type & 0x01);
2350
2351         conn->auth_type = auth_type;
2352
2353         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2354                 struct hci_cp_auth_requested cp;
2355
2356                 cp.handle = cpu_to_le16(conn->handle);
2357                 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
2358                              sizeof(cp), &cp);
2359
2360                 /* Set the ENCRYPT_PEND to trigger encryption after
2361                  * authentication.
2362                  */
2363                 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2364                         set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2365         }
2366
2367         return 0;
2368 }
2369
2370 /* Encrypt the link */
2371 static void hci_conn_encrypt(struct hci_conn *conn)
2372 {
2373         BT_DBG("hcon %p", conn);
2374
2375         if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2376                 struct hci_cp_set_conn_encrypt cp;
2377                 cp.handle  = cpu_to_le16(conn->handle);
2378                 cp.encrypt = 0x01;
2379                 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2380                              &cp);
2381         }
2382 }
2383
2384 /* Enable security */
2385 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
2386                       bool initiator)
2387 {
2388         BT_DBG("hcon %p", conn);
2389
2390         if (conn->type == LE_LINK)
2391                 return smp_conn_security(conn, sec_level);
2392
2393         /* For sdp we don't need the link key. */
2394         if (sec_level == BT_SECURITY_SDP)
2395                 return 1;
2396
2397         /* For non-2.1 devices and low security levels we don't need the link
2398            key. */
2399         if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
2400                 return 1;
2401
2402         /* For other security levels we need the link key. */
2403         if (!test_bit(HCI_CONN_AUTH, &conn->flags))
2404                 goto auth;
2405
2406         switch (conn->key_type) {
2407         case HCI_LK_AUTH_COMBINATION_P256:
2408                 /* An authenticated FIPS approved combination key has
2409                  * sufficient security for security level 4 or lower.
2410                  */
2411                 if (sec_level <= BT_SECURITY_FIPS)
2412                         goto encrypt;
2413                 break;
2414         case HCI_LK_AUTH_COMBINATION_P192:
2415                 /* An authenticated combination key has sufficient security for
2416                  * security level 3 or lower.
2417                  */
2418                 if (sec_level <= BT_SECURITY_HIGH)
2419                         goto encrypt;
2420                 break;
2421         case HCI_LK_UNAUTH_COMBINATION_P192:
2422         case HCI_LK_UNAUTH_COMBINATION_P256:
2423                 /* An unauthenticated combination key has sufficient security
2424                  * for security level 2 or lower.
2425                  */
2426                 if (sec_level <= BT_SECURITY_MEDIUM)
2427                         goto encrypt;
2428                 break;
2429         case HCI_LK_COMBINATION:
2430                 /* A combination key always has sufficient security for
2431                  * security level 2 or lower. High security level requires that
2432                  * the combination key is generated using the maximum PIN code
2433                  * length (16). For pre-2.1 units.
2434                  */
2435                 if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
2436                         goto encrypt;
2437                 break;
2438         default:
2439                 break;
2440         }
2441
2442 auth:
2443         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
2444                 return 0;
2445
2446         if (initiator)
2447                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2448
2449         if (!hci_conn_auth(conn, sec_level, auth_type))
2450                 return 0;
2451
2452 encrypt:
2453         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2454                 /* Ensure that the encryption key size has been read,
2455                  * otherwise stall the upper layer responses.
2456                  */
2457                 if (!conn->enc_key_size)
2458                         return 0;
2459
2460                 /* Nothing else needed, all requirements are met */
2461                 return 1;
2462         }
2463
2464         hci_conn_encrypt(conn);
2465         return 0;
2466 }
2467 EXPORT_SYMBOL(hci_conn_security);
2468
2469 /* Check secure link requirement */
2470 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2471 {
2472         BT_DBG("hcon %p", conn);
2473
2474         /* Accept if non-secure or higher security level is required */
2475         if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2476                 return 1;
2477
2478         /* Accept if secure or higher security level is already present */
2479         if (conn->sec_level == BT_SECURITY_HIGH ||
2480             conn->sec_level == BT_SECURITY_FIPS)
2481                 return 1;
2482
2483         /* Reject non-secure link */
2484         return 0;
2485 }
2486 EXPORT_SYMBOL(hci_conn_check_secure);
2487
2488 /* Switch role */
2489 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2490 {
2491         BT_DBG("hcon %p", conn);
2492
2493         if (role == conn->role)
2494                 return 1;
2495
2496         if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2497                 struct hci_cp_switch_role cp;
2498                 bacpy(&cp.bdaddr, &conn->dst);
2499                 cp.role = role;
2500                 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2501         }
2502
2503         return 0;
2504 }
2505 EXPORT_SYMBOL(hci_conn_switch_role);
2506
2507 /* Enter active mode */
2508 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2509 {
2510         struct hci_dev *hdev = conn->hdev;
2511
2512         BT_DBG("hcon %p mode %d", conn, conn->mode);
2513
2514         if (conn->mode != HCI_CM_SNIFF)
2515                 goto timer;
2516
2517         if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2518                 goto timer;
2519
2520         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2521                 struct hci_cp_exit_sniff_mode cp;
2522                 cp.handle = cpu_to_le16(conn->handle);
2523                 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2524         }
2525
2526 timer:
2527         if (hdev->idle_timeout > 0)
2528                 queue_delayed_work(hdev->workqueue, &conn->idle_work,
2529                                    msecs_to_jiffies(hdev->idle_timeout));
2530 }
2531
2532 /* Drop all connections on the device */
2533 void hci_conn_hash_flush(struct hci_dev *hdev)
2534 {
2535         struct list_head *head = &hdev->conn_hash.list;
2536         struct hci_conn *conn;
2537
2538         BT_DBG("hdev %s", hdev->name);
2539
2540         /* We should not traverse the list here, because hci_conn_del
2541          * can remove extra links, which may cause the list traversal
2542          * to hit items that have already been released.
2543          */
2544         while ((conn = list_first_entry_or_null(head,
2545                                                 struct hci_conn,
2546                                                 list)) != NULL) {
2547                 conn->state = BT_CLOSED;
2548                 hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
2549                 hci_conn_del(conn);
2550         }
2551 }
2552
2553 static u32 get_link_mode(struct hci_conn *conn)
2554 {
2555         u32 link_mode = 0;
2556
2557         if (conn->role == HCI_ROLE_MASTER)
2558                 link_mode |= HCI_LM_MASTER;
2559
2560         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2561                 link_mode |= HCI_LM_ENCRYPT;
2562
2563         if (test_bit(HCI_CONN_AUTH, &conn->flags))
2564                 link_mode |= HCI_LM_AUTH;
2565
2566         if (test_bit(HCI_CONN_SECURE, &conn->flags))
2567                 link_mode |= HCI_LM_SECURE;
2568
2569         if (test_bit(HCI_CONN_FIPS, &conn->flags))
2570                 link_mode |= HCI_LM_FIPS;
2571
2572         return link_mode;
2573 }
2574
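/* Copy a snapshot of the device's connection list to user space (at most
 * req.conn_num entries), as used by the HCIGETCONNLIST ioctl.
 */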
2575 int hci_get_conn_list(void __user *arg)
2576 {
2577         struct hci_conn *c;
2578         struct hci_conn_list_req req, *cl;
2579         struct hci_conn_info *ci;
2580         struct hci_dev *hdev;
2581         int n = 0, size, err;
2582
2583         if (copy_from_user(&req, arg, sizeof(req)))
2584                 return -EFAULT;
2585
2586         if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2587                 return -EINVAL;
2588
2589         size = sizeof(req) + req.conn_num * sizeof(*ci);
2590
2591         cl = kmalloc(size, GFP_KERNEL);
2592         if (!cl)
2593                 return -ENOMEM;
2594
2595         hdev = hci_dev_get(req.dev_id);
2596         if (!hdev) {
2597                 kfree(cl);
2598                 return -ENODEV;
2599         }
2600
2601         ci = cl->conn_info;
2602
2603         hci_dev_lock(hdev);
2604         list_for_each_entry(c, &hdev->conn_hash.list, list) {
2605                 bacpy(&(ci + n)->bdaddr, &c->dst);
2606                 (ci + n)->handle = c->handle;
2607                 (ci + n)->type  = c->type;
2608                 (ci + n)->out   = c->out;
2609                 (ci + n)->state = c->state;
2610                 (ci + n)->link_mode = get_link_mode(c);
2611                 if (++n >= req.conn_num)
2612                         break;
2613         }
2614         hci_dev_unlock(hdev);
2615
2616         cl->dev_id = hdev->id;
2617         cl->conn_num = n;
2618         size = sizeof(req) + n * sizeof(*ci);
2619
2620         hci_dev_put(hdev);
2621
2622         err = copy_to_user(arg, cl, size);
2623         kfree(cl);
2624
2625         return err ? -EFAULT : 0;
2626 }
2627
2628 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2629 {
2630         struct hci_conn_info_req req;
2631         struct hci_conn_info ci;
2632         struct hci_conn *conn;
2633         char __user *ptr = arg + sizeof(req);
2634
2635         if (copy_from_user(&req, arg, sizeof(req)))
2636                 return -EFAULT;
2637
2638         hci_dev_lock(hdev);
2639         conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2640         if (conn) {
2641                 bacpy(&ci.bdaddr, &conn->dst);
2642                 ci.handle = conn->handle;
2643                 ci.type  = conn->type;
2644                 ci.out   = conn->out;
2645                 ci.state = conn->state;
2646                 ci.link_mode = get_link_mode(conn);
2647         }
2648         hci_dev_unlock(hdev);
2649
2650         if (!conn)
2651                 return -ENOENT;
2652
2653         return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2654 }
2655
2656 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2657 {
2658         struct hci_auth_info_req req;
2659         struct hci_conn *conn;
2660
2661         if (copy_from_user(&req, arg, sizeof(req)))
2662                 return -EFAULT;
2663
2664         hci_dev_lock(hdev);
2665         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2666         if (conn)
2667                 req.type = conn->auth_type;
2668         hci_dev_unlock(hdev);
2669
2670         if (!conn)
2671                 return -ENOENT;
2672
2673         return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2674 }
2675
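/* Allocate an hci_chan on @conn (used by the L2CAP layer to queue outgoing
 * data per channel).
 *
 * The channel takes a reference on the connection and is added to
 * conn->chan_list under RCU; creation is refused once the connection is
 * marked HCI_CONN_DROP.
 */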
2676 struct hci_chan *hci_chan_create(struct hci_conn *conn)
2677 {
2678         struct hci_dev *hdev = conn->hdev;
2679         struct hci_chan *chan;
2680
2681         BT_DBG("%s hcon %p", hdev->name, conn);
2682
2683         if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2684                 BT_DBG("Refusing to create new hci_chan");
2685                 return NULL;
2686         }
2687
2688         chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2689         if (!chan)
2690                 return NULL;
2691
2692         chan->conn = hci_conn_get(conn);
2693         skb_queue_head_init(&chan->data_q);
2694         chan->state = BT_CONNECTED;
2695
2696         list_add_rcu(&chan->list, &conn->chan_list);
2697
2698         return chan;
2699 }
2700
2701 void hci_chan_del(struct hci_chan *chan)
2702 {
2703         struct hci_conn *conn = chan->conn;
2704         struct hci_dev *hdev = conn->hdev;
2705
2706         BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
2707
2708         list_del_rcu(&chan->list);
2709
2710         synchronize_rcu();
2711
2712         /* Prevent new hci_chan's from being created for this hci_conn */
2713         set_bit(HCI_CONN_DROP, &conn->flags);
2714
2715         hci_conn_put(conn);
2716
2717         skb_queue_purge(&chan->data_q);
2718         kfree(chan);
2719 }
2720
2721 void hci_chan_list_flush(struct hci_conn *conn)
2722 {
2723         struct hci_chan *chan, *n;
2724
2725         BT_DBG("hcon %p", conn);
2726
2727         list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2728                 hci_chan_del(chan);
2729 }
2730
2731 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2732                                                  __u16 handle)
2733 {
2734         struct hci_chan *hchan;
2735
2736         list_for_each_entry(hchan, &hcon->chan_list, list) {
2737                 if (hchan->handle == handle)
2738                         return hchan;
2739         }
2740
2741         return NULL;
2742 }
2743
2744 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2745 {
2746         struct hci_conn_hash *h = &hdev->conn_hash;
2747         struct hci_conn *hcon;
2748         struct hci_chan *hchan = NULL;
2749
2750         rcu_read_lock();
2751
2752         list_for_each_entry_rcu(hcon, &h->list, list) {
2753                 hchan = __hci_chan_lookup_handle(hcon, handle);
2754                 if (hchan)
2755                         break;
2756         }
2757
2758         rcu_read_unlock();
2759
2760         return hchan;
2761 }
2762
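/* Report the PHY/slot combinations usable on @conn as a mask of BT_PHY_*
 * bits, derived from the link type, the negotiated packet types and, for
 * LE, the current TX/RX PHYs.
 */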
2763 u32 hci_conn_get_phy(struct hci_conn *conn)
2764 {
2765         u32 phys = 0;
2766
2767         /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2768          * Table 6.2: Packets defined for synchronous, asynchronous, and
2769          * CPB logical transport types.
2770          */
2771         switch (conn->type) {
2772         case SCO_LINK:
2773                 /* SCO logical transport (1 Mb/s):
2774                  * HV1, HV2, HV3 and DV.
2775                  */
2776                 phys |= BT_PHY_BR_1M_1SLOT;
2777
2778                 break;
2779
2780         case ACL_LINK:
2781                 /* ACL logical transport (1 Mb/s) ptt=0:
2782                  * DH1, DM3, DH3, DM5 and DH5.
2783                  */
2784                 phys |= BT_PHY_BR_1M_1SLOT;
2785
2786                 if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2787                         phys |= BT_PHY_BR_1M_3SLOT;
2788
2789                 if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2790                         phys |= BT_PHY_BR_1M_5SLOT;
2791
2792                 /* ACL logical transport (2 Mb/s) ptt=1:
2793                  * 2-DH1, 2-DH3 and 2-DH5.
2794                  */
2795                 if (!(conn->pkt_type & HCI_2DH1))
2796                         phys |= BT_PHY_EDR_2M_1SLOT;
2797
2798                 if (!(conn->pkt_type & HCI_2DH3))
2799                         phys |= BT_PHY_EDR_2M_3SLOT;
2800
2801                 if (!(conn->pkt_type & HCI_2DH5))
2802                         phys |= BT_PHY_EDR_2M_5SLOT;
2803
2804                 /* ACL logical transport (3 Mb/s) ptt=1:
2805                  * 3-DH1, 3-DH3 and 3-DH5.
2806                  */
2807                 if (!(conn->pkt_type & HCI_3DH1))
2808                         phys |= BT_PHY_EDR_3M_1SLOT;
2809
2810                 if (!(conn->pkt_type & HCI_3DH3))
2811                         phys |= BT_PHY_EDR_3M_3SLOT;
2812
2813                 if (!(conn->pkt_type & HCI_3DH5))
2814                         phys |= BT_PHY_EDR_3M_5SLOT;
2815
2816                 break;
2817
2818         case ESCO_LINK:
2819                 /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2820                 phys |= BT_PHY_BR_1M_1SLOT;
2821
2822                 if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2823                         phys |= BT_PHY_BR_1M_3SLOT;
2824
2825                 /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2826                 if (!(conn->pkt_type & ESCO_2EV3))
2827                         phys |= BT_PHY_EDR_2M_1SLOT;
2828
2829                 if (!(conn->pkt_type & ESCO_2EV5))
2830                         phys |= BT_PHY_EDR_2M_3SLOT;
2831
2832                 /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2833                 if (!(conn->pkt_type & ESCO_3EV3))
2834                         phys |= BT_PHY_EDR_3M_1SLOT;
2835
2836                 if (!(conn->pkt_type & ESCO_3EV5))
2837                         phys |= BT_PHY_EDR_3M_3SLOT;
2838
2839                 break;
2840
2841         case LE_LINK:
2842                 if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2843                         phys |= BT_PHY_LE_1M_TX;
2844
2845                 if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2846                         phys |= BT_PHY_LE_1M_RX;
2847
2848                 if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2849                         phys |= BT_PHY_LE_2M_TX;
2850
2851                 if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
2852                         phys |= BT_PHY_LE_2M_RX;
2853
2854                 if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
2855                         phys |= BT_PHY_LE_CODED_TX;
2856
2857                 if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
2858                         phys |= BT_PHY_LE_CODED_RX;
2859
2860                 break;
2861         }
2862
2863         return phys;
2864 }
2865
2866 static int abort_conn_sync(struct hci_dev *hdev, void *data)
2867 {
2868         struct hci_conn *conn = data;
2869
2870         if (!hci_conn_valid(hdev, conn))
2871                 return -ECANCELED;
2872
2873         return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
2874 }
2875
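/* Abort @conn with @reason.
 *
 * The abort reason is recorded only once; pending or queued connection
 * attempts are cancelled as needed and abort_conn_sync() is queued at most
 * once per connection.
 */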
2876 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2877 {
2878         struct hci_dev *hdev = conn->hdev;
2879
2880         /* If abort_reason has already been set it means the connection is
2881          * already being aborted so don't attempt to overwrite it.
2882          */
2883         if (conn->abort_reason)
2884                 return 0;
2885
2886         bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
2887
2888         conn->abort_reason = reason;
2889
2890         /* If the connection is pending, check the command opcode since it
2891          * might be blocking on hci_cmd_sync_work while waiting for its
2892          * respective event, so hci_cmd_sync_cancel is needed to cancel it.
2893          *
2894          * hci_connect_le serializes the connection attempts so only one
2895          * connection can be in BT_CONNECT at a time.
2896          */
2897         if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
2898                 switch (hci_skb_event(hdev->sent_cmd)) {
2899                 case HCI_EV_CONN_COMPLETE:
2900                 case HCI_EV_LE_CONN_COMPLETE:
2901                 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
2902                 case HCI_EVT_LE_CIS_ESTABLISHED:
2903                         hci_cmd_sync_cancel(hdev, ECANCELED);
2904                         break;
2905                 }
2906         /* Cancel connect attempt if still queued/pending */
2907         } else if (!hci_cancel_connect_sync(hdev, conn)) {
2908                 return 0;
2909         }
2910
2911         return hci_cmd_sync_queue_once(hdev, abort_conn_sync, conn, NULL);
2912 }