GNU Linux-libre 5.15.137-gnu: net/bluetooth/hci_conn.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33
34 #include "hci_request.h"
35 #include "smp.h"
36 #include "a2mp.h"
37
38 struct sco_param {
39         u16 pkt_type;
40         u16 max_latency;
41         u8  retrans_effort;
42 };
43
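/* Synchronous connection parameter tables, tried in order of decreasing
 * preference; hci_setup_sync() below indexes them with conn->attempt - 1.
 * The S/D and T labels correspond to the CVSD and mSBC parameter sets
 * defined by the Hands-Free Profile specification.
 */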
44 static const struct sco_param esco_param_cvsd[] = {
45         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,   0x01 }, /* S3 */
46         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,   0x01 }, /* S2 */
47         { EDR_ESCO_MASK | ESCO_EV3,   0x0007,   0x01 }, /* S1 */
48         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0x01 }, /* D1 */
49         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0x01 }, /* D0 */
50 };
51
52 static const struct sco_param sco_param_cvsd[] = {
53         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0xff }, /* D1 */
54         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0xff }, /* D0 */
55 };
56
57 static const struct sco_param esco_param_msbc[] = {
58         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,   0x02 }, /* T2 */
59         { EDR_ESCO_MASK | ESCO_EV3,   0x0008,   0x02 }, /* T1 */
60 };
61
62 /* This function requires the caller holds hdev->lock */
63 static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
64 {
65         struct hci_conn_params *params;
66         struct hci_dev *hdev = conn->hdev;
67         struct smp_irk *irk;
68         bdaddr_t *bdaddr;
69         u8 bdaddr_type;
70
71         bdaddr = &conn->dst;
72         bdaddr_type = conn->dst_type;
73
74         /* Check if we need to convert to identity address */
75         irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
76         if (irk) {
77                 bdaddr = &irk->bdaddr;
78                 bdaddr_type = irk->addr_type;
79         }
80
81         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
82                                            bdaddr_type);
83         if (!params || !params->explicit_connect)
84                 return;
85
86         /* The connection attempt was doing a scan for a new RPA, and is
87          * in the scan phase. If the params are not associated with any other
88          * autoconnect action, remove them completely. If they are, just unmark
89          * them as waiting for connection by clearing the explicit_connect field.
90          */
91         params->explicit_connect = false;
92
93         list_del_init(&params->action);
94
95         switch (params->auto_connect) {
96         case HCI_AUTO_CONN_EXPLICIT:
97                 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
98                 /* return instead of break to avoid duplicate scan update */
99                 return;
100         case HCI_AUTO_CONN_DIRECT:
101         case HCI_AUTO_CONN_ALWAYS:
102                 list_add(&params->action, &hdev->pend_le_conns);
103                 break;
104         case HCI_AUTO_CONN_REPORT:
105                 list_add(&params->action, &hdev->pend_le_reports);
106                 break;
107         default:
108                 break;
109         }
110
111         hci_update_background_scan(hdev);
112 }
113
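/* Remove the connection from the connection hash and release its remaining
 * state: flush the HCI channel list, notify the driver, remove the sysfs
 * and debugfs entries and drop the hdev and hci_conn references.
 * Called from hci_conn_del() and le_scan_cleanup().
 */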
114 static void hci_conn_cleanup(struct hci_conn *conn)
115 {
116         struct hci_dev *hdev = conn->hdev;
117
118         if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
119                 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
120
121         hci_chan_list_flush(conn);
122
123         hci_conn_hash_del(hdev, conn);
124
125         if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
126                 switch (conn->setting & SCO_AIRMODE_MASK) {
127                 case SCO_AIRMODE_CVSD:
128                 case SCO_AIRMODE_TRANSP:
129                         if (hdev->notify)
130                                 hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
131                         break;
132                 }
133         } else {
134                 if (hdev->notify)
135                         hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
136         }
137
138         hci_conn_del_sysfs(conn);
139
140         debugfs_remove_recursive(conn->debugfs);
141
142         hci_dev_put(hdev);
143
144         hci_conn_put(conn);
145 }
146
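/* Work callback scheduled by hci_connect_le_scan_remove(). It checks that
 * the hci_conn is still present in the connection hash before cleaning it
 * up, and then releases the temporary hdev and hci_conn references taken
 * when the work was queued.
 */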
147 static void le_scan_cleanup(struct work_struct *work)
148 {
149         struct hci_conn *conn = container_of(work, struct hci_conn,
150                                              le_scan_cleanup);
151         struct hci_dev *hdev = conn->hdev;
152         struct hci_conn *c = NULL;
153
154         BT_DBG("%s hcon %p", hdev->name, conn);
155
156         hci_dev_lock(hdev);
157
158         /* Check that the hci_conn is still around */
159         rcu_read_lock();
160         list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
161                 if (c == conn)
162                         break;
163         }
164         rcu_read_unlock();
165
166         if (c == conn) {
167                 hci_connect_le_scan_cleanup(conn);
168                 hci_conn_cleanup(conn);
169         }
170
171         hci_dev_unlock(hdev);
172         hci_dev_put(hdev);
173         hci_conn_put(conn);
174 }
175
176 static void hci_connect_le_scan_remove(struct hci_conn *conn)
177 {
178         BT_DBG("%s hcon %p", conn->hdev->name, conn);
179
180         /* We can't call hci_conn_del/hci_conn_cleanup here since that
181          * could deadlock with another hci_conn_del() call that's holding
182          * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
183          * Instead, grab temporary extra references to the hci_dev and
184          * hci_conn and perform the necessary cleanup in a separate work
185          * callback.
186          */
187
188         hci_dev_hold(conn->hdev);
189         hci_conn_get(conn);
190
191         /* Even though we hold a reference to the hdev, many other
192          * things might get cleaned up meanwhile, including the hdev's
193          * own workqueue, so we can't use that for scheduling.
194          */
195         schedule_work(&conn->le_scan_cleanup);
196 }
197
198 static void hci_acl_create_connection(struct hci_conn *conn)
199 {
200         struct hci_dev *hdev = conn->hdev;
201         struct inquiry_entry *ie;
202         struct hci_cp_create_conn cp;
203
204         BT_DBG("hcon %p", conn);
205
206         /* Many controllers disallow HCI Create Connection while they are
207          * doing HCI Inquiry. So we cancel the Inquiry first before issuing the
208          * HCI Create Connection. This may cause the MGMT discovering state to
209          * become false without user space's request, but that is okay since
210          * the MGMT Discovery APIs do not promise that discovery will run
211          * forever. Instead, user space monitors the MGMT discovering state
212          * and may request discovery again when this flag becomes false.
213          */
214         if (test_bit(HCI_INQUIRY, &hdev->flags)) {
215                 /* Put this connection to "pending" state so that it will be
216                  * executed after the inquiry cancel command complete event.
217                  */
218                 conn->state = BT_CONNECT2;
219                 hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
220                 return;
221         }
222
223         conn->state = BT_CONNECT;
224         conn->out = true;
225         conn->role = HCI_ROLE_MASTER;
226
227         conn->attempt++;
228
229         conn->link_policy = hdev->link_policy;
230
231         memset(&cp, 0, sizeof(cp));
232         bacpy(&cp.bdaddr, &conn->dst);
233         cp.pscan_rep_mode = 0x02;
234
235         ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
236         if (ie) {
237                 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
238                         cp.pscan_rep_mode = ie->data.pscan_rep_mode;
239                         cp.pscan_mode     = ie->data.pscan_mode;
240                         cp.clock_offset   = ie->data.clock_offset |
241                                             cpu_to_le16(0x8000);
242                 }
243
244                 memcpy(conn->dev_class, ie->data.dev_class, 3);
245         }
246
247         cp.pkt_type = cpu_to_le16(conn->pkt_type);
248         if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
249                 cp.role_switch = 0x01;
250         else
251                 cp.role_switch = 0x00;
252
253         hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
254 }
255
256 int hci_disconnect(struct hci_conn *conn, __u8 reason)
257 {
258         BT_DBG("hcon %p", conn);
259
260         /* When we are the central of an established connection and it
261          * enters the disconnect timeout, go ahead and try to read the
262          * current clock offset. Processing of the result is done
263          * within the event handling and the hci_clock_offset_evt function.
264          */
265         if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
266             (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
267                 struct hci_dev *hdev = conn->hdev;
268                 struct hci_cp_read_clock_offset clkoff_cp;
269
270                 clkoff_cp.handle = cpu_to_le16(conn->handle);
271                 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
272                              &clkoff_cp);
273         }
274
275         return hci_abort_conn(conn, reason);
276 }
277
278 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
279 {
280         struct hci_dev *hdev = conn->hdev;
281         struct hci_cp_add_sco cp;
282
283         BT_DBG("hcon %p", conn);
284
285         conn->state = BT_CONNECT;
286         conn->out = true;
287
288         conn->attempt++;
289
290         cp.handle   = cpu_to_le16(handle);
291         cp.pkt_type = cpu_to_le16(conn->pkt_type);
292
293         hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
294 }
295
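/* Advance conn->attempt past parameter entries that rely on eSCO 2M
 * (2-EV3) packets when the remote device does not support them. Returns
 * false once all entries in the table have been exhausted.
 */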
296 static bool find_next_esco_param(struct hci_conn *conn,
297                                  const struct sco_param *esco_param, int size)
298 {
299         for (; conn->attempt <= size; conn->attempt++) {
300                 if (lmp_esco_2m_capable(conn->link) ||
301                     (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
302                         break;
303                 BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
304                        conn, conn->attempt);
305         }
306
307         return conn->attempt <= size;
308 }
309
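/* Issue HCI Setup Synchronous Connection on top of the given ACL handle,
 * picking the transmit parameters from the mSBC or CVSD tables above
 * according to the requested air mode. Returns false when no parameter
 * set is left to try or the command could not be sent.
 */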
310 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
311 {
312         struct hci_dev *hdev = conn->hdev;
313         struct hci_cp_setup_sync_conn cp;
314         const struct sco_param *param;
315
316         BT_DBG("hcon %p", conn);
317
318         conn->state = BT_CONNECT;
319         conn->out = true;
320
321         conn->attempt++;
322
323         cp.handle   = cpu_to_le16(handle);
324
325         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
326         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
327         cp.voice_setting  = cpu_to_le16(conn->setting);
328
329         switch (conn->setting & SCO_AIRMODE_MASK) {
330         case SCO_AIRMODE_TRANSP:
331                 if (!find_next_esco_param(conn, esco_param_msbc,
332                                           ARRAY_SIZE(esco_param_msbc)))
333                         return false;
334                 param = &esco_param_msbc[conn->attempt - 1];
335                 break;
336         case SCO_AIRMODE_CVSD:
337                 if (lmp_esco_capable(conn->link)) {
338                         if (!find_next_esco_param(conn, esco_param_cvsd,
339                                                   ARRAY_SIZE(esco_param_cvsd)))
340                                 return false;
341                         param = &esco_param_cvsd[conn->attempt - 1];
342                 } else {
343                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
344                                 return false;
345                         param = &sco_param_cvsd[conn->attempt - 1];
346                 }
347                 break;
348         default:
349                 return false;
350         }
351
352         cp.retrans_effort = param->retrans_effort;
353         cp.pkt_type = __cpu_to_le16(param->pkt_type);
354         cp.max_latency = __cpu_to_le16(param->max_latency);
355
356         if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
357                 return false;
358
359         return true;
360 }
361
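/* Send HCI LE Connection Update with the given parameters and mirror them
 * into any stored connection parameters for the destination address.
 * Returns 0x01 if stored parameters were updated and 0x00 otherwise.
 */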
362 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
363                       u16 to_multiplier)
364 {
365         struct hci_dev *hdev = conn->hdev;
366         struct hci_conn_params *params;
367         struct hci_cp_le_conn_update cp;
368
369         hci_dev_lock(hdev);
370
371         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
372         if (params) {
373                 params->conn_min_interval = min;
374                 params->conn_max_interval = max;
375                 params->conn_latency = latency;
376                 params->supervision_timeout = to_multiplier;
377         }
378
379         hci_dev_unlock(hdev);
380
381         memset(&cp, 0, sizeof(cp));
382         cp.handle               = cpu_to_le16(conn->handle);
383         cp.conn_interval_min    = cpu_to_le16(min);
384         cp.conn_interval_max    = cpu_to_le16(max);
385         cp.conn_latency         = cpu_to_le16(latency);
386         cp.supervision_timeout  = cpu_to_le16(to_multiplier);
387         cp.min_ce_len           = cpu_to_le16(0x0000);
388         cp.max_ce_len           = cpu_to_le16(0x0000);
389
390         hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
391
392         if (params)
393                 return 0x01;
394
395         return 0x00;
396 }
397
398 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
399                       __u8 ltk[16], __u8 key_size)
400 {
401         struct hci_dev *hdev = conn->hdev;
402         struct hci_cp_le_start_enc cp;
403
404         BT_DBG("hcon %p", conn);
405
406         memset(&cp, 0, sizeof(cp));
407
408         cp.handle = cpu_to_le16(conn->handle);
409         cp.rand = rand;
410         cp.ediv = ediv;
411         memcpy(cp.ltk, ltk, key_size);
412
413         hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
414 }
415
416 /* Device _must_ be locked */
417 void hci_sco_setup(struct hci_conn *conn, __u8 status)
418 {
419         struct hci_conn *sco = conn->link;
420
421         if (!sco)
422                 return;
423
424         BT_DBG("hcon %p", conn);
425
426         if (!status) {
427                 if (lmp_esco_capable(conn->hdev))
428                         hci_setup_sync(sco, conn->handle);
429                 else
430                         hci_add_sco(sco, conn->handle);
431         } else {
432                 hci_connect_cfm(sco, status);
433                 hci_conn_del(sco);
434         }
435 }
436
437 static void hci_conn_timeout(struct work_struct *work)
438 {
439         struct hci_conn *conn = container_of(work, struct hci_conn,
440                                              disc_work.work);
441         int refcnt = atomic_read(&conn->refcnt);
442
443         BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
444
445         WARN_ON(refcnt < 0);
446
447         /* FIXME: It was observed that in a pairing failed scenario, refcnt
448          * drops below 0. Probably this is because l2cap_conn_del calls
449          * l2cap_chan_del for each channel, and inside l2cap_chan_del the conn
450          * is dropped. After that loop hci_chan_del is called, which also drops
451          * the conn. For now, make sure that the ACL is kept alive if refcnt is
452          * higher than 0, otherwise drop it.
453          */
454         if (refcnt > 0)
455                 return;
456
457         /* LE connections in scanning state need special handling */
458         if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
459             test_bit(HCI_CONN_SCANNING, &conn->flags)) {
460                 hci_connect_le_scan_remove(conn);
461                 return;
462         }
463
464         hci_abort_conn(conn, hci_proto_disconn_ind(conn));
465 }
466
467 /* Enter sniff mode */
468 static void hci_conn_idle(struct work_struct *work)
469 {
470         struct hci_conn *conn = container_of(work, struct hci_conn,
471                                              idle_work.work);
472         struct hci_dev *hdev = conn->hdev;
473
474         BT_DBG("hcon %p mode %d", conn, conn->mode);
475
476         if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
477                 return;
478
479         if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
480                 return;
481
482         if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
483                 struct hci_cp_sniff_subrate cp;
484                 cp.handle             = cpu_to_le16(conn->handle);
485                 cp.max_latency        = cpu_to_le16(0);
486                 cp.min_remote_timeout = cpu_to_le16(0);
487                 cp.min_local_timeout  = cpu_to_le16(0);
488                 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
489         }
490
491         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
492                 struct hci_cp_sniff_mode cp;
493                 cp.handle       = cpu_to_le16(conn->handle);
494                 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
495                 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
496                 cp.attempt      = cpu_to_le16(4);
497                 cp.timeout      = cpu_to_le16(1);
498                 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
499         }
500 }
501
502 static void hci_conn_auto_accept(struct work_struct *work)
503 {
504         struct hci_conn *conn = container_of(work, struct hci_conn,
505                                              auto_accept_work.work);
506
507         hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
508                      &conn->dst);
509 }
510
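/* Disable LE advertising, using the extended advertising command when the
 * controller supports it and the legacy command otherwise.
 */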
511 static void le_disable_advertising(struct hci_dev *hdev)
512 {
513         if (ext_adv_capable(hdev)) {
514                 struct hci_cp_le_set_ext_adv_enable cp;
515
516                 cp.enable = 0x00;
517                 cp.num_of_sets = 0x00;
518
519                 hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
520                              &cp);
521         } else {
522                 u8 enable = 0x00;
523                 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
524                              &enable);
525         }
526 }
527
528 static void le_conn_timeout(struct work_struct *work)
529 {
530         struct hci_conn *conn = container_of(work, struct hci_conn,
531                                              le_conn_timeout.work);
532         struct hci_dev *hdev = conn->hdev;
533
534         BT_DBG("");
535
536         /* We could end up here due to having done directed advertising,
537          * so clean up the state if necessary. This should however only
538          * happen with broken hardware or if low duty cycle was used
539          * (which doesn't have a timeout of its own).
540          */
541         if (conn->role == HCI_ROLE_SLAVE) {
542                 /* Disable LE Advertising */
543                 le_disable_advertising(hdev);
544                 hci_dev_lock(hdev);
545                 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
546                 hci_dev_unlock(hdev);
547                 return;
548         }
549
550         hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
551 }
552
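/* Allocate and initialize a new hci_conn object for the given link type
 * and destination address, set up type-specific defaults such as the
 * packet types, add it to the connection hash and take a reference on the
 * hci_dev. Returns NULL if the allocation fails.
 */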
553 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
554                               u8 role)
555 {
556         struct hci_conn *conn;
557
558         BT_DBG("%s dst %pMR", hdev->name, dst);
559
560         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
561         if (!conn)
562                 return NULL;
563
564         bacpy(&conn->dst, dst);
565         bacpy(&conn->src, &hdev->bdaddr);
566         conn->hdev  = hdev;
567         conn->type  = type;
568         conn->role  = role;
569         conn->mode  = HCI_CM_ACTIVE;
570         conn->state = BT_OPEN;
571         conn->auth_type = HCI_AT_GENERAL_BONDING;
572         conn->io_capability = hdev->io_capability;
573         conn->remote_auth = 0xff;
574         conn->key_type = 0xff;
575         conn->rssi = HCI_RSSI_INVALID;
576         conn->tx_power = HCI_TX_POWER_INVALID;
577         conn->max_tx_power = HCI_TX_POWER_INVALID;
578
579         set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
580         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
581
582         /* Set Default Authenticated payload timeout to 30s */
583         conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
584
585         if (conn->role == HCI_ROLE_MASTER)
586                 conn->out = true;
587
588         switch (type) {
589         case ACL_LINK:
590                 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
591                 break;
592         case LE_LINK:
593                 /* conn->src should reflect the local identity address */
594                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
595                 break;
596         case SCO_LINK:
597                 if (lmp_esco_capable(hdev))
598                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
599                                         (hdev->esco_type & EDR_ESCO_MASK);
600                 else
601                         conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
602                 break;
603         case ESCO_LINK:
604                 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
605                 break;
606         }
607
608         skb_queue_head_init(&conn->data_q);
609
610         INIT_LIST_HEAD(&conn->chan_list);
611
612         INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
613         INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
614         INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
615         INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
616         INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
617
618         atomic_set(&conn->refcnt, 0);
619
620         hci_dev_hold(hdev);
621
622         hci_conn_hash_add(hdev, conn);
623
624         /* The SCO and eSCO connections will only be notified when their
625          * setup has been completed. This is different to ACL links which
626          * can be notified right away.
627          */
628         if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
629                 if (hdev->notify)
630                         hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
631         }
632
633         hci_conn_init_sysfs(conn);
634
635         return conn;
636 }
637
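/* Tear down a connection: cancel its pending work items, return any
 * unacknowledged packet credits to the controller counters, detach a
 * linked SCO/ACL connection and finish via hci_conn_cleanup().
 */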
638 int hci_conn_del(struct hci_conn *conn)
639 {
640         struct hci_dev *hdev = conn->hdev;
641
642         BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
643
644         cancel_delayed_work_sync(&conn->disc_work);
645         cancel_delayed_work_sync(&conn->auto_accept_work);
646         cancel_delayed_work_sync(&conn->idle_work);
647
648         if (conn->type == ACL_LINK) {
649                 struct hci_conn *sco = conn->link;
650                 if (sco)
651                         sco->link = NULL;
652
653                 /* Unacked frames */
654                 hdev->acl_cnt += conn->sent;
655         } else if (conn->type == LE_LINK) {
656                 cancel_delayed_work(&conn->le_conn_timeout);
657
658                 if (hdev->le_pkts)
659                         hdev->le_cnt += conn->sent;
660                 else
661                         hdev->acl_cnt += conn->sent;
662         } else {
663                 struct hci_conn *acl = conn->link;
664                 if (acl) {
665                         acl->link = NULL;
666                         hci_conn_drop(acl);
667                 }
668         }
669
670         if (conn->amp_mgr)
671                 amp_mgr_put(conn->amp_mgr);
672
673         skb_queue_purge(&conn->data_q);
674
675         /* Remove the connection from the list and cleanup its remaining
676          * state. This is a separate function since for some cases like
677          * BT_CONNECT_SCAN we *only* want the cleanup part without the
678          * rest of hci_conn_del.
679          */
680         hci_conn_cleanup(conn);
681
682         return 0;
683 }
684
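/* Find the HCI device to use for a connection from src to dst. If a
 * source address is given, match it against each device's BR/EDR address
 * or LE identity address; otherwise pick the first powered-up primary
 * controller whose own address differs from dst. Returns the device with
 * its reference count raised, or NULL if none matches.
 */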
685 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
686 {
687         int use_src = bacmp(src, BDADDR_ANY);
688         struct hci_dev *hdev = NULL, *d;
689
690         BT_DBG("%pMR -> %pMR", src, dst);
691
692         read_lock(&hci_dev_list_lock);
693
694         list_for_each_entry(d, &hci_dev_list, list) {
695                 if (!test_bit(HCI_UP, &d->flags) ||
696                     hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
697                     d->dev_type != HCI_PRIMARY)
698                         continue;
699
700                 /* Simple routing:
701                  *   No source address - find interface with bdaddr != dst
702                  *   Source address    - find interface with bdaddr == src
703                  */
704
705                 if (use_src) {
706                         bdaddr_t id_addr;
707                         u8 id_addr_type;
708
709                         if (src_type == BDADDR_BREDR) {
710                                 if (!lmp_bredr_capable(d))
711                                         continue;
712                                 bacpy(&id_addr, &d->bdaddr);
713                                 id_addr_type = BDADDR_BREDR;
714                         } else {
715                                 if (!lmp_le_capable(d))
716                                         continue;
717
718                                 hci_copy_identity_address(d, &id_addr,
719                                                           &id_addr_type);
720
721                                 /* Convert from HCI to three-value type */
722                                 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
723                                         id_addr_type = BDADDR_LE_PUBLIC;
724                                 else
725                                         id_addr_type = BDADDR_LE_RANDOM;
726                         }
727
728                         if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
729                                 hdev = d; break;
730                         }
731                 } else {
732                         if (bacmp(&d->bdaddr, dst)) {
733                                 hdev = d; break;
734                         }
735                 }
736         }
737
738         if (hdev)
739                 hdev = hci_dev_hold(hdev);
740
741         read_unlock(&hci_dev_list_lock);
742         return hdev;
743 }
744 EXPORT_SYMBOL(hci_get_route);
745
746 /* This function requires the caller holds hdev->lock */
747 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
748 {
749         struct hci_dev *hdev = conn->hdev;
750         struct hci_conn_params *params;
751
752         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
753                                            conn->dst_type);
754         if (params && params->conn) {
755                 hci_conn_drop(params->conn);
756                 hci_conn_put(params->conn);
757                 params->conn = NULL;
758         }
759
760         conn->state = BT_CLOSED;
761
762         /* If the status indicates successful cancellation of
763          * the attempt (i.e. Unknown Connection Id) there's no point in
764          * notifying failure since we'll go back to keep trying to
765          * connect. The only exception is explicit connect requests
766          * where a timeout + cancel does indicate an actual failure.
767          */
768         if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
769             (params && params->explicit_connect))
770                 mgmt_connect_failed(hdev, &conn->dst, conn->type,
771                                     conn->dst_type, status);
772
773         hci_connect_cfm(conn, status);
774
775         hci_conn_del(conn);
776
777         /* The suspend notifier is waiting for all devices to disconnect and an
778          * LE connect cancel will result in an hci_le_conn_failed. Once the last
779          * connection is deleted, we should also wake the suspend queue to
780          * complete suspend operations.
781          */
782         if (list_empty(&hdev->conn_hash.list) &&
783             test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
784                 wake_up(&hdev->suspend_wait_q);
785         }
786
787         /* Since we may have temporarily stopped the background scanning in
788          * favor of connection establishment, we should restart it.
789          */
790         hci_update_background_scan(hdev);
791
792         /* Re-enable advertising in case this was a failed connection
793          * attempt as a peripheral.
794          */
795         hci_req_reenable_advertising(hdev);
796 }
797
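/* Completion callback for the LE create connection request built by
 * hci_connect_le(). It resumes any paused advertising instances, clears
 * the explicit-connect scan state on success and reports the failure for
 * the pending LE connection otherwise.
 */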
798 static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
799 {
800         struct hci_conn *conn;
801
802         hci_dev_lock(hdev);
803
804         conn = hci_lookup_le_connect(hdev);
805
806         if (hdev->adv_instance_cnt)
807                 hci_req_resume_adv_instances(hdev);
808
809         if (!status) {
810                 hci_connect_le_scan_cleanup(conn);
811                 goto done;
812         }
813
814         bt_dev_err(hdev, "request failed to create LE connection: "
815                    "status 0x%2.2x", status);
816
817         if (!conn)
818                 goto done;
819
820         hci_le_conn_failed(conn, status);
821
822 done:
823         hci_dev_unlock(hdev);
824 }
825
826 static bool conn_use_rpa(struct hci_conn *conn)
827 {
828         struct hci_dev *hdev = conn->hdev;
829
830         return hci_dev_test_flag(hdev, HCI_PRIVACY);
831 }
832
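/* Fill one per-PHY parameter block of the LE Extended Create Connection
 * command from the connection and controller settings.
 */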
833 static void set_ext_conn_params(struct hci_conn *conn,
834                                 struct hci_cp_le_ext_conn_param *p)
835 {
836         struct hci_dev *hdev = conn->hdev;
837
838         memset(p, 0, sizeof(*p));
839
840         p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
841         p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
842         p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
843         p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
844         p->conn_latency = cpu_to_le16(conn->le_conn_latency);
845         p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
846         p->min_ce_len = cpu_to_le16(0x0000);
847         p->max_ce_len = cpu_to_le16(0x0000);
848 }
849
850 static void hci_req_add_le_create_conn(struct hci_request *req,
851                                        struct hci_conn *conn,
852                                        bdaddr_t *direct_rpa)
853 {
854         struct hci_dev *hdev = conn->hdev;
855         u8 own_addr_type;
856
857         /* If a direct address was provided, we use it instead of the
858          * current address.
859          */
860         if (direct_rpa) {
861                 if (bacmp(&req->hdev->random_addr, direct_rpa))
862                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
863                                                                 direct_rpa);
864
865                 /* direct address is always RPA */
866                 own_addr_type = ADDR_LE_DEV_RANDOM;
867         } else {
868                 /* Update random address, but set require_privacy to false so
869                  * that we never connect with a non-resolvable address.
870                  */
871                 if (hci_update_random_address(req, false, conn_use_rpa(conn),
872                                               &own_addr_type))
873                         return;
874         }
875
876         if (use_ext_conn(hdev)) {
877                 struct hci_cp_le_ext_create_conn *cp;
878                 struct hci_cp_le_ext_conn_param *p;
879                 u8 data[sizeof(*cp) + sizeof(*p) * 3];
880                 u32 plen;
881
882                 cp = (void *) data;
883                 p = (void *) cp->data;
884
885                 memset(cp, 0, sizeof(*cp));
886
887                 bacpy(&cp->peer_addr, &conn->dst);
888                 cp->peer_addr_type = conn->dst_type;
889                 cp->own_addr_type = own_addr_type;
890
891                 plen = sizeof(*cp);
892
893                 if (scan_1m(hdev)) {
894                         cp->phys |= LE_SCAN_PHY_1M;
895                         set_ext_conn_params(conn, p);
896
897                         p++;
898                         plen += sizeof(*p);
899                 }
900
901                 if (scan_2m(hdev)) {
902                         cp->phys |= LE_SCAN_PHY_2M;
903                         set_ext_conn_params(conn, p);
904
905                         p++;
906                         plen += sizeof(*p);
907                 }
908
909                 if (scan_coded(hdev)) {
910                         cp->phys |= LE_SCAN_PHY_CODED;
911                         set_ext_conn_params(conn, p);
912
913                         plen += sizeof(*p);
914                 }
915
916                 hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);
917
918         } else {
919                 struct hci_cp_le_create_conn cp;
920
921                 memset(&cp, 0, sizeof(cp));
922
923                 cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
924                 cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
925
926                 bacpy(&cp.peer_addr, &conn->dst);
927                 cp.peer_addr_type = conn->dst_type;
928                 cp.own_address_type = own_addr_type;
929                 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
930                 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
931                 cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
932                 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
933                 cp.min_ce_len = cpu_to_le16(0x0000);
934                 cp.max_ce_len = cpu_to_le16(0x0000);
935
936                 hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
937         }
938
939         conn->state = BT_CONNECT;
940         clear_bit(HCI_CONN_SCANNING, &conn->flags);
941 }
942
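/* Queue the commands that start directed advertising towards conn->dst,
 * used when connecting in the peripheral role. Extended advertising
 * commands are used when the controller supports them, the legacy
 * advertising commands otherwise.
 */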
943 static void hci_req_directed_advertising(struct hci_request *req,
944                                          struct hci_conn *conn)
945 {
946         struct hci_dev *hdev = req->hdev;
947         u8 own_addr_type;
948         u8 enable;
949
950         if (ext_adv_capable(hdev)) {
951                 struct hci_cp_le_set_ext_adv_params cp;
952                 bdaddr_t random_addr;
953
954                 /* Set require_privacy to false so that the remote device has a
955                  * chance of identifying us.
956                  */
957                 if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
958                                            &own_addr_type, &random_addr) < 0)
959                         return;
960
961                 memset(&cp, 0, sizeof(cp));
962
963                 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
964                 cp.own_addr_type = own_addr_type;
965                 cp.channel_map = hdev->le_adv_channel_map;
966                 cp.tx_power = HCI_TX_POWER_INVALID;
967                 cp.primary_phy = HCI_ADV_PHY_1M;
968                 cp.secondary_phy = HCI_ADV_PHY_1M;
969                 cp.handle = 0; /* Use instance 0 for directed adv */
970                 cp.own_addr_type = own_addr_type;
971                 cp.peer_addr_type = conn->dst_type;
972                 bacpy(&cp.peer_addr, &conn->dst);
973
974                 /* As per Core Spec 5.2, Vol 2, Part E, Sec 7.8.53, the
975                  * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
976                  * does not support advertising data. When the advertising
977                  * set already contains some, the controller shall return
978                  * the error code 'Invalid HCI Command Parameters' (0x12).
979                  * So it is required to remove the adv set for handle 0x00,
980                  * since we use instance 0 for directed adv.
981                  */
982                 __hci_req_remove_ext_adv_instance(req, cp.handle);
983
984                 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
985
986                 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
987                     bacmp(&random_addr, BDADDR_ANY) &&
988                     bacmp(&random_addr, &hdev->random_addr)) {
989                         struct hci_cp_le_set_adv_set_rand_addr cp;
990
991                         memset(&cp, 0, sizeof(cp));
992
993                         cp.handle = 0;
994                         bacpy(&cp.bdaddr, &random_addr);
995
996                         hci_req_add(req,
997                                     HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
998                                     sizeof(cp), &cp);
999                 }
1000
1001                 __hci_req_enable_ext_advertising(req, 0x00);
1002         } else {
1003                 struct hci_cp_le_set_adv_param cp;
1004
1005                 /* Clear the HCI_LE_ADV bit temporarily so that the
1006                  * hci_update_random_address knows that it's safe to go ahead
1007                  * and write a new random address. The flag will be set back on
1008                  * as soon as the SET_ADV_ENABLE HCI command completes.
1009                  */
1010                 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1011
1012                 /* Set require_privacy to false so that the remote device has a
1013                  * chance of identifying us.
1014                  */
1015                 if (hci_update_random_address(req, false, conn_use_rpa(conn),
1016                                               &own_addr_type) < 0)
1017                         return;
1018
1019                 memset(&cp, 0, sizeof(cp));
1020
1021                 /* Some controllers might reject command if intervals are not
1022                  * within range for undirected advertising.
1023                  * BCM20702A0 is known to be affected by this.
1024                  */
1025                 cp.min_interval = cpu_to_le16(0x0020);
1026                 cp.max_interval = cpu_to_le16(0x0020);
1027
1028                 cp.type = LE_ADV_DIRECT_IND;
1029                 cp.own_address_type = own_addr_type;
1030                 cp.direct_addr_type = conn->dst_type;
1031                 bacpy(&cp.direct_addr, &conn->dst);
1032                 cp.channel_map = hdev->le_adv_channel_map;
1033
1034                 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1035
1036                 enable = 0x01;
1037                 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
1038                             &enable);
1039         }
1040
1041         conn->state = BT_CONNECT;
1042 }
1043
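/* Initiate an outgoing LE connection to dst. An existing connection
 * object in scanning state is reused, otherwise a new one is created,
 * and the request to stop scanning, handle advertising and create the
 * connection is built and submitted. Returns the connection or an
 * ERR_PTR on failure.
 */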
1044 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1045                                 u8 dst_type, u8 sec_level, u16 conn_timeout,
1046                                 u8 role, bdaddr_t *direct_rpa)
1047 {
1048         struct hci_conn_params *params;
1049         struct hci_conn *conn;
1050         struct smp_irk *irk;
1051         struct hci_request req;
1052         int err;
1053
1054         /* This ensures that address resolution is not disabled when the
1055          * LE scan is stopped, since it is followed by le_create_conn
1056          */
1057         bool rpa_le_conn = true;
1058
1059         /* Let's make sure that LE is enabled. */
1060         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1061                 if (lmp_le_capable(hdev))
1062                         return ERR_PTR(-ECONNREFUSED);
1063
1064                 return ERR_PTR(-EOPNOTSUPP);
1065         }
1066
1067         /* Since the controller supports only one LE connection attempt at a
1068          * time, we return -EBUSY if there is any connection attempt running.
1069          */
1070         if (hci_lookup_le_connect(hdev))
1071                 return ERR_PTR(-EBUSY);
1072
1073         /* If there's already a connection object but it's not in
1074          * scanning state it means it must already be established, in
1075          * which case we can't do anything else except report a failure
1076          * to connect.
1077          */
1078         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1079         if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1080                 return ERR_PTR(-EBUSY);
1081         }
1082
1083         /* When given an identity address with existing identity
1084          * resolving key, the connection needs to be established
1085          * to a resolvable random address.
1086          *
1087          * Storing the resolvable random address is required here
1088          * to handle connection failures. The address will later
1089          * be resolved back into the original identity address
1090          * from the connect request.
1091          */
1092         irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1093         if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1094                 dst = &irk->rpa;
1095                 dst_type = ADDR_LE_DEV_RANDOM;
1096         }
1097
1098         if (conn) {
1099                 bacpy(&conn->dst, dst);
1100         } else {
1101                 conn = hci_conn_add(hdev, LE_LINK, dst, role);
1102                 if (!conn)
1103                         return ERR_PTR(-ENOMEM);
1104                 hci_conn_hold(conn);
1105                 conn->pending_sec_level = sec_level;
1106         }
1107
1108         conn->dst_type = dst_type;
1109         conn->sec_level = BT_SECURITY_LOW;
1110         conn->conn_timeout = conn_timeout;
1111
1112         hci_req_init(&req, hdev);
1113
1114         /* Disable advertising if we're active. For central role
1115          * connections most controllers will refuse to connect if
1116          * advertising is enabled, and for peripheral role connections we
1117          * anyway have to disable it in order to start directed
1118          * advertising. Any registered advertisements will be
1119          * re-enabled after the connection attempt is finished.
1120          */
1121         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1122                 __hci_req_pause_adv_instances(&req);
1123
1124         /* If requested to connect as peripheral use directed advertising */
1125         if (conn->role == HCI_ROLE_SLAVE) {
1126                 /* If we're active scanning most controllers are unable
1127                  * to initiate advertising. Simply reject the attempt.
1128                  */
1129                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
1130                     hdev->le_scan_type == LE_SCAN_ACTIVE) {
1131                         hci_req_purge(&req);
1132                         hci_conn_del(conn);
1133                         return ERR_PTR(-EBUSY);
1134                 }
1135
1136                 hci_req_directed_advertising(&req, conn);
1137                 goto create_conn;
1138         }
1139
1140         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
1141         if (params) {
1142                 conn->le_conn_min_interval = params->conn_min_interval;
1143                 conn->le_conn_max_interval = params->conn_max_interval;
1144                 conn->le_conn_latency = params->conn_latency;
1145                 conn->le_supv_timeout = params->supervision_timeout;
1146         } else {
1147                 conn->le_conn_min_interval = hdev->le_conn_min_interval;
1148                 conn->le_conn_max_interval = hdev->le_conn_max_interval;
1149                 conn->le_conn_latency = hdev->le_conn_latency;
1150                 conn->le_supv_timeout = hdev->le_supv_timeout;
1151         }
1152
1153         /* If controller is scanning, we stop it since some controllers are
1154          * not able to scan and connect at the same time. Also set the
1155          * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
1156          * handler for scan disabling knows to set the correct discovery
1157          * state.
1158          */
1159         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1160                 hci_req_add_le_scan_disable(&req, rpa_le_conn);
1161                 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
1162         }
1163
1164         hci_req_add_le_create_conn(&req, conn, direct_rpa);
1165
1166 create_conn:
1167         err = hci_req_run(&req, create_le_conn_complete);
1168         if (err) {
1169                 hci_conn_del(conn);
1170
1171                 if (hdev->adv_instance_cnt)
1172                         hci_req_resume_adv_instances(hdev);
1173
1174                 return ERR_PTR(err);
1175         }
1176
1177         return conn;
1178 }
1179
1180 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1181 {
1182         struct hci_conn *conn;
1183
1184         conn = hci_conn_hash_lookup_le(hdev, addr, type);
1185         if (!conn)
1186                 return false;
1187
1188         if (conn->state != BT_CONNECTED)
1189                 return false;
1190
1191         return true;
1192 }
1193
1194 /* This function requires the caller holds hdev->lock */
1195 static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1196                                         bdaddr_t *addr, u8 addr_type)
1197 {
1198         struct hci_conn_params *params;
1199
1200         if (is_connected(hdev, addr, addr_type))
1201                 return -EISCONN;
1202
1203         params = hci_conn_params_lookup(hdev, addr, addr_type);
1204         if (!params) {
1205                 params = hci_conn_params_add(hdev, addr, addr_type);
1206                 if (!params)
1207                         return -ENOMEM;
1208
1209                 /* If we created new params, mark them to be deleted in
1210                  * hci_connect_le_scan_cleanup. It's a different case from
1211                  * existing disabled params; those will stay after cleanup.
1212                  */
1213                 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1214         }
1215
1216         /* We're trying to connect, so make sure params are at pend_le_conns */
1217         if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1218             params->auto_connect == HCI_AUTO_CONN_REPORT ||
1219             params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1220                 list_del_init(&params->action);
1221                 list_add(&params->action, &hdev->pend_le_conns);
1222         }
1223
1224         params->explicit_connect = true;
1225
1226         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1227                params->auto_connect);
1228
1229         return 0;
1230 }
1231
1232 /* This function requires the caller holds hdev->lock */
1233 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1234                                      u8 dst_type, u8 sec_level,
1235                                      u16 conn_timeout,
1236                                      enum conn_reasons conn_reason)
1237 {
1238         struct hci_conn *conn;
1239
1240         /* Let's make sure that LE is enabled. */
1241         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1242                 if (lmp_le_capable(hdev))
1243                         return ERR_PTR(-ECONNREFUSED);
1244
1245                 return ERR_PTR(-EOPNOTSUPP);
1246         }
1247
1248         /* Some devices send ATT messages as soon as the physical link is
1249          * established. To be able to handle these ATT messages, the user-
1250          * space first establishes the connection and then starts the pairing
1251          * process.
1252          *
1253          * So if a hci_conn object already exists for the following connection
1254          * attempt, we simply update pending_sec_level and auth_type fields
1255          * and return the object found.
1256          */
1257         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1258         if (conn) {
1259                 if (conn->pending_sec_level < sec_level)
1260                         conn->pending_sec_level = sec_level;
1261                 goto done;
1262         }
1263
1264         BT_DBG("requesting refresh of dst_addr");
1265
1266         conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1267         if (!conn)
1268                 return ERR_PTR(-ENOMEM);
1269
1270         if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1271                 hci_conn_del(conn);
1272                 return ERR_PTR(-EBUSY);
1273         }
1274
1275         conn->state = BT_CONNECT;
1276         set_bit(HCI_CONN_SCANNING, &conn->flags);
1277         conn->dst_type = dst_type;
1278         conn->sec_level = BT_SECURITY_LOW;
1279         conn->pending_sec_level = sec_level;
1280         conn->conn_timeout = conn_timeout;
1281         conn->conn_reason = conn_reason;
1282
1283         hci_update_background_scan(hdev);
1284
1285 done:
1286         hci_conn_hold(conn);
1287         return conn;
1288 }
1289
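/* Look up or create an ACL connection to dst and, if the link is not
 * already being set up, start the Create Connection procedure. Returns
 * the connection with a reference held or an ERR_PTR on failure.
 */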
1290 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1291                                  u8 sec_level, u8 auth_type,
1292                                  enum conn_reasons conn_reason)
1293 {
1294         struct hci_conn *acl;
1295
1296         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1297                 if (lmp_bredr_capable(hdev))
1298                         return ERR_PTR(-ECONNREFUSED);
1299
1300                 return ERR_PTR(-EOPNOTSUPP);
1301         }
1302
1303         /* Reject outgoing connection to device with same BD ADDR against
1304          * CVE-2020-26555
1305          */
1306         if (!bacmp(&hdev->bdaddr, dst)) {
1307                 bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
1308                            dst);
1309                 return ERR_PTR(-ECONNREFUSED);
1310         }
1311
1312         acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1313         if (!acl) {
1314                 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1315                 if (!acl)
1316                         return ERR_PTR(-ENOMEM);
1317         }
1318
1319         hci_conn_hold(acl);
1320
1321         acl->conn_reason = conn_reason;
1322         if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1323                 acl->sec_level = BT_SECURITY_LOW;
1324                 acl->pending_sec_level = sec_level;
1325                 acl->auth_type = auth_type;
1326                 hci_acl_create_connection(acl);
1327         }
1328
1329         return acl;
1330 }
1331
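/* Set up a SCO or eSCO connection on top of an ACL link to dst, creating
 * the ACL connection first if needed. The synchronous connection setup is
 * deferred until the ACL link is connected and any pending mode change
 * has completed.
 */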
1332 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1333                                  __u16 setting)
1334 {
1335         struct hci_conn *acl;
1336         struct hci_conn *sco;
1337
1338         acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1339                               CONN_REASON_SCO_CONNECT);
1340         if (IS_ERR(acl))
1341                 return acl;
1342
1343         sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1344         if (!sco) {
1345                 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
1346                 if (!sco) {
1347                         hci_conn_drop(acl);
1348                         return ERR_PTR(-ENOMEM);
1349                 }
1350         }
1351
1352         acl->link = sco;
1353         sco->link = acl;
1354
1355         hci_conn_hold(sco);
1356
1357         sco->setting = setting;
1358
1359         if (acl->state == BT_CONNECTED &&
1360             (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1361                 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1362                 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1363
1364                 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1365                         /* defer SCO setup until mode change completed */
1366                         set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1367                         return sco;
1368                 }
1369
1370                 hci_sco_setup(acl, 0x00);
1371         }
1372
1373         return sco;
1374 }
1375
1376 /* Check link security requirement */
1377 int hci_conn_check_link_mode(struct hci_conn *conn)
1378 {
1379         BT_DBG("hcon %p", conn);
1380
1381         /* In Secure Connections Only mode, it is required that Secure
1382          * Connections is used and the link is encrypted with AES-CCM
1383          * using a P-256 authenticated combination key.
1384          */
1385         if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
1386                 if (!hci_conn_sc_enabled(conn) ||
1387                     !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
1388                     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
1389                         return 0;
1390         }
1391
1392          /* AES encryption is required for Level 4:
1393           *
1394           * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
1395           * page 1319:
1396           *
1397           * 128-bit equivalent strength for link and encryption keys
1398           * required using FIPS approved algorithms (E0 not allowed,
1399           * SAFER+ not allowed, and P-192 not allowed; encryption key
1400           * not shortened)
1401           */
1402         if (conn->sec_level == BT_SECURITY_FIPS &&
1403             !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
1404                 bt_dev_err(conn->hdev,
1405                            "Invalid security: Missing AES-CCM usage");
1406                 return 0;
1407         }
1408
1409         if (hci_conn_ssp_enabled(conn) &&
1410             !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1411                 return 0;
1412
1413         return 1;
1414 }
1415
1416 /* Authenticate remote device */
1417 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
1418 {
1419         BT_DBG("hcon %p", conn);
1420
1421         if (conn->pending_sec_level > sec_level)
1422                 sec_level = conn->pending_sec_level;
1423
1424         if (sec_level > conn->sec_level)
1425                 conn->pending_sec_level = sec_level;
1426         else if (test_bit(HCI_CONN_AUTH, &conn->flags))
1427                 return 1;
1428
1429         /* Make sure we preserve an existing MITM requirement. */
1430         auth_type |= (conn->auth_type & 0x01);
1431
1432         conn->auth_type = auth_type;
1433
1434         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1435                 struct hci_cp_auth_requested cp;
1436
1437                 cp.handle = cpu_to_le16(conn->handle);
1438                 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
1439                              sizeof(cp), &cp);
1440
1441                 /* If we're already encrypted, set the REAUTH_PEND flag;
1442                  * otherwise set the ENCRYPT_PEND flag.
1443                  */
1444                 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1445                         set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1446                 else
1447                         set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1448         }
1449
1450         return 0;
1451 }
1452
1453 /* Encrypt the link */
1454 static void hci_conn_encrypt(struct hci_conn *conn)
1455 {
1456         BT_DBG("hcon %p", conn);
1457
1458         if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1459                 struct hci_cp_set_conn_encrypt cp;
1460                 cp.handle  = cpu_to_le16(conn->handle);
1461                 cp.encrypt = 0x01;
1462                 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1463                              &cp);
1464         }
1465 }
1466
1467 /* Enable security */
1468 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
1469                       bool initiator)
1470 {
1471         BT_DBG("hcon %p", conn);
1472
1473         if (conn->type == LE_LINK)
1474                 return smp_conn_security(conn, sec_level);
1475
1476         /* For sdp we don't need the link key. */
1477         if (sec_level == BT_SECURITY_SDP)
1478                 return 1;
1479
1480         /* For non-2.1 devices and a low security level we don't need
1481            the link key. */
1482         if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
1483                 return 1;
1484
1485         /* For other security levels we need the link key. */
1486         if (!test_bit(HCI_CONN_AUTH, &conn->flags))
1487                 goto auth;
1488
1489         switch (conn->key_type) {
1490         case HCI_LK_AUTH_COMBINATION_P256:
1491                 /* An authenticated FIPS approved combination key has
1492                  * sufficient security for security level 4 or lower.
1493                  */
1494                 if (sec_level <= BT_SECURITY_FIPS)
1495                         goto encrypt;
1496                 break;
1497         case HCI_LK_AUTH_COMBINATION_P192:
1498                 /* An authenticated combination key has sufficient security for
1499                  * security level 3 or lower.
1500                  */
1501                 if (sec_level <= BT_SECURITY_HIGH)
1502                         goto encrypt;
1503                 break;
1504         case HCI_LK_UNAUTH_COMBINATION_P192:
1505         case HCI_LK_UNAUTH_COMBINATION_P256:
1506                 /* An unauthenticated combination key has sufficient security
1507                  * for security level 2 or lower.
1508                  */
1509                 if (sec_level <= BT_SECURITY_MEDIUM)
1510                         goto encrypt;
1511                 break;
1512         case HCI_LK_COMBINATION:
1513                 /* A combination key always has sufficient security for
1514                  * security level 2 or lower. A high security level requires
1515                  * that the combination key was generated using the maximum
1516                  * PIN code length (16). Only used by pre-2.1 units.
1517                  */
1518                 if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
1519                         goto encrypt;
1520                 break;
1521         default:
1522                 break;
1523         }
1524
1525 auth:
1526         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1527                 return 0;
1528
1529         if (initiator)
1530                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1531
1532         if (!hci_conn_auth(conn, sec_level, auth_type))
1533                 return 0;
1534
1535 encrypt:
1536         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
1537                 /* Ensure that the encryption key size has been read,
1538                  * otherwise stall the upper layer responses.
1539                  */
1540                 if (!conn->enc_key_size)
1541                         return 0;
1542
1543                 /* Nothing else needed, all requirements are met */
1544                 return 1;
1545         }
1546
1547         hci_conn_encrypt(conn);
1548         return 0;
1549 }
1550 EXPORT_SYMBOL(hci_conn_security);
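
/* Illustrative caller sketch: a return value of 1 means the requested
 * security is already in place, while 0 means an authentication and/or
 * encryption procedure has been started (or is pending) and the caller must
 * wait for the corresponding HCI events. Everything except
 * hci_conn_security() and the HCI_AT_/BT_SECURITY_ constants is assumed:
 *
 *	if (hci_conn_security(conn, BT_SECURITY_MEDIUM,
 *			      HCI_AT_GENERAL_BONDING, true)) {
 *		// requirements already met, continue right away
 *		my_proto_connect_cfm(conn);
 *	} else {
 *		// resume later from the registered security_cfm callback
 *		set_bit(MY_PROTO_SEC_PENDING, &my_chan->flags);
 *	}
 */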
1551
1552 /* Check secure link requirement */
1553 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1554 {
1555         BT_DBG("hcon %p", conn);
1556
1557         /* Accept if neither a high nor a FIPS security level is required */
1558         if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1559                 return 1;
1560
1561         /* Accept if secure or higher security level is already present */
1562         if (conn->sec_level == BT_SECURITY_HIGH ||
1563             conn->sec_level == BT_SECURITY_FIPS)
1564                 return 1;
1565
1566         /* Reject not secure link */
1567         return 0;
1568 }
1569 EXPORT_SYMBOL(hci_conn_check_secure);
1570
1571 /* Switch role */
1572 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1573 {
1574         BT_DBG("hcon %p", conn);
1575
1576         if (role == conn->role)
1577                 return 1;
1578
1579         if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1580                 struct hci_cp_switch_role cp;
1581                 bacpy(&cp.bdaddr, &conn->dst);
1582                 cp.role = role;
1583                 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1584         }
1585
1586         return 0;
1587 }
1588 EXPORT_SYMBOL(hci_conn_switch_role);
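
/* Example sketch: an upper layer that needs to be central on the ACL link
 * can request a switch and then wait for the role change event; "conn" is an
 * assumed ACL hci_conn, HCI_ROLE_MASTER comes from the HCI definitions:
 *
 *	if (conn->role != HCI_ROLE_MASTER)
 *		hci_conn_switch_role(conn, HCI_ROLE_MASTER);
 */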
1589
1590 /* Enter active mode */
1591 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1592 {
1593         struct hci_dev *hdev = conn->hdev;
1594
1595         BT_DBG("hcon %p mode %d", conn, conn->mode);
1596
1597         if (conn->mode != HCI_CM_SNIFF)
1598                 goto timer;
1599
1600         if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1601                 goto timer;
1602
1603         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1604                 struct hci_cp_exit_sniff_mode cp;
1605                 cp.handle = cpu_to_le16(conn->handle);
1606                 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1607         }
1608
1609 timer:
1610         if (hdev->idle_timeout > 0)
1611                 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1612                                    msecs_to_jiffies(hdev->idle_timeout));
1613 }
1614
1615 /* Drop all connections on the device */
1616 void hci_conn_hash_flush(struct hci_dev *hdev)
1617 {
1618         struct hci_conn_hash *h = &hdev->conn_hash;
1619         struct hci_conn *c, *n;
1620
1621         BT_DBG("hdev %s", hdev->name);
1622
1623         list_for_each_entry_safe(c, n, &h->list, list) {
1624                 c->state = BT_CLOSED;
1625
1626                 hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1627                 hci_conn_del(c);
1628         }
1629 }
1630
1631 /* Check pending connect attempts */
1632 void hci_conn_check_pending(struct hci_dev *hdev)
1633 {
1634         struct hci_conn *conn;
1635
1636         BT_DBG("hdev %s", hdev->name);
1637
1638         hci_dev_lock(hdev);
1639
1640         conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1641         if (conn)
1642                 hci_acl_create_connection(conn);
1643
1644         hci_dev_unlock(hdev);
1645 }
1646
1647 static u32 get_link_mode(struct hci_conn *conn)
1648 {
1649         u32 link_mode = 0;
1650
1651         if (conn->role == HCI_ROLE_MASTER)
1652                 link_mode |= HCI_LM_MASTER;
1653
1654         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1655                 link_mode |= HCI_LM_ENCRYPT;
1656
1657         if (test_bit(HCI_CONN_AUTH, &conn->flags))
1658                 link_mode |= HCI_LM_AUTH;
1659
1660         if (test_bit(HCI_CONN_SECURE, &conn->flags))
1661                 link_mode |= HCI_LM_SECURE;
1662
1663         if (test_bit(HCI_CONN_FIPS, &conn->flags))
1664                 link_mode |= HCI_LM_FIPS;
1665
1666         return link_mode;
1667 }
1668
1669 int hci_get_conn_list(void __user *arg)
1670 {
1671         struct hci_conn *c;
1672         struct hci_conn_list_req req, *cl;
1673         struct hci_conn_info *ci;
1674         struct hci_dev *hdev;
1675         int n = 0, size, err;
1676
1677         if (copy_from_user(&req, arg, sizeof(req)))
1678                 return -EFAULT;
1679
1680         if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1681                 return -EINVAL;
1682
1683         size = sizeof(req) + req.conn_num * sizeof(*ci);
1684
1685         cl = kmalloc(size, GFP_KERNEL);
1686         if (!cl)
1687                 return -ENOMEM;
1688
1689         hdev = hci_dev_get(req.dev_id);
1690         if (!hdev) {
1691                 kfree(cl);
1692                 return -ENODEV;
1693         }
1694
1695         ci = cl->conn_info;
1696
1697         hci_dev_lock(hdev);
1698         list_for_each_entry(c, &hdev->conn_hash.list, list) {
1699                 bacpy(&(ci + n)->bdaddr, &c->dst);
1700                 (ci + n)->handle = c->handle;
1701                 (ci + n)->type  = c->type;
1702                 (ci + n)->out   = c->out;
1703                 (ci + n)->state = c->state;
1704                 (ci + n)->link_mode = get_link_mode(c);
1705                 if (++n >= req.conn_num)
1706                         break;
1707         }
1708         hci_dev_unlock(hdev);
1709
1710         cl->dev_id = hdev->id;
1711         cl->conn_num = n;
1712         size = sizeof(req) + n * sizeof(*ci);
1713
1714         hci_dev_put(hdev);
1715
1716         err = copy_to_user(arg, cl, size);
1717         kfree(cl);
1718
1719         return err ? -EFAULT : 0;
1720 }
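
/* Userspace sketch of driving this handler through the HCIGETCONNLIST ioctl
 * on a raw HCI socket (using BlueZ's <bluetooth/bluetooth.h> and
 * <bluetooth/hci.h> headers); hci_get_conn_info() below is reached the same
 * way via HCIGETCONNINFO. Buffer size and device id are arbitrary and error
 * handling is trimmed:
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct hci_conn_list_req *cl;
 *	struct hci_conn_info *ci;
 *	int i;
 *
 *	cl = malloc(sizeof(*cl) + 10 * sizeof(*ci));
 *	cl->dev_id = 0;			// hci0
 *	cl->conn_num = 10;		// room for up to 10 entries
 *	ci = cl->conn_info;
 *
 *	if (!ioctl(dd, HCIGETCONNLIST, (void *)cl))
 *		for (i = 0; i < cl->conn_num; i++)
 *			printf("handle %u type %u link_mode 0x%x\n",
 *			       ci[i].handle, ci[i].type, ci[i].link_mode);
 */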
1721
1722 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1723 {
1724         struct hci_conn_info_req req;
1725         struct hci_conn_info ci;
1726         struct hci_conn *conn;
1727         char __user *ptr = arg + sizeof(req);
1728
1729         if (copy_from_user(&req, arg, sizeof(req)))
1730                 return -EFAULT;
1731
1732         hci_dev_lock(hdev);
1733         conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1734         if (conn) {
1735                 bacpy(&ci.bdaddr, &conn->dst);
1736                 ci.handle = conn->handle;
1737                 ci.type  = conn->type;
1738                 ci.out   = conn->out;
1739                 ci.state = conn->state;
1740                 ci.link_mode = get_link_mode(conn);
1741         }
1742         hci_dev_unlock(hdev);
1743
1744         if (!conn)
1745                 return -ENOENT;
1746
1747         return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1748 }
1749
1750 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1751 {
1752         struct hci_auth_info_req req;
1753         struct hci_conn *conn;
1754
1755         if (copy_from_user(&req, arg, sizeof(req)))
1756                 return -EFAULT;
1757
1758         hci_dev_lock(hdev);
1759         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1760         if (conn)
1761                 req.type = conn->auth_type;
1762         hci_dev_unlock(hdev);
1763
1764         if (!conn)
1765                 return -ENOENT;
1766
1767         return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1768 }
1769
1770 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1771 {
1772         struct hci_dev *hdev = conn->hdev;
1773         struct hci_chan *chan;
1774
1775         BT_DBG("%s hcon %p", hdev->name, conn);
1776
1777         if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1778                 BT_DBG("Refusing to create new hci_chan");
1779                 return NULL;
1780         }
1781
1782         chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1783         if (!chan)
1784                 return NULL;
1785
1786         chan->conn = hci_conn_get(conn);
1787         skb_queue_head_init(&chan->data_q);
1788         chan->state = BT_CONNECTED;
1789
1790         list_add_rcu(&chan->list, &conn->chan_list);
1791
1792         return chan;
1793 }
1794
1795 void hci_chan_del(struct hci_chan *chan)
1796 {
1797         struct hci_conn *conn = chan->conn;
1798         struct hci_dev *hdev = conn->hdev;
1799
1800         BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
1801
1802         list_del_rcu(&chan->list);
1803
1804         synchronize_rcu();
1805
1806         /* Prevent new hci_chans from being created for this hci_conn */
1807         set_bit(HCI_CONN_DROP, &conn->flags);
1808
1809         hci_conn_put(conn);
1810
1811         skb_queue_purge(&chan->data_q);
1812         kfree(chan);
1813 }
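
/* Sketch of the expected channel lifetime: a channel rides on an existing
 * hci_conn, outgoing skbs are queued on chan->data_q for the TX scheduler,
 * and hci_chan_del() detaches the channel again (dropping the hci_conn
 * reference taken in hci_chan_create()). "conn" and "skb" are assumed to be
 * provided by the caller:
 *
 *	struct hci_chan *chan = hci_chan_create(conn);
 *
 *	if (!chan)
 *		return -ENOMEM;
 *	skb_queue_tail(&chan->data_q, skb);	// hand data to the scheduler
 *	...
 *	hci_chan_del(chan);
 */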
1814
1815 void hci_chan_list_flush(struct hci_conn *conn)
1816 {
1817         struct hci_chan *chan, *n;
1818
1819         BT_DBG("hcon %p", conn);
1820
1821         list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1822                 hci_chan_del(chan);
1823 }
1824
1825 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1826                                                  __u16 handle)
1827 {
1828         struct hci_chan *hchan;
1829
1830         list_for_each_entry(hchan, &hcon->chan_list, list) {
1831                 if (hchan->handle == handle)
1832                         return hchan;
1833         }
1834
1835         return NULL;
1836 }
1837
1838 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1839 {
1840         struct hci_conn_hash *h = &hdev->conn_hash;
1841         struct hci_conn *hcon;
1842         struct hci_chan *hchan = NULL;
1843
1844         rcu_read_lock();
1845
1846         list_for_each_entry_rcu(hcon, &h->list, list) {
1847                 hchan = __hci_chan_lookup_handle(hcon, handle);
1848                 if (hchan)
1849                         break;
1850         }
1851
1852         rcu_read_unlock();
1853
1854         return hchan;
1855 }
1856
1857 u32 hci_conn_get_phy(struct hci_conn *conn)
1858 {
1859         u32 phys = 0;
1860
1861         /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
1862          * Table 6.2: Packets defined for synchronous, asynchronous, and
1863          * CPB logical transport types.
1864          */
1865         switch (conn->type) {
1866         case SCO_LINK:
1867                 /* SCO logical transport (1 Mb/s):
1868                  * HV1, HV2, HV3 and DV.
1869                  */
1870                 phys |= BT_PHY_BR_1M_1SLOT;
1871
1872                 break;
1873
1874         case ACL_LINK:
1875                 /* ACL logical transport (1 Mb/s) ptt=0:
1876                  * DH1, DM3, DH3, DM5 and DH5.
1877                  */
1878                 phys |= BT_PHY_BR_1M_1SLOT;
1879
1880                 if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
1881                         phys |= BT_PHY_BR_1M_3SLOT;
1882
1883                 if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
1884                         phys |= BT_PHY_BR_1M_5SLOT;
1885
1886                 /* ACL logical transport (2 Mb/s) ptt=1:
1887                  * 2-DH1, 2-DH3 and 2-DH5.
1888                  */
1889                 if (!(conn->pkt_type & HCI_2DH1))
1890                         phys |= BT_PHY_EDR_2M_1SLOT;
1891
1892                 if (!(conn->pkt_type & HCI_2DH3))
1893                         phys |= BT_PHY_EDR_2M_3SLOT;
1894
1895                 if (!(conn->pkt_type & HCI_2DH5))
1896                         phys |= BT_PHY_EDR_2M_5SLOT;
1897
1898                 /* ACL logical transport (3 Mb/s) ptt=1:
1899                  * 3-DH1, 3-DH3 and 3-DH5.
1900                  */
1901                 if (!(conn->pkt_type & HCI_3DH1))
1902                         phys |= BT_PHY_EDR_3M_1SLOT;
1903
1904                 if (!(conn->pkt_type & HCI_3DH3))
1905                         phys |= BT_PHY_EDR_3M_3SLOT;
1906
1907                 if (!(conn->pkt_type & HCI_3DH5))
1908                         phys |= BT_PHY_EDR_3M_5SLOT;
1909
1910                 break;
1911
1912         case ESCO_LINK:
1913                 /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
1914                 phys |= BT_PHY_BR_1M_1SLOT;
1915
1916                 if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
1917                         phys |= BT_PHY_BR_1M_3SLOT;
1918
1919                 /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
1920                 if (!(conn->pkt_type & ESCO_2EV3))
1921                         phys |= BT_PHY_EDR_2M_1SLOT;
1922
1923                 if (!(conn->pkt_type & ESCO_2EV5))
1924                         phys |= BT_PHY_EDR_2M_3SLOT;
1925
1926                 /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
1927                 if (!(conn->pkt_type & ESCO_3EV3))
1928                         phys |= BT_PHY_EDR_3M_1SLOT;
1929
1930                 if (!(conn->pkt_type & ESCO_3EV5))
1931                         phys |= BT_PHY_EDR_3M_3SLOT;
1932
1933                 break;
1934
1935         case LE_LINK:
1936                 if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
1937                         phys |= BT_PHY_LE_1M_TX;
1938
1939                 if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
1940                         phys |= BT_PHY_LE_1M_RX;
1941
1942                 if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
1943                         phys |= BT_PHY_LE_2M_TX;
1944
1945                 if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
1946                         phys |= BT_PHY_LE_2M_RX;
1947
1948                 if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
1949                         phys |= BT_PHY_LE_CODED_TX;
1950
1951                 if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
1952                         phys |= BT_PHY_LE_CODED_RX;
1953
1954                 break;
1955         }
1956
1957         return phys;
1958 }
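
/* The bitmask built above is what a connected socket reports through the
 * BT_PHY socket option, so a userspace reader looks roughly like the sketch
 * below (assuming headers that expose SOL_BLUETOOTH, BT_PHY and the BT_PHY_*
 * flags; error handling omitted):
 *
 *	uint32_t phys;
 *	socklen_t len = sizeof(phys);
 *
 *	if (!getsockopt(fd, SOL_BLUETOOTH, BT_PHY, &phys, &len) &&
 *	    (phys & BT_PHY_EDR_3M_5SLOT))
 *		printf("3-DH5 packets are allowed on this link\n");
 */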