GNU Linux-libre 6.1.90-gnu
[releases.git] / net / bluetooth / hci_event.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28 #include <linux/crypto.h>
29 #include <crypto/algapi.h>
30
31 #include <net/bluetooth/bluetooth.h>
32 #include <net/bluetooth/hci_core.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "hci_request.h"
36 #include "hci_debugfs.h"
37 #include "hci_codec.h"
38 #include "a2mp.h"
39 #include "amp.h"
40 #include "smp.h"
41 #include "msft.h"
42 #include "eir.h"
43
/* All-zero 16-byte link key, used to detect blank/invalid keys. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Convert a duration in seconds to scheduler jiffies. */
#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
48
49 /* Handle HCI Event packets */
50
51 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
52                              u8 ev, size_t len)
53 {
54         void *data;
55
56         data = skb_pull_data(skb, len);
57         if (!data)
58                 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
59
60         return data;
61 }
62
63 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
64                              u16 op, size_t len)
65 {
66         void *data;
67
68         data = skb_pull_data(skb, len);
69         if (!data)
70                 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
71
72         return data;
73 }
74
75 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
76                                 u8 ev, size_t len)
77 {
78         void *data;
79
80         data = skb_pull_data(skb, len);
81         if (!data)
82                 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
83
84         return data;
85 }
86
/* Command Complete handler for HCI_Inquiry_Cancel.
 *
 * On success, clears the HCI_INQUIRY flag, wakes any waiters on that
 * bit, and moves discovery to DISCOVERY_STOPPED unless an LE active
 * scan is still running.  Returns the (possibly rewritten) status.
 */
static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		rp->status = 0x00;
	}

	if (rp->status)
		return rp->status;

	/* Clear the bit before waking waiters; the barrier orders the
	 * clear against the wake as wake_up_bit() requires.
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Resume any connection attempts deferred while inquiring. */
	hci_conn_check_pending(hdev);

	return rp->status;
}
126
127 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
128                               struct sk_buff *skb)
129 {
130         struct hci_ev_status *rp = data;
131
132         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
133
134         if (rp->status)
135                 return rp->status;
136
137         hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
138
139         return rp->status;
140 }
141
142 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
143                                    struct sk_buff *skb)
144 {
145         struct hci_ev_status *rp = data;
146
147         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
148
149         if (rp->status)
150                 return rp->status;
151
152         hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
153
154         hci_conn_check_pending(hdev);
155
156         return rp->status;
157 }
158
159 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
160                                         struct sk_buff *skb)
161 {
162         struct hci_ev_status *rp = data;
163
164         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
165
166         return rp->status;
167 }
168
169 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
170                                 struct sk_buff *skb)
171 {
172         struct hci_rp_role_discovery *rp = data;
173         struct hci_conn *conn;
174
175         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
176
177         if (rp->status)
178                 return rp->status;
179
180         hci_dev_lock(hdev);
181
182         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
183         if (conn)
184                 conn->role = rp->role;
185
186         hci_dev_unlock(hdev);
187
188         return rp->status;
189 }
190
191 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
192                                   struct sk_buff *skb)
193 {
194         struct hci_rp_read_link_policy *rp = data;
195         struct hci_conn *conn;
196
197         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
198
199         if (rp->status)
200                 return rp->status;
201
202         hci_dev_lock(hdev);
203
204         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
205         if (conn)
206                 conn->link_policy = __le16_to_cpu(rp->policy);
207
208         hci_dev_unlock(hdev);
209
210         return rp->status;
211 }
212
/* Command Complete handler for HCI_Write_Link_Policy_Settings.
 *
 * On success, mirror the policy value we sent in the command into the
 * connection identified by the returned handle.
 */
static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Recover the parameters of the command this event completes. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* Skip the 2-byte connection handle at the start of the
		 * command payload; the policy field follows it.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}
239
240 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
241                                       struct sk_buff *skb)
242 {
243         struct hci_rp_read_def_link_policy *rp = data;
244
245         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
246
247         if (rp->status)
248                 return rp->status;
249
250         hdev->link_policy = __le16_to_cpu(rp->policy);
251
252         return rp->status;
253 }
254
/* Command Complete handler for HCI_Write_Default_Link_Policy_Settings.
 *
 * On success, mirror the policy value we sent into hdev->link_policy.
 */
static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Recover the parameters of the command this event completes. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return rp->status;

	/* The command payload is just the little-endian policy value. */
	hdev->link_policy = get_unaligned_le16(sent);

	return rp->status;
}
274
/* Command Complete handler for HCI_Reset.
 *
 * Always clears the in-flight HCI_RESET bit; on success additionally
 * returns the host-side state (flags, discovery, cached advertising and
 * scan-response data, LE lists) to its post-reset defaults.
 */
static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* The reset attempt finished, successful or not. */
	clear_bit(HCI_RESET, &hdev->flags);

	if (rp->status)
		return rp->status;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Cached advertising and scan-response data no longer match the
	 * controller state after a reset.
	 */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller forgets these lists on reset; drop our copies. */
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);

	return rp->status;
}
309
/* Command Complete handler for HCI_Read_Stored_Link_Key.
 *
 * When the command we sent asked for all keys (read_all == 0x01) and
 * it succeeded, cache the controller's key-storage capacity and the
 * number of keys currently stored.
 */
static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = data;
	struct hci_cp_read_stored_link_key *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Recover the parameters of the command this event completes. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return rp->status;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
	}

	return rp->status;
}
329
330 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
331                                         struct sk_buff *skb)
332 {
333         struct hci_rp_delete_stored_link_key *rp = data;
334         u16 num_keys;
335
336         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
337
338         if (rp->status)
339                 return rp->status;
340
341         num_keys = le16_to_cpu(rp->num_keys);
342
343         if (num_keys <= hdev->stored_num_keys)
344                 hdev->stored_num_keys -= num_keys;
345         else
346                 hdev->stored_num_keys = 0;
347
348         return rp->status;
349 }
350
/* Command Complete handler for HCI_Write_Local_Name.
 *
 * When the management interface is active, let mgmt finish the pending
 * set-local-name operation (success or failure); otherwise cache the
 * name we sent on success.
 */
static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Recover the name we sent in the command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, rp->status);
	else if (!rp->status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	return rp->status;
}
374
375 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
376                                  struct sk_buff *skb)
377 {
378         struct hci_rp_read_local_name *rp = data;
379
380         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
381
382         if (rp->status)
383                 return rp->status;
384
385         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
386             hci_dev_test_flag(hdev, HCI_CONFIG))
387                 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
388
389         return rp->status;
390 }
391
/* Command Complete handler for HCI_Write_Authentication_Enable.
 *
 * On success, mirror the parameter we sent into the HCI_AUTH flag;
 * always notify the management interface (if active) so a pending
 * auth-enable operation can complete.
 */
static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Recover the parameter we sent in the command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	/* mgmt is told regardless of status so it can report failure. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
422
423 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
424                                     struct sk_buff *skb)
425 {
426         struct hci_ev_status *rp = data;
427         __u8 param;
428         void *sent;
429
430         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
431
432         if (rp->status)
433                 return rp->status;
434
435         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
436         if (!sent)
437                 return rp->status;
438
439         param = *((__u8 *) sent);
440
441         if (param)
442                 set_bit(HCI_ENCRYPT, &hdev->flags);
443         else
444                 clear_bit(HCI_ENCRYPT, &hdev->flags);
445
446         return rp->status;
447 }
448
/* Command Complete handler for HCI_Write_Scan_Enable.
 *
 * On success, mirror the requested inquiry/page scan bits into the
 * HCI_ISCAN/HCI_PSCAN flags.  On failure, reset the discoverable
 * timeout since the requested scan state never took effect.
 */
static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 param;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Recover the scan-enable parameter we sent in the command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return rp->status;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (rp->status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);

	return rp->status;
}
486
487 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
488                                   struct sk_buff *skb)
489 {
490         struct hci_ev_status *rp = data;
491         struct hci_cp_set_event_filter *cp;
492         void *sent;
493
494         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
495
496         if (rp->status)
497                 return rp->status;
498
499         sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
500         if (!sent)
501                 return rp->status;
502
503         cp = (struct hci_cp_set_event_filter *)sent;
504
505         if (cp->flt_type == HCI_FLT_CLEAR_ALL)
506                 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
507         else
508                 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
509
510         return rp->status;
511 }
512
/* Command Complete handler for HCI_Read_Class_of_Device.
 *
 * On success, cache the 3-byte class of device.
 */
static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = data;

	/* Defensive guard: this handler must never run without a device. */
	if (WARN_ON(!hdev))
		return HCI_ERROR_UNSPECIFIED;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	/* dev_class is stored little-endian; print most significant first. */
	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);

	return rp->status;
}
533
/* Command Complete handler for HCI_Write_Class_of_Device.
 *
 * On success, cache the class we sent; always notify the management
 * interface (if active) so a pending set-class operation completes.
 */
static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Recover the 3-byte class we sent in the command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status)
		memcpy(hdev->dev_class, sent, 3);

	/* mgmt is told regardless of status so it can report failure. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);

	hci_dev_unlock(hdev);

	return rp->status;
}
558
559 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
560                                     struct sk_buff *skb)
561 {
562         struct hci_rp_read_voice_setting *rp = data;
563         __u16 setting;
564
565         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
566
567         if (rp->status)
568                 return rp->status;
569
570         setting = __le16_to_cpu(rp->voice_setting);
571
572         if (hdev->voice_setting == setting)
573                 return rp->status;
574
575         hdev->voice_setting = setting;
576
577         bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
578
579         if (hdev->notify)
580                 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
581
582         return rp->status;
583 }
584
/* Command Complete handler for HCI_Write_Voice_Setting.
 *
 * On success, cache the voice setting we sent and, if it changed,
 * notify the transport driver so it can adapt its SCO handling.
 */
static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u16 setting;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Recover the setting we sent in the command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return rp->status;

	setting = get_unaligned_le16(sent);

	/* No change, no notification needed. */
	if (hdev->voice_setting == setting)
		return rp->status;

	hdev->voice_setting = setting;

	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);

	return rp->status;
}
615
616 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
617                                         struct sk_buff *skb)
618 {
619         struct hci_rp_read_num_supported_iac *rp = data;
620
621         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
622
623         if (rp->status)
624                 return rp->status;
625
626         hdev->num_iac = rp->num_iac;
627
628         bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
629
630         return rp->status;
631 }
632
633 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
634                                 struct sk_buff *skb)
635 {
636         struct hci_ev_status *rp = data;
637         struct hci_cp_write_ssp_mode *sent;
638
639         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
640
641         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
642         if (!sent)
643                 return rp->status;
644
645         hci_dev_lock(hdev);
646
647         if (!rp->status) {
648                 if (sent->mode)
649                         hdev->features[1][0] |= LMP_HOST_SSP;
650                 else
651                         hdev->features[1][0] &= ~LMP_HOST_SSP;
652         }
653
654         if (!rp->status) {
655                 if (sent->mode)
656                         hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
657                 else
658                         hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
659         }
660
661         hci_dev_unlock(hdev);
662
663         return rp->status;
664 }
665
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support.
 *
 * On success, mirror the support value we sent into the cached host
 * features.  The HCI_SC_ENABLED flag is only updated here when the
 * management interface is NOT active; with mgmt active, that flag is
 * managed by the mgmt layer instead.
 */
static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_write_sc_support *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	/* Recover the support value we sent in the command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (!rp->status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	/* Flag update is deliberately skipped when mgmt owns SC state. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);

	return rp->status;
}
698
699 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
700                                     struct sk_buff *skb)
701 {
702         struct hci_rp_read_local_version *rp = data;
703
704         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
705
706         if (rp->status)
707                 return rp->status;
708
709         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
710             hci_dev_test_flag(hdev, HCI_CONFIG)) {
711                 hdev->hci_ver = rp->hci_ver;
712                 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
713                 hdev->lmp_ver = rp->lmp_ver;
714                 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
715                 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
716         }
717
718         return rp->status;
719 }
720
/* Command Complete handler for HCI_Read_Encryption_Key_Size.
 *
 * Records the negotiated key size on the connection and then confirms
 * encryption via hci_encrypt_cfm().  A failed read, or a key shorter
 * than hdev->min_enc_key_size, is treated as an authentication failure
 * so the upper layers can tear the link down rather than run with a
 * weak key.
 */
static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_read_enc_key_size *rp = data;
	struct hci_conn *conn;
	u16 handle;
	u8 status = rp->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		/* No matching connection; report an unspecified error and
		 * skip the encrypt confirmation entirely.
		 */
		status = 0xFF;
		goto done;
	}

	/* While unexpected, the read_enc_key_size command may fail. The most
	 * secure approach is to then assume the key size is 0 to force a
	 * disconnection.
	 */
	if (status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = 0;
	} else {
		conn->enc_key_size = rp->key_size;
		status = 0;

		if (conn->enc_key_size < hdev->min_enc_key_size) {
			/* As slave role, the conn->state has been set to
			 * BT_CONNECTED and l2cap conn req might not be received
			 * yet, at this moment the l2cap layer almost does
			 * nothing with the non-zero status.
			 * So we also clear encrypt related bits, and then the
			 * handler of l2cap conn req will get the right secure
			 * state at a later time.
			 */
			status = HCI_ERROR_AUTH_FAILURE;
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* Confirm (or fail) the encryption change with the final status. */
	hci_encrypt_cfm(conn, status);

done:
	hci_dev_unlock(hdev);

	return status;
}
775
776 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
777                                      struct sk_buff *skb)
778 {
779         struct hci_rp_read_local_commands *rp = data;
780
781         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
782
783         if (rp->status)
784                 return rp->status;
785
786         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
787             hci_dev_test_flag(hdev, HCI_CONFIG))
788                 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
789
790         return rp->status;
791 }
792
793 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
794                                            struct sk_buff *skb)
795 {
796         struct hci_rp_read_auth_payload_to *rp = data;
797         struct hci_conn *conn;
798
799         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
800
801         if (rp->status)
802                 return rp->status;
803
804         hci_dev_lock(hdev);
805
806         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
807         if (conn)
808                 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
809
810         hci_dev_unlock(hdev);
811
812         return rp->status;
813 }
814
/* Command Complete handler for HCI_Write_Authenticated_Payload_Timeout.
 *
 * On success, mirror the timeout we sent into the matching connection.
 */
static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = data;
	struct hci_conn *conn;
	void *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Recover the parameters of the command this event completes. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* Skip the 2-byte connection handle at the start of the
		 * command payload; the timeout field follows it.
		 */
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);

	return rp->status;
}
841
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 *
 * Caches page 0 of the LMP feature mask and derives the ACL packet
 * types and (e)SCO link types the controller can use from the
 * individual feature bits.
 */
static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets. */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types. */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types. */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO rates. */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	return rp->status;
}
893
/* Command Complete handler for HCI_Read_Local_Extended_Features.
 *
 * Raises the cached maximum feature page if the controller reports a
 * higher one (unless a quirk marks page 2 as broken), and caches the
 * reported feature page.
 */
static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	if (hdev->max_page < rp->max_page) {
		/* Some controllers advertise a bogus page 2; keep the
		 * lower max_page in that case instead of trusting them.
		 */
		if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
			     &hdev->quirks))
			bt_dev_warn(hdev, "broken local ext features page 2");
		else
			hdev->max_page = rp->max_page;
	}

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);

	return rp->status;
}
917
918 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
919                                         struct sk_buff *skb)
920 {
921         struct hci_rp_read_flow_control_mode *rp = data;
922
923         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
924
925         if (rp->status)
926                 return rp->status;
927
928         hdev->flow_ctl_mode = rp->mode;
929
930         return rp->status;
931 }
932
/* Command Complete handler for HCI_Read_Buffer_Size.
 *
 * Caches the controller's ACL/SCO MTUs and packet counts, applies the
 * fixup quirk for controllers that misreport SCO buffers, and seeds the
 * available-credit counters used by the TX scheduler.
 */
static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report unusable SCO buffer values; override
	 * them with known-good defaults.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* All buffers are free at this point. */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);

	return rp->status;
}
961
962 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
963                               struct sk_buff *skb)
964 {
965         struct hci_rp_read_bd_addr *rp = data;
966
967         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
968
969         if (rp->status)
970                 return rp->status;
971
972         if (test_bit(HCI_INIT, &hdev->flags))
973                 bacpy(&hdev->bdaddr, &rp->bdaddr);
974
975         if (hci_dev_test_flag(hdev, HCI_SETUP))
976                 bacpy(&hdev->setup_addr, &rp->bdaddr);
977
978         return rp->status;
979 }
980
981 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
982                                          struct sk_buff *skb)
983 {
984         struct hci_rp_read_local_pairing_opts *rp = data;
985
986         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
987
988         if (rp->status)
989                 return rp->status;
990
991         if (hci_dev_test_flag(hdev, HCI_SETUP) ||
992             hci_dev_test_flag(hdev, HCI_CONFIG)) {
993                 hdev->pairing_opts = rp->pairing_opts;
994                 hdev->max_enc_key_size = rp->max_key_size;
995         }
996
997         return rp->status;
998 }
999
1000 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
1001                                          struct sk_buff *skb)
1002 {
1003         struct hci_rp_read_page_scan_activity *rp = data;
1004
1005         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1006
1007         if (rp->status)
1008                 return rp->status;
1009
1010         if (test_bit(HCI_INIT, &hdev->flags)) {
1011                 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1012                 hdev->page_scan_window = __le16_to_cpu(rp->window);
1013         }
1014
1015         return rp->status;
1016 }
1017
1018 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1019                                           struct sk_buff *skb)
1020 {
1021         struct hci_ev_status *rp = data;
1022         struct hci_cp_write_page_scan_activity *sent;
1023
1024         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1025
1026         if (rp->status)
1027                 return rp->status;
1028
1029         sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1030         if (!sent)
1031                 return rp->status;
1032
1033         hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1034         hdev->page_scan_window = __le16_to_cpu(sent->window);
1035
1036         return rp->status;
1037 }
1038
1039 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1040                                      struct sk_buff *skb)
1041 {
1042         struct hci_rp_read_page_scan_type *rp = data;
1043
1044         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1045
1046         if (rp->status)
1047                 return rp->status;
1048
1049         if (test_bit(HCI_INIT, &hdev->flags))
1050                 hdev->page_scan_type = rp->type;
1051
1052         return rp->status;
1053 }
1054
1055 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1056                                       struct sk_buff *skb)
1057 {
1058         struct hci_ev_status *rp = data;
1059         u8 *type;
1060
1061         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1062
1063         if (rp->status)
1064                 return rp->status;
1065
1066         type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1067         if (type)
1068                 hdev->page_scan_type = *type;
1069
1070         return rp->status;
1071 }
1072
1073 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1074                                       struct sk_buff *skb)
1075 {
1076         struct hci_rp_read_data_block_size *rp = data;
1077
1078         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1079
1080         if (rp->status)
1081                 return rp->status;
1082
1083         hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1084         hdev->block_len = __le16_to_cpu(rp->block_len);
1085         hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1086
1087         hdev->block_cnt = hdev->num_blocks;
1088
1089         BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1090                hdev->block_cnt, hdev->block_len);
1091
1092         return rp->status;
1093 }
1094
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
                            struct sk_buff *skb)
{
        struct hci_rp_read_clock *rp = data;
        struct hci_cp_read_clock *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        hci_dev_lock(hdev);

        cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
        if (!cp)
                goto unlock;

        /* which == 0x00 asked for the local (device) clock; anything
         * else refers to the clock of the connection identified by the
         * handle in the response.
         */
        if (cp->which == 0x00) {
                hdev->clock = le32_to_cpu(rp->clock);
                goto unlock;
        }

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
        if (conn) {
                conn->clock = le32_to_cpu(rp->clock);
                conn->clock_accuracy = le16_to_cpu(rp->accuracy);
        }

unlock:
        hci_dev_unlock(hdev);
        return rp->status;
}
1128
static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
                                     struct sk_buff *skb)
{
        struct hci_rp_read_local_amp_info *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* Cache the AMP controller capabilities reported by the event */
        hdev->amp_status = rp->amp_status;
        hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
        hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
        hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
        hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
        hdev->amp_type = rp->amp_type;
        hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
        hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
        hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
        hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

        return rp->status;
}
1152
1153 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1154                                        struct sk_buff *skb)
1155 {
1156         struct hci_rp_read_inq_rsp_tx_power *rp = data;
1157
1158         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1159
1160         if (rp->status)
1161                 return rp->status;
1162
1163         hdev->inq_tx_power = rp->tx_power;
1164
1165         return rp->status;
1166 }
1167
1168 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1169                                              struct sk_buff *skb)
1170 {
1171         struct hci_rp_read_def_err_data_reporting *rp = data;
1172
1173         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1174
1175         if (rp->status)
1176                 return rp->status;
1177
1178         hdev->err_data_reporting = rp->err_data_reporting;
1179
1180         return rp->status;
1181 }
1182
1183 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1184                                               struct sk_buff *skb)
1185 {
1186         struct hci_ev_status *rp = data;
1187         struct hci_cp_write_def_err_data_reporting *cp;
1188
1189         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1190
1191         if (rp->status)
1192                 return rp->status;
1193
1194         cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1195         if (!cp)
1196                 return rp->status;
1197
1198         hdev->err_data_reporting = cp->err_data_reporting;
1199
1200         return rp->status;
1201 }
1202
static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
                                struct sk_buff *skb)
{
        struct hci_rp_pin_code_reply *rp = data;
        struct hci_cp_pin_code_reply *cp;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        hci_dev_lock(hdev);

        /* Notify the management interface regardless of status, so a
         * pending mgmt reply is always completed.
         */
        if (hci_dev_test_flag(hdev, HCI_MGMT))
                mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

        if (rp->status)
                goto unlock;

        cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
        if (!cp)
                goto unlock;

        /* Remember the PIN length on the matching ACL connection */
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
        if (conn)
                conn->pin_length = cp->pin_len;

unlock:
        hci_dev_unlock(hdev);
        return rp->status;
}
1232
1233 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1234                                     struct sk_buff *skb)
1235 {
1236         struct hci_rp_pin_code_neg_reply *rp = data;
1237
1238         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1239
1240         hci_dev_lock(hdev);
1241
1242         if (hci_dev_test_flag(hdev, HCI_MGMT))
1243                 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1244                                                  rp->status);
1245
1246         hci_dev_unlock(hdev);
1247
1248         return rp->status;
1249 }
1250
1251 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1252                                      struct sk_buff *skb)
1253 {
1254         struct hci_rp_le_read_buffer_size *rp = data;
1255
1256         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1257
1258         if (rp->status)
1259                 return rp->status;
1260
1261         hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1262         hdev->le_pkts = rp->le_max_pkt;
1263
1264         hdev->le_cnt = hdev->le_pkts;
1265
1266         BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1267
1268         return rp->status;
1269 }
1270
1271 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1272                                         struct sk_buff *skb)
1273 {
1274         struct hci_rp_le_read_local_features *rp = data;
1275
1276         BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1277
1278         if (rp->status)
1279                 return rp->status;
1280
1281         memcpy(hdev->le_features, rp->features, 8);
1282
1283         return rp->status;
1284 }
1285
1286 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1287                                       struct sk_buff *skb)
1288 {
1289         struct hci_rp_le_read_adv_tx_power *rp = data;
1290
1291         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1292
1293         if (rp->status)
1294                 return rp->status;
1295
1296         hdev->adv_tx_power = rp->tx_power;
1297
1298         return rp->status;
1299 }
1300
1301 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1302                                     struct sk_buff *skb)
1303 {
1304         struct hci_rp_user_confirm_reply *rp = data;
1305
1306         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1307
1308         hci_dev_lock(hdev);
1309
1310         if (hci_dev_test_flag(hdev, HCI_MGMT))
1311                 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1312                                                  rp->status);
1313
1314         hci_dev_unlock(hdev);
1315
1316         return rp->status;
1317 }
1318
1319 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1320                                         struct sk_buff *skb)
1321 {
1322         struct hci_rp_user_confirm_reply *rp = data;
1323
1324         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1325
1326         hci_dev_lock(hdev);
1327
1328         if (hci_dev_test_flag(hdev, HCI_MGMT))
1329                 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1330                                                      ACL_LINK, 0, rp->status);
1331
1332         hci_dev_unlock(hdev);
1333
1334         return rp->status;
1335 }
1336
1337 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1338                                     struct sk_buff *skb)
1339 {
1340         struct hci_rp_user_confirm_reply *rp = data;
1341
1342         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1343
1344         hci_dev_lock(hdev);
1345
1346         if (hci_dev_test_flag(hdev, HCI_MGMT))
1347                 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1348                                                  0, rp->status);
1349
1350         hci_dev_unlock(hdev);
1351
1352         return rp->status;
1353 }
1354
1355 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1356                                         struct sk_buff *skb)
1357 {
1358         struct hci_rp_user_confirm_reply *rp = data;
1359
1360         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1361
1362         hci_dev_lock(hdev);
1363
1364         if (hci_dev_test_flag(hdev, HCI_MGMT))
1365                 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1366                                                      ACL_LINK, 0, rp->status);
1367
1368         hci_dev_unlock(hdev);
1369
1370         return rp->status;
1371 }
1372
static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
                                     struct sk_buff *skb)
{
        struct hci_rp_read_local_oob_data *rp = data;

        /* Nothing is cached here; only log and propagate the status */
        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        return rp->status;
}
1382
static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
                                         struct sk_buff *skb)
{
        struct hci_rp_read_local_oob_ext_data *rp = data;

        /* Nothing is cached here; only log and propagate the status */
        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        return rp->status;
}
1392
static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        bdaddr_t *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        /* Track the random address that is now active */
        bacpy(&hdev->random_addr, sent);

        /* If the new address is the current RPA, clear the expired flag
         * and re-arm the expiry timer so the address gets rotated again
         * after rpa_timeout seconds.
         */
        if (!bacmp(&hdev->rpa, sent)) {
                hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
                                   secs_to_jiffies(hdev->rpa_timeout));
        }

        hci_dev_unlock(hdev);

        return rp->status;
}
1422
1423 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1424                                     struct sk_buff *skb)
1425 {
1426         struct hci_ev_status *rp = data;
1427         struct hci_cp_le_set_default_phy *cp;
1428
1429         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1430
1431         if (rp->status)
1432                 return rp->status;
1433
1434         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1435         if (!cp)
1436                 return rp->status;
1437
1438         hci_dev_lock(hdev);
1439
1440         hdev->le_tx_def_phys = cp->tx_phys;
1441         hdev->le_rx_def_phys = cp->rx_phys;
1442
1443         hci_dev_unlock(hdev);
1444
1445         return rp->status;
1446 }
1447
static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
                                            struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        struct hci_cp_le_set_adv_set_rand_addr *cp;
        struct adv_info *adv;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
        /* Update only in case the adv instance since handle 0x00 shall be using
         * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
         * non-extended adverting.
         */
        if (!cp || !cp->handle)
                return rp->status;

        hci_dev_lock(hdev);

        adv = hci_find_adv_instance(hdev, cp->handle);
        if (adv) {
                /* Record the new per-instance random address; if it is
                 * the current RPA, re-arm this instance's expiry timer.
                 */
                bacpy(&adv->random_addr, &cp->bdaddr);
                if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
                        adv->rpa_expired = false;
                        queue_delayed_work(hdev->workqueue,
                                           &adv->rpa_expired_cb,
                                           secs_to_jiffies(hdev->rpa_timeout));
                }
        }

        hci_dev_unlock(hdev);

        return rp->status;
}
1485
static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        u8 *instance;
        int err;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
        if (!instance)
                return rp->status;

        hci_dev_lock(hdev);

        /* Drop the instance from the host state and, on success, notify
         * mgmt attributing the removal to the socket that issued the
         * command (taken from the sent command skb).
         */
        err = hci_remove_adv_instance(hdev, *instance);
        if (!err)
                mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
                                         *instance);

        hci_dev_unlock(hdev);

        return rp->status;
}
1513
static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        struct adv_info *adv, *n;
        int err;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        /* Only act when this completes the command we actually sent */
        if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
                return rp->status;

        hci_dev_lock(hdev);

        /* Remove every instance from the host state; _safe iteration is
         * required because entries are deleted while walking the list.
         */
        list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
                u8 instance = adv->instance;

                err = hci_remove_adv_instance(hdev, instance);
                if (!err)
                        mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
                                                 hdev, instance);
        }

        hci_dev_unlock(hdev);

        return rp->status;
}
1544
1545 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1546                                         struct sk_buff *skb)
1547 {
1548         struct hci_rp_le_read_transmit_power *rp = data;
1549
1550         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1551
1552         if (rp->status)
1553                 return rp->status;
1554
1555         hdev->min_le_tx_power = rp->min_le_tx_power;
1556         hdev->max_le_tx_power = rp->max_le_tx_power;
1557
1558         return rp->status;
1559 }
1560
1561 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1562                                      struct sk_buff *skb)
1563 {
1564         struct hci_ev_status *rp = data;
1565         struct hci_cp_le_set_privacy_mode *cp;
1566         struct hci_conn_params *params;
1567
1568         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1569
1570         if (rp->status)
1571                 return rp->status;
1572
1573         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1574         if (!cp)
1575                 return rp->status;
1576
1577         hci_dev_lock(hdev);
1578
1579         params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1580         if (params)
1581                 WRITE_ONCE(params->privacy_mode, cp->mode);
1582
1583         hci_dev_unlock(hdev);
1584
1585         return rp->status;
1586 }
1587
static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_status *rp = data;
        __u8 *sent;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
        if (!sent)
                return rp->status;

        hci_dev_lock(hdev);

        /* If we're doing connection initiation as peripheral. Set a
         * timeout in case something goes wrong.
         */
        if (*sent) {
                struct hci_conn *conn;

                hci_dev_set_flag(hdev, HCI_LE_ADV);

                /* Arm the LE connection timeout for a pending connect */
                conn = hci_lookup_le_connect(hdev);
                if (conn)
                        queue_delayed_work(hdev->workqueue,
                                           &conn->le_conn_timeout,
                                           conn->conn_timeout);
        } else {
                hci_dev_clear_flag(hdev, HCI_LE_ADV);
        }

        hci_dev_unlock(hdev);

        return rp->status;
}
1626
static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
                                       struct sk_buff *skb)
{
        struct hci_cp_le_set_ext_adv_enable *cp;
        struct hci_cp_ext_adv_set *set;
        struct adv_info *adv = NULL, *n;
        struct hci_ev_status *rp = data;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        if (rp->status)
                return rp->status;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
        if (!cp)
                return rp->status;

        /* The adv set entries follow the command header; only the first
         * entry is consulted here.
         */
        set = (void *)cp->data;

        hci_dev_lock(hdev);

        if (cp->num_of_sets)
                adv = hci_find_adv_instance(hdev, set->handle);

        if (cp->enable) {
                struct hci_conn *conn;

                hci_dev_set_flag(hdev, HCI_LE_ADV);

                if (adv)
                        adv->enabled = true;

                /* Arm the LE connection timeout for a pending connect */
                conn = hci_lookup_le_connect(hdev);
                if (conn)
                        queue_delayed_work(hdev->workqueue,
                                           &conn->le_conn_timeout,
                                           conn->conn_timeout);
        } else {
                if (cp->num_of_sets) {
                        if (adv)
                                adv->enabled = false;

                        /* If just one instance was disabled check if there are
                         * any other instance enabled before clearing HCI_LE_ADV
                         */
                        list_for_each_entry_safe(adv, n, &hdev->adv_instances,
                                                 list) {
                                if (adv->enabled)
                                        goto unlock;
                        }
                } else {
                        /* All instances shall be considered disabled */
                        list_for_each_entry_safe(adv, n, &hdev->adv_instances,
                                                 list)
                                adv->enabled = false;
                }

                hci_dev_clear_flag(hdev, HCI_LE_ADV);
        }

unlock:
        hci_dev_unlock(hdev);
        return rp->status;
}
1691
1692 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1693                                    struct sk_buff *skb)
1694 {
1695         struct hci_cp_le_set_scan_param *cp;
1696         struct hci_ev_status *rp = data;
1697
1698         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1699
1700         if (rp->status)
1701                 return rp->status;
1702
1703         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1704         if (!cp)
1705                 return rp->status;
1706
1707         hci_dev_lock(hdev);
1708
1709         hdev->le_scan_type = cp->type;
1710
1711         hci_dev_unlock(hdev);
1712
1713         return rp->status;
1714 }
1715
1716 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1717                                        struct sk_buff *skb)
1718 {
1719         struct hci_cp_le_set_ext_scan_params *cp;
1720         struct hci_ev_status *rp = data;
1721         struct hci_cp_le_scan_phy_params *phy_param;
1722
1723         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1724
1725         if (rp->status)
1726                 return rp->status;
1727
1728         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1729         if (!cp)
1730                 return rp->status;
1731
1732         phy_param = (void *)cp->data;
1733
1734         hci_dev_lock(hdev);
1735
1736         hdev->le_scan_type = phy_param->type;
1737
1738         hci_dev_unlock(hdev);
1739
1740         return rp->status;
1741 }
1742
1743 static bool has_pending_adv_report(struct hci_dev *hdev)
1744 {
1745         struct discovery_state *d = &hdev->discovery;
1746
1747         return bacmp(&d->last_adv_addr, BDADDR_ANY);
1748 }
1749
1750 static void clear_pending_adv_report(struct hci_dev *hdev)
1751 {
1752         struct discovery_state *d = &hdev->discovery;
1753
1754         bacpy(&d->last_adv_addr, BDADDR_ANY);
1755         d->last_adv_data_len = 0;
1756 }
1757
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 bdaddr_type, s8 rssi, u32 flags,
                                     u8 *data, u8 len)
{
        struct discovery_state *d = &hdev->discovery;

        /* Silently drop reports whose data would not fit the cache */
        if (len > max_adv_len(hdev))
                return;

        /* Cache the report so it can be delivered later (it is flushed
         * by le_set_scan_enable_complete when scanning is disabled).
         */
        bacpy(&d->last_adv_addr, bdaddr);
        d->last_adv_addr_type = bdaddr_type;
        d->last_adv_rssi = rssi;
        d->last_adv_flags = flags;
        memcpy(d->last_adv_data, data, len);
        d->last_adv_data_len = len;
}
1774
/* Update host discovery state after LE scanning has been enabled or
 * disabled. Shared by the legacy and extended scan enable handlers.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
        hci_dev_lock(hdev);

        switch (enable) {
        case LE_SCAN_ENABLE:
                hci_dev_set_flag(hdev, HCI_LE_SCAN);
                /* Active scanning merges ADV reports with scan
                 * responses, so start from a clean cache.
                 */
                if (hdev->le_scan_type == LE_SCAN_ACTIVE)
                        clear_pending_adv_report(hdev);
                if (hci_dev_test_flag(hdev, HCI_MESH))
                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
                break;

        case LE_SCAN_DISABLE:
                /* We do this here instead of when setting DISCOVERY_STOPPED
                 * since the latter would potentially require waiting for
                 * inquiry to stop too.
                 */
                if (has_pending_adv_report(hdev)) {
                        struct discovery_state *d = &hdev->discovery;

                        mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
                                          d->last_adv_addr_type, NULL,
                                          d->last_adv_rssi, d->last_adv_flags,
                                          d->last_adv_data,
                                          d->last_adv_data_len, NULL, 0, 0);
                }

                /* Cancel this timer so that we don't try to disable scanning
                 * when it's already disabled.
                 */
                cancel_delayed_work(&hdev->le_scan_disable);

                hci_dev_clear_flag(hdev, HCI_LE_SCAN);

                /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
                 * interrupted scanning due to a connect request. Mark
                 * therefore discovery as stopped.
                 */
                if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
                         hdev->discovery.state == DISCOVERY_FINDING)
                        queue_work(hdev->workqueue, &hdev->reenable_adv_work);

                break;

        default:
                bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
                           enable);
                break;
        }

        hci_dev_unlock(hdev);
}
1830
1831 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1832                                     struct sk_buff *skb)
1833 {
1834         struct hci_cp_le_set_scan_enable *cp;
1835         struct hci_ev_status *rp = data;
1836
1837         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1838
1839         if (rp->status)
1840                 return rp->status;
1841
1842         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1843         if (!cp)
1844                 return rp->status;
1845
1846         le_set_scan_enable_complete(hdev, cp->enable);
1847
1848         return rp->status;
1849 }
1850
1851 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1852                                         struct sk_buff *skb)
1853 {
1854         struct hci_cp_le_set_ext_scan_enable *cp;
1855         struct hci_ev_status *rp = data;
1856
1857         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1858
1859         if (rp->status)
1860                 return rp->status;
1861
1862         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1863         if (!cp)
1864                 return rp->status;
1865
1866         le_set_scan_enable_complete(hdev, cp->enable);
1867
1868         return rp->status;
1869 }
1870
1871 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1872                                       struct sk_buff *skb)
1873 {
1874         struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1875
1876         bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1877                    rp->num_of_sets);
1878
1879         if (rp->status)
1880                 return rp->status;
1881
1882         hdev->le_num_of_adv_sets = rp->num_of_sets;
1883
1884         return rp->status;
1885 }
1886
1887 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1888                                           struct sk_buff *skb)
1889 {
1890         struct hci_rp_le_read_accept_list_size *rp = data;
1891
1892         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1893
1894         if (rp->status)
1895                 return rp->status;
1896
1897         hdev->le_accept_list_size = rp->size;
1898
1899         return rp->status;
1900 }
1901
1902 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1903                                       struct sk_buff *skb)
1904 {
1905         struct hci_ev_status *rp = data;
1906
1907         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1908
1909         if (rp->status)
1910                 return rp->status;
1911
1912         hci_dev_lock(hdev);
1913         hci_bdaddr_list_clear(&hdev->le_accept_list);
1914         hci_dev_unlock(hdev);
1915
1916         return rp->status;
1917 }
1918
1919 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1920                                        struct sk_buff *skb)
1921 {
1922         struct hci_cp_le_add_to_accept_list *sent;
1923         struct hci_ev_status *rp = data;
1924
1925         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1926
1927         if (rp->status)
1928                 return rp->status;
1929
1930         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1931         if (!sent)
1932                 return rp->status;
1933
1934         hci_dev_lock(hdev);
1935         hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1936                             sent->bdaddr_type);
1937         hci_dev_unlock(hdev);
1938
1939         return rp->status;
1940 }
1941
1942 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1943                                          struct sk_buff *skb)
1944 {
1945         struct hci_cp_le_del_from_accept_list *sent;
1946         struct hci_ev_status *rp = data;
1947
1948         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1949
1950         if (rp->status)
1951                 return rp->status;
1952
1953         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1954         if (!sent)
1955                 return rp->status;
1956
1957         hci_dev_lock(hdev);
1958         hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1959                             sent->bdaddr_type);
1960         hci_dev_unlock(hdev);
1961
1962         return rp->status;
1963 }
1964
1965 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1966                                           struct sk_buff *skb)
1967 {
1968         struct hci_rp_le_read_supported_states *rp = data;
1969
1970         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1971
1972         if (rp->status)
1973                 return rp->status;
1974
1975         memcpy(hdev->le_states, rp->le_states, 8);
1976
1977         return rp->status;
1978 }
1979
1980 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1981                                       struct sk_buff *skb)
1982 {
1983         struct hci_rp_le_read_def_data_len *rp = data;
1984
1985         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1986
1987         if (rp->status)
1988                 return rp->status;
1989
1990         hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1991         hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1992
1993         return rp->status;
1994 }
1995
1996 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1997                                        struct sk_buff *skb)
1998 {
1999         struct hci_cp_le_write_def_data_len *sent;
2000         struct hci_ev_status *rp = data;
2001
2002         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2003
2004         if (rp->status)
2005                 return rp->status;
2006
2007         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
2008         if (!sent)
2009                 return rp->status;
2010
2011         hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
2012         hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
2013
2014         return rp->status;
2015 }
2016
2017 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2018                                        struct sk_buff *skb)
2019 {
2020         struct hci_cp_le_add_to_resolv_list *sent;
2021         struct hci_ev_status *rp = data;
2022
2023         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2024
2025         if (rp->status)
2026                 return rp->status;
2027
2028         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2029         if (!sent)
2030                 return rp->status;
2031
2032         hci_dev_lock(hdev);
2033         hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2034                                 sent->bdaddr_type, sent->peer_irk,
2035                                 sent->local_irk);
2036         hci_dev_unlock(hdev);
2037
2038         return rp->status;
2039 }
2040
2041 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2042                                          struct sk_buff *skb)
2043 {
2044         struct hci_cp_le_del_from_resolv_list *sent;
2045         struct hci_ev_status *rp = data;
2046
2047         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2048
2049         if (rp->status)
2050                 return rp->status;
2051
2052         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2053         if (!sent)
2054                 return rp->status;
2055
2056         hci_dev_lock(hdev);
2057         hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2058                             sent->bdaddr_type);
2059         hci_dev_unlock(hdev);
2060
2061         return rp->status;
2062 }
2063
2064 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2065                                       struct sk_buff *skb)
2066 {
2067         struct hci_ev_status *rp = data;
2068
2069         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2070
2071         if (rp->status)
2072                 return rp->status;
2073
2074         hci_dev_lock(hdev);
2075         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2076         hci_dev_unlock(hdev);
2077
2078         return rp->status;
2079 }
2080
2081 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2082                                           struct sk_buff *skb)
2083 {
2084         struct hci_rp_le_read_resolv_list_size *rp = data;
2085
2086         bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2087
2088         if (rp->status)
2089                 return rp->status;
2090
2091         hdev->le_resolv_list_size = rp->size;
2092
2093         return rp->status;
2094 }
2095
2096 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2097                                                struct sk_buff *skb)
2098 {
2099         struct hci_ev_status *rp = data;
2100         __u8 *sent;
2101
2102         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2103
2104         if (rp->status)
2105                 return rp->status;
2106
2107         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2108         if (!sent)
2109                 return rp->status;
2110
2111         hci_dev_lock(hdev);
2112
2113         if (*sent)
2114                 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2115         else
2116                 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2117
2118         hci_dev_unlock(hdev);
2119
2120         return rp->status;
2121 }
2122
2123 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2124                                       struct sk_buff *skb)
2125 {
2126         struct hci_rp_le_read_max_data_len *rp = data;
2127
2128         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2129
2130         if (rp->status)
2131                 return rp->status;
2132
2133         hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2134         hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2135         hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2136         hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2137
2138         return rp->status;
2139 }
2140
/* Command Complete handler for Write LE Host Supported.
 *
 * On success, mirror the le/simul values from the command we sent into
 * the local host feature bits (features[1][0]) and the matching device
 * flags. Returns the HCI status code from the event.
 */
static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	struct hci_ev_status *rp = data;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Recover the parameters of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		/* LE disabled also drops the advertising flag */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);

	return rp->status;
}
2176
2177 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2178                                struct sk_buff *skb)
2179 {
2180         struct hci_cp_le_set_adv_param *cp;
2181         struct hci_ev_status *rp = data;
2182
2183         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2184
2185         if (rp->status)
2186                 return rp->status;
2187
2188         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2189         if (!cp)
2190                 return rp->status;
2191
2192         hci_dev_lock(hdev);
2193         hdev->adv_addr_type = cp->own_address_type;
2194         hci_dev_unlock(hdev);
2195
2196         return rp->status;
2197 }
2198
/* Command Complete handler for LE Set Extended Advertising Parameters.
 *
 * Stores the selected own-address type and the TX power the controller
 * reported: handle 0 goes into hdev->adv_tx_power, any other handle
 * into the matching advertising instance (if it still exists). Finally
 * refreshes the advertising data for that handle since the TX power is
 * now known. Returns the HCI status code from the event.
 */
static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		/* Per-instance; silently skipped if the instance is gone */
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);

	return rp->status;
}
2232
2233 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2234                            struct sk_buff *skb)
2235 {
2236         struct hci_rp_read_rssi *rp = data;
2237         struct hci_conn *conn;
2238
2239         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2240
2241         if (rp->status)
2242                 return rp->status;
2243
2244         hci_dev_lock(hdev);
2245
2246         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2247         if (conn)
2248                 conn->rssi = rp->rssi;
2249
2250         hci_dev_unlock(hdev);
2251
2252         return rp->status;
2253 }
2254
2255 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2256                                struct sk_buff *skb)
2257 {
2258         struct hci_cp_read_tx_power *sent;
2259         struct hci_rp_read_tx_power *rp = data;
2260         struct hci_conn *conn;
2261
2262         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2263
2264         if (rp->status)
2265                 return rp->status;
2266
2267         sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2268         if (!sent)
2269                 return rp->status;
2270
2271         hci_dev_lock(hdev);
2272
2273         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2274         if (!conn)
2275                 goto unlock;
2276
2277         switch (sent->type) {
2278         case 0x00:
2279                 conn->tx_power = rp->tx_power;
2280                 break;
2281         case 0x01:
2282                 conn->max_tx_power = rp->tx_power;
2283                 break;
2284         }
2285
2286 unlock:
2287         hci_dev_unlock(hdev);
2288         return rp->status;
2289 }
2290
2291 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2292                                       struct sk_buff *skb)
2293 {
2294         struct hci_ev_status *rp = data;
2295         u8 *mode;
2296
2297         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2298
2299         if (rp->status)
2300                 return rp->status;
2301
2302         mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2303         if (mode)
2304                 hdev->ssp_debug_mode = *mode;
2305
2306         return rp->status;
2307 }
2308
2309 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2310 {
2311         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2312
2313         if (status) {
2314                 hci_conn_check_pending(hdev);
2315                 return;
2316         }
2317
2318         if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2319                 set_bit(HCI_INQUIRY, &hdev->flags);
2320 }
2321
/* Command Status handler for Create Connection.
 *
 * On failure status 0x0c (Command Disallowed) the first two attempts
 * park the connection in BT_CONNECT2 so it can be retried; any other
 * failure tears the connection down and notifies the upper layers. On
 * success, make sure an hci_conn object exists for the pending link.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				/* Give up: close, notify and delete */
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
2359
2360 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2361 {
2362         struct hci_cp_add_sco *cp;
2363         struct hci_conn *acl, *sco;
2364         __u16 handle;
2365
2366         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2367
2368         if (!status)
2369                 return;
2370
2371         cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2372         if (!cp)
2373                 return;
2374
2375         handle = __le16_to_cpu(cp->handle);
2376
2377         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2378
2379         hci_dev_lock(hdev);
2380
2381         acl = hci_conn_hash_lookup_handle(hdev, handle);
2382         if (acl) {
2383                 sco = acl->link;
2384                 if (sco) {
2385                         sco->state = BT_CLOSED;
2386
2387                         hci_connect_cfm(sco, status);
2388                         hci_conn_del(sco);
2389                 }
2390         }
2391
2392         hci_dev_unlock(hdev);
2393 }
2394
2395 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2396 {
2397         struct hci_cp_auth_requested *cp;
2398         struct hci_conn *conn;
2399
2400         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2401
2402         if (!status)
2403                 return;
2404
2405         cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2406         if (!cp)
2407                 return;
2408
2409         hci_dev_lock(hdev);
2410
2411         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2412         if (conn) {
2413                 if (conn->state == BT_CONFIG) {
2414                         hci_connect_cfm(conn, status);
2415                         hci_conn_drop(conn);
2416                 }
2417         }
2418
2419         hci_dev_unlock(hdev);
2420 }
2421
2422 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2423 {
2424         struct hci_cp_set_conn_encrypt *cp;
2425         struct hci_conn *conn;
2426
2427         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2428
2429         if (!status)
2430                 return;
2431
2432         cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2433         if (!cp)
2434                 return;
2435
2436         hci_dev_lock(hdev);
2437
2438         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2439         if (conn) {
2440                 if (conn->state == BT_CONFIG) {
2441                         hci_connect_cfm(conn, status);
2442                         hci_conn_drop(conn);
2443                 }
2444         }
2445
2446         hci_dev_unlock(hdev);
2447 }
2448
/* Decide whether an outgoing connection still requires authentication.
 *
 * Returns 1 when authentication should be requested for @conn, 0 when
 * it can be skipped.
 */
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	/* Only outgoing connections still in the config phase qualify */
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	/* SDP-level security never needs authentication */
	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}
2470
2471 static int hci_resolve_name(struct hci_dev *hdev,
2472                                    struct inquiry_entry *e)
2473 {
2474         struct hci_cp_remote_name_req cp;
2475
2476         memset(&cp, 0, sizeof(cp));
2477
2478         bacpy(&cp.bdaddr, &e->data.bdaddr);
2479         cp.pscan_rep_mode = e->data.pscan_rep_mode;
2480         cp.pscan_mode = e->data.pscan_mode;
2481         cp.clock_offset = e->data.clock_offset;
2482
2483         return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2484 }
2485
2486 static bool hci_resolve_next_name(struct hci_dev *hdev)
2487 {
2488         struct discovery_state *discov = &hdev->discovery;
2489         struct inquiry_entry *e;
2490
2491         if (list_empty(&discov->resolve))
2492                 return false;
2493
2494         /* We should stop if we already spent too much time resolving names. */
2495         if (time_after(jiffies, discov->name_resolve_timeout)) {
2496                 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2497                 return false;
2498         }
2499
2500         e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2501         if (!e)
2502                 return false;
2503
2504         if (hci_resolve_name(hdev, e) == 0) {
2505                 e->name_state = NAME_PENDING;
2506                 return true;
2507         }
2508
2509         return false;
2510 }
2511
/* Process the result of a remote name lookup for @bdaddr.
 *
 * Reports the device as connected to mgmt when appropriate, then
 * advances the discovery name-resolution state machine: forwards the
 * resolved name (or lack of one) to mgmt and either starts resolving
 * the next pending name or marks discovery as stopped. @name may be
 * NULL when the lookup failed; @conn may be NULL.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);

	/* NULL name means the lookup failed for this entry */
	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
			 name, name_len);

	/* Keep resolving; only stop discovery when nothing is left */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
2557
/* Command Status handler for Remote Name Request.
 *
 * Only failures are handled here: the pending-name bookkeeping is
 * updated (for mgmt-managed controllers) and, if the connection still
 * needs authentication, an Authentication Requested command is issued
 * now since no Remote Name Request Complete event will follow.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* NULL name reports the lookup as failed */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2600
2601 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2602 {
2603         struct hci_cp_read_remote_features *cp;
2604         struct hci_conn *conn;
2605
2606         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2607
2608         if (!status)
2609                 return;
2610
2611         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2612         if (!cp)
2613                 return;
2614
2615         hci_dev_lock(hdev);
2616
2617         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2618         if (conn) {
2619                 if (conn->state == BT_CONFIG) {
2620                         hci_connect_cfm(conn, status);
2621                         hci_conn_drop(conn);
2622                 }
2623         }
2624
2625         hci_dev_unlock(hdev);
2626 }
2627
2628 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2629 {
2630         struct hci_cp_read_remote_ext_features *cp;
2631         struct hci_conn *conn;
2632
2633         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2634
2635         if (!status)
2636                 return;
2637
2638         cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2639         if (!cp)
2640                 return;
2641
2642         hci_dev_lock(hdev);
2643
2644         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2645         if (conn) {
2646                 if (conn->state == BT_CONFIG) {
2647                         hci_connect_cfm(conn, status);
2648                         hci_conn_drop(conn);
2649                 }
2650         }
2651
2652         hci_dev_unlock(hdev);
2653 }
2654
2655 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2656 {
2657         struct hci_cp_setup_sync_conn *cp;
2658         struct hci_conn *acl, *sco;
2659         __u16 handle;
2660
2661         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2662
2663         if (!status)
2664                 return;
2665
2666         cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2667         if (!cp)
2668                 return;
2669
2670         handle = __le16_to_cpu(cp->handle);
2671
2672         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2673
2674         hci_dev_lock(hdev);
2675
2676         acl = hci_conn_hash_lookup_handle(hdev, handle);
2677         if (acl) {
2678                 sco = acl->link;
2679                 if (sco) {
2680                         sco->state = BT_CLOSED;
2681
2682                         hci_connect_cfm(sco, status);
2683                         hci_conn_del(sco);
2684                 }
2685         }
2686
2687         hci_dev_unlock(hdev);
2688 }
2689
2690 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2691 {
2692         struct hci_cp_enhanced_setup_sync_conn *cp;
2693         struct hci_conn *acl, *sco;
2694         __u16 handle;
2695
2696         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2697
2698         if (!status)
2699                 return;
2700
2701         cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2702         if (!cp)
2703                 return;
2704
2705         handle = __le16_to_cpu(cp->handle);
2706
2707         bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2708
2709         hci_dev_lock(hdev);
2710
2711         acl = hci_conn_hash_lookup_handle(hdev, handle);
2712         if (acl) {
2713                 sco = acl->link;
2714                 if (sco) {
2715                         sco->state = BT_CLOSED;
2716
2717                         hci_connect_cfm(sco, status);
2718                         hci_conn_del(sco);
2719                 }
2720         }
2721
2722         hci_dev_unlock(hdev);
2723 }
2724
2725 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2726 {
2727         struct hci_cp_sniff_mode *cp;
2728         struct hci_conn *conn;
2729
2730         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2731
2732         if (!status)
2733                 return;
2734
2735         cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2736         if (!cp)
2737                 return;
2738
2739         hci_dev_lock(hdev);
2740
2741         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2742         if (conn) {
2743                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2744
2745                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2746                         hci_sco_setup(conn, status);
2747         }
2748
2749         hci_dev_unlock(hdev);
2750 }
2751
2752 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2753 {
2754         struct hci_cp_exit_sniff_mode *cp;
2755         struct hci_conn *conn;
2756
2757         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2758
2759         if (!status)
2760                 return;
2761
2762         cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2763         if (!cp)
2764                 return;
2765
2766         hci_dev_lock(hdev);
2767
2768         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2769         if (conn) {
2770                 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2771
2772                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2773                         hci_sco_setup(conn, status);
2774         }
2775
2776         hci_dev_unlock(hdev);
2777 }
2778
/* Command Status handler for Disconnect.
 *
 * Normally the real cleanup happens in the Disconnection Complete event
 * handler; this path only runs when the command failed or the device is
 * suspended. It reports the outcome to mgmt, re-queues auto-connect
 * parameters where configured, informs bound sockets and finally
 * deletes the connection object.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
	 * otherwise cleanup the connection immediately.
	 */
	if (!status && !hdev->suspended)
		return;

	/* Recover the parameters of the command this event completes */
	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		/* Re-enable advertising for a slave LE link that stays up */
		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_enable_advertising(hdev);
		}

		/* Inform sockets conn is gone before we delete it */
		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);

		goto done;
	}

	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* Drop the link key when the connection was flagged for flushing */
	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);
	}

	/* Re-queue the device for auto-connection where configured */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			break;

		default:
			break;
		}
	}

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 cp->reason, mgmt_conn);

	hci_disconn_cfm(conn, cp->reason);

done:
	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */
	hci_conn_del(conn);
unlock:
	hci_dev_unlock(hdev);
}
2860
2861 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2862 {
2863         /* When using controller based address resolution, then the new
2864          * address types 0x02 and 0x03 are used. These types need to be
2865          * converted back into either public address or random address type
2866          */
2867         switch (type) {
2868         case ADDR_LE_DEV_PUBLIC_RESOLVED:
2869                 if (resolved)
2870                         *resolved = true;
2871                 return ADDR_LE_DEV_PUBLIC;
2872         case ADDR_LE_DEV_RANDOM_RESOLVED:
2873                 if (resolved)
2874                         *resolved = true;
2875                 return ADDR_LE_DEV_RANDOM;
2876         }
2877
2878         if (resolved)
2879                 *resolved = false;
2880         return type;
2881 }
2882
2883 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2884                               u8 peer_addr_type, u8 own_address_type,
2885                               u8 filter_policy)
2886 {
2887         struct hci_conn *conn;
2888
2889         conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2890                                        peer_addr_type);
2891         if (!conn)
2892                 return;
2893
2894         own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2895
2896         /* Store the initiator and responder address information which
2897          * is needed for SMP. These values will not change during the
2898          * lifetime of the connection.
2899          */
2900         conn->init_addr_type = own_address_type;
2901         if (own_address_type == ADDR_LE_DEV_RANDOM)
2902                 bacpy(&conn->init_addr, &hdev->random_addr);
2903         else
2904                 bacpy(&conn->init_addr, &hdev->bdaddr);
2905
2906         conn->resp_addr_type = peer_addr_type;
2907         bacpy(&conn->resp_addr, peer_addr);
2908 }
2909
2910 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2911 {
2912         struct hci_cp_le_create_conn *cp;
2913
2914         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2915
2916         /* All connection failure handling is taken care of by the
2917          * hci_conn_failed function which is triggered by the HCI
2918          * request completion callbacks used for connecting.
2919          */
2920         if (status)
2921                 return;
2922
2923         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2924         if (!cp)
2925                 return;
2926
2927         hci_dev_lock(hdev);
2928
2929         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2930                           cp->own_address_type, cp->filter_policy);
2931
2932         hci_dev_unlock(hdev);
2933 }
2934
2935 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2936 {
2937         struct hci_cp_le_ext_create_conn *cp;
2938
2939         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2940
2941         /* All connection failure handling is taken care of by the
2942          * hci_conn_failed function which is triggered by the HCI
2943          * request completion callbacks used for connecting.
2944          */
2945         if (status)
2946                 return;
2947
2948         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2949         if (!cp)
2950                 return;
2951
2952         hci_dev_lock(hdev);
2953
2954         cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2955                           cp->own_addr_type, cp->filter_policy);
2956
2957         hci_dev_unlock(hdev);
2958 }
2959
2960 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2961 {
2962         struct hci_cp_le_read_remote_features *cp;
2963         struct hci_conn *conn;
2964
2965         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2966
2967         if (!status)
2968                 return;
2969
2970         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2971         if (!cp)
2972                 return;
2973
2974         hci_dev_lock(hdev);
2975
2976         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2977         if (conn) {
2978                 if (conn->state == BT_CONFIG) {
2979                         hci_connect_cfm(conn, status);
2980                         hci_conn_drop(conn);
2981                 }
2982         }
2983
2984         hci_dev_unlock(hdev);
2985 }
2986
2987 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2988 {
2989         struct hci_cp_le_start_enc *cp;
2990         struct hci_conn *conn;
2991
2992         bt_dev_dbg(hdev, "status 0x%2.2x", status);
2993
2994         if (!status)
2995                 return;
2996
2997         hci_dev_lock(hdev);
2998
2999         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
3000         if (!cp)
3001                 goto unlock;
3002
3003         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3004         if (!conn)
3005                 goto unlock;
3006
3007         if (conn->state != BT_CONNECTED)
3008                 goto unlock;
3009
3010         hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3011         hci_conn_drop(conn);
3012
3013 unlock:
3014         hci_dev_unlock(hdev);
3015 }
3016
3017 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3018 {
3019         struct hci_cp_switch_role *cp;
3020         struct hci_conn *conn;
3021
3022         BT_DBG("%s status 0x%2.2x", hdev->name, status);
3023
3024         if (!status)
3025                 return;
3026
3027         cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3028         if (!cp)
3029                 return;
3030
3031         hci_dev_lock(hdev);
3032
3033         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3034         if (conn)
3035                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3036
3037         hci_dev_unlock(hdev);
3038 }
3039
/* HCI_EV_INQUIRY_COMPLETE: the controller finished a BR/EDR inquiry.
 * Wake any waiters on the HCI_INQUIRY bit and, when mgmt-driven
 * discovery is active, either start remote name resolution for cached
 * entries or mark the discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_status *ev = data;
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* Inquiry is over; let any connection attempt that was held back
	 * proceed (see hci_conn_check_pending()).
	 */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* The discovery state machine below only applies when the device
	 * is managed through the mgmt interface.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Names still need resolving: pick the next cache entry that needs
	 * a name and issue a Remote Name Request for it.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
3100
3101 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3102                                    struct sk_buff *skb)
3103 {
3104         struct hci_ev_inquiry_result *ev = edata;
3105         struct inquiry_data data;
3106         int i;
3107
3108         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3109                              flex_array_size(ev, info, ev->num)))
3110                 return;
3111
3112         bt_dev_dbg(hdev, "num %d", ev->num);
3113
3114         if (!ev->num)
3115                 return;
3116
3117         if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3118                 return;
3119
3120         hci_dev_lock(hdev);
3121
3122         for (i = 0; i < ev->num; i++) {
3123                 struct inquiry_info *info = &ev->info[i];
3124                 u32 flags;
3125
3126                 bacpy(&data.bdaddr, &info->bdaddr);
3127                 data.pscan_rep_mode     = info->pscan_rep_mode;
3128                 data.pscan_period_mode  = info->pscan_period_mode;
3129                 data.pscan_mode         = info->pscan_mode;
3130                 memcpy(data.dev_class, info->dev_class, 3);
3131                 data.clock_offset       = info->clock_offset;
3132                 data.rssi               = HCI_RSSI_INVALID;
3133                 data.ssp_mode           = 0x00;
3134
3135                 flags = hci_inquiry_cache_update(hdev, &data, false);
3136
3137                 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3138                                   info->dev_class, HCI_RSSI_INVALID,
3139                                   flags, NULL, 0, NULL, 0, 0);
3140         }
3141
3142         hci_dev_unlock(hdev);
3143 }
3144
/* HCI_EV_CONN_COMPLETE: a BR/EDR connection attempt (incoming or
 * outgoing, ACL or SCO) finished. On success the connection handle is
 * assigned and link setup continues; on failure the connection object
 * is cleaned up via hci_conn_failed().
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = data;
	struct hci_conn *conn;
	u8 status = ev->status;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* In case of error status and there is no connection pending
		 * just unlock as there is nothing to cleanup.
		 */
		if (ev->status)
			goto unlock;

		/* Connection may not exist if auto-connected. Check the bredr
		 * allowlist to see if this device is allowed to auto connect.
		 * If link is an ACL type, create a connection class
		 * automatically.
		 *
		 * Auto-connect will only occur if the event filter is
		 * programmed with a given address. Right now, event filter is
		 * only used during suspend.
		 */
		if (ev->link_type == ACL_LINK &&
		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
						      &ev->bdaddr,
						      BDADDR_BREDR)) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
					    HCI_ROLE_SLAVE);
			if (!conn) {
				bt_dev_err(hdev, "no memory for new conn");
				goto unlock;
			}
		} else {
			/* A SCO completion may arrive for a link that was
			 * set up as eSCO; reuse that connection object and
			 * correct its type.
			 */
			if (ev->link_type != SCO_LINK)
				goto unlock;

			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
						       &ev->bdaddr);
			if (!conn)
				goto unlock;

			conn->type = SCO_LINK;
		}
	}

	/* The HCI_Connection_Complete event is only sent once per connection.
	 * Processing it more than once per connection can corrupt kernel memory.
	 *
	 * As the connection handle is set here for the first time, it indicates
	 * whether the connection is already set up.
	 */
	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
		goto unlock;
	}

	if (!status) {
		conn->handle = __le16_to_cpu(ev->handle);
		if (conn->handle > HCI_CONN_HANDLE_MAX) {
			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
				   conn->handle, HCI_CONN_HANDLE_MAX);
			status = HCI_ERROR_INVALID_PARAMETERS;
			goto done;
		}

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Without SSP and without a stored link key, use the
			 * longer pairing timeout so the link stays up long
			 * enough to pair.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* "Link key request" completed ahead of "connect request" completes */
		if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
		    ev->link_type == ACL_LINK) {
			struct link_key *key;
			struct hci_cp_read_enc_key_size cp;

			key = hci_find_link_key(hdev, &ev->bdaddr);
			if (key) {
				set_bit(HCI_CONN_ENCRYPT, &conn->flags);

				/* Only query the key size if the controller
				 * supports HCI_Read_Encryption_Key_Size
				 * (commands[20] bit 4); otherwise assume the
				 * default size.
				 */
				if (!(hdev->commands[20] & 0x10)) {
					conn->enc_key_size = HCI_LINK_KEY_SIZE;
				} else {
					cp.handle = cpu_to_le16(conn->handle);
					if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
							 sizeof(cp), &cp)) {
						bt_dev_err(hdev, "sending read key size failed");
						conn->enc_key_size = HCI_LINK_KEY_SIZE;
					}
				}

				hci_encrypt_cfm(conn, ev->status);
			}
		}

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

done:
	if (status) {
		hci_conn_failed(conn, status);
	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
			break;
		}

		hci_connect_cfm(conn, status);
	}

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
3304
3305 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3306 {
3307         struct hci_cp_reject_conn_req cp;
3308
3309         bacpy(&cp.bdaddr, bdaddr);
3310         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3311         hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3312 }
3313
/* HCI_EV_CONN_REQUEST: a remote device is requesting a BR/EDR
 * connection. Apply the rejection policies (spoofed local address,
 * protocol veto, reject list, connectable/accept-list checks) and
 * either accept the connection, defer it to the protocol layer, or
 * reject it.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	/* Reject incoming connection from device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   &ev->bdaddr);
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Let the protocol layers veto or flag (HCI_PROTO_DEFER) the
	 * incoming connection.
	 */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	/* Devices on the reject list are always refused */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	/* Refresh the cached device class from the event */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* Synchronous link on a non-eSCO-capable protocol path:
		 * accept with fixed synchronous connection parameters.
		 */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Protocol deferred the decision; notify it and wait */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;
unlock:
	hci_dev_unlock(hdev);
}
3421
3422 static u8 hci_to_mgmt_reason(u8 err)
3423 {
3424         switch (err) {
3425         case HCI_ERROR_CONNECTION_TIMEOUT:
3426                 return MGMT_DEV_DISCONN_TIMEOUT;
3427         case HCI_ERROR_REMOTE_USER_TERM:
3428         case HCI_ERROR_REMOTE_LOW_RESOURCES:
3429         case HCI_ERROR_REMOTE_POWER_OFF:
3430                 return MGMT_DEV_DISCONN_REMOTE;
3431         case HCI_ERROR_LOCAL_HOST_TERM:
3432                 return MGMT_DEV_DISCONN_LOCAL_HOST;
3433         default:
3434                 return MGMT_DEV_DISCONN_UNKNOWN;
3435         }
3436 }
3437
/* HCI_EV_DISCONN_COMPLETE: a connection was torn down. Notify mgmt and
 * the protocol layers, re-arm auto-connect parameters where configured,
 * re-enable advertising when an LE peripheral link went away, and
 * finally delete the connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* An earlier authentication failure overrides the HCI reason
	 * reported to mgmt.
	 */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		/* Drop the link key if it was marked non-persistent */
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_scan(hdev);
	}

	/* Re-queue the device for auto-connection according to its
	 * configured policy.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect on actual link loss */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_del_init(params);
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
			hci_update_passive_scan(hdev);
			break;

		default:
			break;
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_enable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3522
/* HCI_EV_AUTH_COMPLETE: an authentication procedure finished. Update
 * the connection's auth state, notify the relevant layers, and kick
 * off encryption when it was requested or is implied by SSP during
 * link setup.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
		set_bit(HCI_CONN_AUTH, &conn->flags);
		conn->sec_level = conn->pending_sec_level;
	} else {
		/* Remember key-missing failures so the disconnect reason
		 * can be reported as an auth failure later.
		 */
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* During link setup with SSP, successful authentication is
		 * followed by enabling encryption before the connection is
		 * reported as established.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Encryption was requested while authentication was pending:
	 * start it now on success, or report the failure.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
3586
/* HCI_EV_REMOTE_NAME: a Remote Name Request finished. Feed the result
 * into the mgmt name-resolution state machine and, if an outgoing
 * connection on this link still needs authentication, request it.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name resolution bookkeeping only applies to mgmt-controlled
	 * devices.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		/* Report resolution failure with an empty name */
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3627
3628 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3629                                    struct sk_buff *skb)
3630 {
3631         struct hci_ev_encrypt_change *ev = data;
3632         struct hci_conn *conn;
3633
3634         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3635
3636         hci_dev_lock(hdev);
3637
3638         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3639         if (!conn)
3640                 goto unlock;
3641
3642         if (!ev->status) {
3643                 if (ev->encrypt) {
3644                         /* Encryption implies authentication */
3645                         set_bit(HCI_CONN_AUTH, &conn->flags);
3646                         set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3647                         conn->sec_level = conn->pending_sec_level;
3648
3649                         /* P-256 authentication key implies FIPS */
3650                         if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3651                                 set_bit(HCI_CONN_FIPS, &conn->flags);
3652
3653                         if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3654                             conn->type == LE_LINK)
3655                                 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3656                 } else {
3657                         clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3658                         clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3659                 }
3660         }
3661
3662         /* We should disregard the current RPA and generate a new one
3663          * whenever the encryption procedure fails.
3664          */
3665         if (ev->status && conn->type == LE_LINK) {
3666                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3667                 hci_adv_instances_set_rpa_expired(hdev, true);
3668         }
3669
3670         clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3671
3672         /* Check link security requirements are met */
3673         if (!hci_conn_check_link_mode(conn))
3674                 ev->status = HCI_ERROR_AUTH_FAILURE;
3675
3676         if (ev->status && conn->state == BT_CONNECTED) {
3677                 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3678                         set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3679
3680                 /* Notify upper layers so they can cleanup before
3681                  * disconnecting.
3682                  */
3683                 hci_encrypt_cfm(conn, ev->status);
3684                 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3685                 hci_conn_drop(conn);
3686                 goto unlock;
3687         }
3688
3689         /* Try reading the encryption key size for encrypted ACL links */
3690         if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3691                 struct hci_cp_read_enc_key_size cp;
3692
3693                 /* Only send HCI_Read_Encryption_Key_Size if the
3694                  * controller really supports it. If it doesn't, assume
3695                  * the default size (16).
3696                  */
3697                 if (!(hdev->commands[20] & 0x10)) {
3698                         conn->enc_key_size = HCI_LINK_KEY_SIZE;
3699                         goto notify;
3700                 }
3701
3702                 cp.handle = cpu_to_le16(conn->handle);
3703                 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3704                                  sizeof(cp), &cp)) {
3705                         bt_dev_err(hdev, "sending read key size failed");
3706                         conn->enc_key_size = HCI_LINK_KEY_SIZE;
3707                         goto notify;
3708                 }
3709
3710                 goto unlock;
3711         }
3712
3713         /* Set the default Authenticated Payload Timeout after
3714          * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3715          * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3716          * sent when the link is active and Encryption is enabled, the conn
3717          * type can be either LE or ACL and controller must support LMP Ping.
3718          * Ensure for AES-CCM encryption as well.
3719          */
3720         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3721             test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3722             ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3723              (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3724                 struct hci_cp_write_auth_payload_to cp;
3725
3726                 cp.handle = cpu_to_le16(conn->handle);
3727                 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3728                 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3729                              sizeof(cp), &cp);
3730         }
3731
3732 notify:
3733         hci_encrypt_cfm(conn, ev->status);
3734
3735 unlock:
3736         hci_dev_unlock(hdev);
3737 }
3738
3739 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3740                                              struct sk_buff *skb)
3741 {
3742         struct hci_ev_change_link_key_complete *ev = data;
3743         struct hci_conn *conn;
3744
3745         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3746
3747         hci_dev_lock(hdev);
3748
3749         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3750         if (conn) {
3751                 if (!ev->status)
3752                         set_bit(HCI_CONN_SECURE, &conn->flags);
3753
3754                 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3755
3756                 hci_key_change_cfm(conn, ev->status);
3757         }
3758
3759         hci_dev_unlock(hdev);
3760 }
3761
/* Handle the HCI Remote Supported Features Complete event.
 *
 * Caches page 0 of the remote features, then either continues connection
 * setup (extended features request or remote name request) or, when no
 * outgoing authentication is needed, completes it.
 */
static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_ev_remote_features *ev = data;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
        if (!conn)
                goto unlock;

        /* Cache feature page 0 on success. */
        if (!ev->status)
                memcpy(conn->features[0], ev->features, 8);

        /* The setup steps below only apply while still configuring. */
        if (conn->state != BT_CONFIG)
                goto unlock;

        /* If both sides support extended features, fetch page 1 first;
         * the remaining setup continues in that event's handler.
         */
        if (!ev->status && lmp_ext_feat_capable(hdev) &&
            lmp_ext_feat_capable(conn)) {
                struct hci_cp_read_remote_ext_features cp;
                cp.handle = ev->handle;
                cp.page = 0x01;
                hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
                             sizeof(cp), &cp);
                goto unlock;
        }

        /* Resolve the remote name before announcing the device to mgmt,
         * or notify mgmt directly if that has not happened yet.
         */
        if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
                struct hci_cp_remote_name_req cp;
                memset(&cp, 0, sizeof(cp));
                bacpy(&cp.bdaddr, &conn->dst);
                cp.pscan_rep_mode = 0x02;
                hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
        } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
                mgmt_device_connected(hdev, conn, NULL, 0);

        /* Without a pending outgoing authentication the link is usable now. */
        if (!hci_outgoing_auth_needed(hdev, conn)) {
                conn->state = BT_CONNECTED;
                hci_connect_cfm(conn, ev->status);
                hci_conn_drop(conn);
        }

unlock:
        hci_dev_unlock(hdev);
}
3810
/* Update command credit bookkeeping after a Command Complete/Status event.
 *
 * @ncmd: Num_HCI_Command_Packets from the event; non-zero means the
 *        controller can accept another command.
 */
static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
{
        cancel_delayed_work(&hdev->cmd_timer);

        rcu_read_lock();
        /* While HCI_RESET is in progress the counters are left untouched. */
        if (!test_bit(HCI_RESET, &hdev->flags)) {
                if (ncmd) {
                        cancel_delayed_work(&hdev->ncmd_timer);
                        atomic_set(&hdev->cmd_cnt, 1);
                } else {
                        /* No command credits: arm the ncmd watchdog unless
                         * the command workqueue is being drained.
                         */
                        if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
                                queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
                                                   HCI_NCMD_TIMEOUT);
                }
        }
        rcu_read_unlock();
}
3828
3829 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3830                                         struct sk_buff *skb)
3831 {
3832         struct hci_rp_le_read_buffer_size_v2 *rp = data;
3833
3834         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3835
3836         if (rp->status)
3837                 return rp->status;
3838
3839         hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3840         hdev->le_pkts  = rp->acl_max_pkt;
3841         hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3842         hdev->iso_pkts = rp->iso_max_pkt;
3843
3844         hdev->le_cnt  = hdev->le_pkts;
3845         hdev->iso_cnt = hdev->iso_pkts;
3846
3847         BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3848                hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3849
3850         return rp->status;
3851 }
3852
/* Handle the LE Set CIG Parameters Command Complete.
 *
 * On failure, every connection belonging to the CIG is closed. On success,
 * the connection handles returned by the controller are assigned in order
 * to the pending CIS connections of that CIG, and CIS creation is kicked
 * off for any whose underlying LE link is already connected.
 */
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_rp_le_set_cig_params *rp = data;
        struct hci_conn *conn;
        int i = 0;

        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

        hci_dev_lock(hdev);

        if (rp->status) {
                /* Tear down every connection in this CIG. */
                while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
                        conn->state = BT_CLOSED;
                        hci_connect_cfm(conn, rp->status);
                        hci_conn_del(conn);
                }
                goto unlock;
        }

        rcu_read_lock();

        /* Hand out the returned handles to the not-yet-connected ISO
         * members of this CIG, in list order.
         */
        list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
                if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id ||
                    conn->state == BT_CONNECTED)
                        continue;

                conn->handle = __le16_to_cpu(rp->handle[i++]);

                bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn,
                           conn->handle, conn->link);

                /* Create CIS if LE is already connected */
                if (conn->link && conn->link->state == BT_CONNECTED) {
                        /* NOTE(review): the RCU read lock is dropped and
                         * re-taken while still iterating the list — verify
                         * hci_le_create_cis() cannot invalidate the cursor.
                         */
                        rcu_read_unlock();
                        hci_le_create_cis(conn->link);
                        rcu_read_lock();
                }

                /* Stop once every returned handle has been consumed. */
                if (i == rp->num_handles)
                        break;
        }

        rcu_read_unlock();

unlock:
        hci_dev_unlock(hdev);

        return rp->status;
}
3903
3904 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3905                                    struct sk_buff *skb)
3906 {
3907         struct hci_rp_le_setup_iso_path *rp = data;
3908         struct hci_cp_le_setup_iso_path *cp;
3909         struct hci_conn *conn;
3910
3911         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3912
3913         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3914         if (!cp)
3915                 return rp->status;
3916
3917         hci_dev_lock(hdev);
3918
3919         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3920         if (!conn)
3921                 goto unlock;
3922
3923         if (rp->status) {
3924                 hci_connect_cfm(conn, rp->status);
3925                 hci_conn_del(conn);
3926                 goto unlock;
3927         }
3928
3929         switch (cp->direction) {
3930         /* Input (Host to Controller) */
3931         case 0x00:
3932                 /* Only confirm connection if output only */
3933                 if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu)
3934                         hci_connect_cfm(conn, rp->status);
3935                 break;
3936         /* Output (Controller to Host) */
3937         case 0x01:
3938                 /* Confirm connection since conn->iso_qos is always configured
3939                  * last.
3940                  */
3941                 hci_connect_cfm(conn, rp->status);
3942                 break;
3943         }
3944
3945 unlock:
3946         hci_dev_unlock(hdev);
3947         return rp->status;
3948 }
3949
/* Command Status handler for HCI_OP_LE_CREATE_BIG: only logs the status;
 * no further handling is done here (presumably the BIG Complete event
 * carries the real result — confirm against its handler).
 */
static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
        bt_dev_dbg(hdev, "status 0x%2.2x", status);
}
3954
3955 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3956                                    struct sk_buff *skb)
3957 {
3958         struct hci_ev_status *rp = data;
3959         struct hci_cp_le_set_per_adv_params *cp;
3960
3961         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3962
3963         if (rp->status)
3964                 return rp->status;
3965
3966         cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3967         if (!cp)
3968                 return rp->status;
3969
3970         /* TODO: set the conn state */
3971         return rp->status;
3972 }
3973
3974 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3975                                        struct sk_buff *skb)
3976 {
3977         struct hci_ev_status *rp = data;
3978         __u8 *sent;
3979
3980         bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3981
3982         if (rp->status)
3983                 return rp->status;
3984
3985         sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3986         if (!sent)
3987                 return rp->status;
3988
3989         hci_dev_lock(hdev);
3990
3991         if (*sent)
3992                 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3993         else
3994                 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3995
3996         hci_dev_unlock(hdev);
3997
3998         return rp->status;
3999 }
4000
/* Helpers to declare hci_cc_table entries: opcode, handler function and the
 * accepted Command Complete parameter length range.
 */
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
        .op = _op, \
        .func = _func, \
        .min_len = _min, \
        .max_len = _max, \
}

/* Fixed-length variant: min and max length are the same. */
#define HCI_CC(_op, _func, _len) \
        HCI_CC_VL(_op, _func, _len, _len)

/* Variant for commands whose only return parameter is the status byte. */
#define HCI_CC_STATUS(_op, _func) \
        HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4014
/* Dispatch table mapping HCI opcodes to their Command Complete handlers,
 * used by hci_cmd_complete_evt(). Each entry also carries the parameter
 * length range that hci_cc_func() validates before calling the handler.
 */
static const struct hci_cc {
        u16  op;        /* HCI opcode this entry applies to */
        u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
        u16  min_len;   /* shortest acceptable parameter length */
        u16  max_len;   /* longest expected parameter length (warn above) */
} hci_cc_table[] = {
        HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
        HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
        HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
        HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
                      hci_cc_remote_name_req_cancel),
        HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
               sizeof(struct hci_rp_role_discovery)),
        HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
               sizeof(struct hci_rp_read_link_policy)),
        HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
               sizeof(struct hci_rp_write_link_policy)),
        HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
               sizeof(struct hci_rp_read_def_link_policy)),
        HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
                      hci_cc_write_def_link_policy),
        HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
        HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
               sizeof(struct hci_rp_read_stored_link_key)),
        HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
               sizeof(struct hci_rp_delete_stored_link_key)),
        HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
        HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
               sizeof(struct hci_rp_read_local_name)),
        HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
        HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
        HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
        HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
        HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
               sizeof(struct hci_rp_read_class_of_dev)),
        HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
        HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
               sizeof(struct hci_rp_read_voice_setting)),
        HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
        HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
               sizeof(struct hci_rp_read_num_supported_iac)),
        HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
        HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
        HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
               sizeof(struct hci_rp_read_auth_payload_to)),
        HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
               sizeof(struct hci_rp_write_auth_payload_to)),
        HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
               sizeof(struct hci_rp_read_local_version)),
        HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
               sizeof(struct hci_rp_read_local_commands)),
        HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
               sizeof(struct hci_rp_read_local_features)),
        HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
               sizeof(struct hci_rp_read_local_ext_features)),
        HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
               sizeof(struct hci_rp_read_buffer_size)),
        HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
               sizeof(struct hci_rp_read_bd_addr)),
        HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
               sizeof(struct hci_rp_read_local_pairing_opts)),
        HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
               sizeof(struct hci_rp_read_page_scan_activity)),
        HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                      hci_cc_write_page_scan_activity),
        HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
               sizeof(struct hci_rp_read_page_scan_type)),
        HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
        HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
               sizeof(struct hci_rp_read_data_block_size)),
        HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
               sizeof(struct hci_rp_read_flow_control_mode)),
        HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
               sizeof(struct hci_rp_read_local_amp_info)),
        HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
               sizeof(struct hci_rp_read_clock)),
        HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
               sizeof(struct hci_rp_read_enc_key_size)),
        HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
               sizeof(struct hci_rp_read_inq_rsp_tx_power)),
        HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
               hci_cc_read_def_err_data_reporting,
               sizeof(struct hci_rp_read_def_err_data_reporting)),
        HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
                      hci_cc_write_def_err_data_reporting),
        HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
               sizeof(struct hci_rp_pin_code_reply)),
        HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
               sizeof(struct hci_rp_pin_code_neg_reply)),
        HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
               sizeof(struct hci_rp_read_local_oob_data)),
        HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
               sizeof(struct hci_rp_read_local_oob_ext_data)),
        HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
               sizeof(struct hci_rp_le_read_buffer_size)),
        HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
               sizeof(struct hci_rp_le_read_local_features)),
        HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
               sizeof(struct hci_rp_le_read_adv_tx_power)),
        HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
               sizeof(struct hci_rp_user_confirm_reply)),
        HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
               sizeof(struct hci_rp_user_confirm_reply)),
        HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
               sizeof(struct hci_rp_user_confirm_reply)),
        HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
               sizeof(struct hci_rp_user_confirm_reply)),
        HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
        HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
        HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
        HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
        HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
               hci_cc_le_read_accept_list_size,
               sizeof(struct hci_rp_le_read_accept_list_size)),
        HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
        HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
                      hci_cc_le_add_to_accept_list),
        HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
                      hci_cc_le_del_from_accept_list),
        HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
               sizeof(struct hci_rp_le_read_supported_states)),
        HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
               sizeof(struct hci_rp_le_read_def_data_len)),
        HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
                      hci_cc_le_write_def_data_len),
        HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
                      hci_cc_le_add_to_resolv_list),
        HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
                      hci_cc_le_del_from_resolv_list),
        HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
                      hci_cc_le_clear_resolv_list),
        HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
               sizeof(struct hci_rp_le_read_resolv_list_size)),
        HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
                      hci_cc_le_set_addr_resolution_enable),
        HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
               sizeof(struct hci_rp_le_read_max_data_len)),
        HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
                      hci_cc_write_le_host_supported),
        HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
        HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
               sizeof(struct hci_rp_read_rssi)),
        HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
               sizeof(struct hci_rp_read_tx_power)),
        HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
        HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                      hci_cc_le_set_ext_scan_param),
        HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                      hci_cc_le_set_ext_scan_enable),
        HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
        HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
               hci_cc_le_read_num_adv_sets,
               sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
        HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
               sizeof(struct hci_rp_le_set_ext_adv_params)),
        HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
                      hci_cc_le_set_ext_adv_enable),
        HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
                      hci_cc_le_set_adv_set_random_addr),
        HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
        HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
        HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
        HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
                      hci_cc_le_set_per_adv_enable),
        HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
               sizeof(struct hci_rp_le_read_transmit_power)),
        HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
        HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
               sizeof(struct hci_rp_le_read_buffer_size_v2)),
        HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
                  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
        HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
               sizeof(struct hci_rp_le_setup_iso_path)),
};
4189
4190 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4191                       struct sk_buff *skb)
4192 {
4193         void *data;
4194
4195         if (skb->len < cc->min_len) {
4196                 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4197                            cc->op, skb->len, cc->min_len);
4198                 return HCI_ERROR_UNSPECIFIED;
4199         }
4200
4201         /* Just warn if the length is over max_len size it still be possible to
4202          * partially parse the cc so leave to callback to decide if that is
4203          * acceptable.
4204          */
4205         if (skb->len > cc->max_len)
4206                 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4207                             cc->op, skb->len, cc->max_len);
4208
4209         data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4210         if (!data)
4211                 return HCI_ERROR_UNSPECIFIED;
4212
4213         return cc->func(hdev, data, skb);
4214 }
4215
4216 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4217                                  struct sk_buff *skb, u16 *opcode, u8 *status,
4218                                  hci_req_complete_t *req_complete,
4219                                  hci_req_complete_skb_t *req_complete_skb)
4220 {
4221         struct hci_ev_cmd_complete *ev = data;
4222         int i;
4223
4224         *opcode = __le16_to_cpu(ev->opcode);
4225
4226         bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4227
4228         for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4229                 if (hci_cc_table[i].op == *opcode) {
4230                         *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4231                         break;
4232                 }
4233         }
4234
4235         if (i == ARRAY_SIZE(hci_cc_table)) {
4236                 /* Unknown opcode, assume byte 0 contains the status, so
4237                  * that e.g. __hci_cmd_sync() properly returns errors
4238                  * for vendor specific commands send by HCI drivers.
4239                  * If a vendor doesn't actually follow this convention we may
4240                  * need to introduce a vendor CC table in order to properly set
4241                  * the status.
4242                  */
4243                 *status = skb->data[0];
4244         }
4245
4246         handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4247
4248         hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4249                              req_complete_skb);
4250
4251         if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4252                 bt_dev_err(hdev,
4253                            "unexpected event for opcode 0x%4.4x", *opcode);
4254                 return;
4255         }
4256
4257         if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4258                 queue_work(hdev->workqueue, &hdev->cmd_work);
4259 }
4260
/* Command Status handler for HCI_OP_LE_CREATE_CIS.
 *
 * On failure, tear down every CIS connection named in the originating
 * command, since no CIS Established events will follow for them.
 */
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
        struct hci_cp_le_create_cis *cp;
        int i;

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        /* Nothing to clean up if the command was accepted. */
        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        /* Remove connection if command failed */
        /* NOTE(review): the loop consumes cp->num_cis in place, mutating
         * the sent-command copy while indexing cp->cis[] — confirm nothing
         * reads num_cis from this skb afterwards.
         */
        for (i = 0; cp->num_cis; cp->num_cis--, i++) {
                struct hci_conn *conn;
                u16 handle;

                handle = __le16_to_cpu(cp->cis[i].cis_handle);

                conn = hci_conn_hash_lookup_handle(hdev, handle);
                if (conn) {
                        conn->state = BT_CLOSED;
                        hci_connect_cfm(conn, status);
                        hci_conn_del(conn);
                }
        }

        hci_dev_unlock(hdev);
}
4294
/* Helper to declare hci_cs_table entries: opcode and its Command Status
 * handler.
 */
#define HCI_CS(_op, _func) \
{ \
        .op = _op, \
        .func = _func, \
}
4300
/* Dispatch table mapping HCI opcodes to their Command Status handlers,
 * used by hci_cmd_status_evt().
 */
static const struct hci_cs {
        u16  op;        /* HCI opcode this entry applies to */
        void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
        HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
        HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
        HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
        HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
        HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
        HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
        HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
        HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
        HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
               hci_cs_read_remote_ext_features),
        HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
        HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
               hci_cs_enhanced_setup_sync_conn),
        HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
        HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
        HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
        HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
        HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
        HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
        HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
        HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
        HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
};
4328
/* Handle an HCI Command Status event: dispatch to the matching handler in
 * hci_cs_table, update command credit bookkeeping and, where appropriate,
 * complete the pending request for this opcode.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
                               struct sk_buff *skb, u16 *opcode, u8 *status,
                               hci_req_complete_t *req_complete,
                               hci_req_complete_skb_t *req_complete_skb)
{
        struct hci_ev_cmd_status *ev = data;
        int i;

        *opcode = __le16_to_cpu(ev->opcode);
        *status = ev->status;

        bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

        /* Run the per-opcode Command Status handler, if any. */
        for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
                if (hci_cs_table[i].op == *opcode) {
                        hci_cs_table[i].func(hdev, ev->status);
                        break;
                }
        }

        handle_cmd_cnt_and_timer(hdev, ev->ncmd);

        /* Indicate request completion if the command failed. Also, if
         * we're not waiting for a special event and we get a success
         * command status we should try to flag the request as completed
         * (since for this kind of commands there will not be a command
         * complete event).
         */
        if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
                hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
                                     req_complete_skb);
                if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
                        bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
                                   *opcode);
                        return;
                }
        }

        /* Kick the command work queue if credits and queued commands exist. */
        if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
                queue_work(hdev->workqueue, &hdev->cmd_work);
}
4370
/* Handle the HCI Hardware Error event: record the error code and schedule
 * the error-reset work to recover the controller.
 */
static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
{
        struct hci_ev_hardware_error *ev = data;

        bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);

        /* Stash the code so the reset path can inspect/report it. */
        hdev->hw_error_code = ev->code;

        queue_work(hdev->req_workqueue, &hdev->error_reset);
}
4382
4383 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4384                                 struct sk_buff *skb)
4385 {
4386         struct hci_ev_role_change *ev = data;
4387         struct hci_conn *conn;
4388
4389         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4390
4391         hci_dev_lock(hdev);
4392
4393         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4394         if (conn) {
4395                 if (!ev->status)
4396                         conn->role = ev->role;
4397
4398                 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4399
4400                 hci_role_switch_cfm(conn, ev->status, ev->role);
4401         }
4402
4403         hci_dev_unlock(hdev);
4404 }
4405
4406 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4407                                   struct sk_buff *skb)
4408 {
4409         struct hci_ev_num_comp_pkts *ev = data;
4410         int i;
4411
4412         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4413                              flex_array_size(ev, handles, ev->num)))
4414                 return;
4415
4416         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4417                 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4418                 return;
4419         }
4420
4421         bt_dev_dbg(hdev, "num %d", ev->num);
4422
4423         for (i = 0; i < ev->num; i++) {
4424                 struct hci_comp_pkts_info *info = &ev->handles[i];
4425                 struct hci_conn *conn;
4426                 __u16  handle, count;
4427
4428                 handle = __le16_to_cpu(info->handle);
4429                 count  = __le16_to_cpu(info->count);
4430
4431                 conn = hci_conn_hash_lookup_handle(hdev, handle);
4432                 if (!conn)
4433                         continue;
4434
4435                 conn->sent -= count;
4436
4437                 switch (conn->type) {
4438                 case ACL_LINK:
4439                         hdev->acl_cnt += count;
4440                         if (hdev->acl_cnt > hdev->acl_pkts)
4441                                 hdev->acl_cnt = hdev->acl_pkts;
4442                         break;
4443
4444                 case LE_LINK:
4445                         if (hdev->le_pkts) {
4446                                 hdev->le_cnt += count;
4447                                 if (hdev->le_cnt > hdev->le_pkts)
4448                                         hdev->le_cnt = hdev->le_pkts;
4449                         } else {
4450                                 hdev->acl_cnt += count;
4451                                 if (hdev->acl_cnt > hdev->acl_pkts)
4452                                         hdev->acl_cnt = hdev->acl_pkts;
4453                         }
4454                         break;
4455
4456                 case SCO_LINK:
4457                         hdev->sco_cnt += count;
4458                         if (hdev->sco_cnt > hdev->sco_pkts)
4459                                 hdev->sco_cnt = hdev->sco_pkts;
4460                         break;
4461
4462                 case ISO_LINK:
4463                         if (hdev->iso_pkts) {
4464                                 hdev->iso_cnt += count;
4465                                 if (hdev->iso_cnt > hdev->iso_pkts)
4466                                         hdev->iso_cnt = hdev->iso_pkts;
4467                         } else if (hdev->le_pkts) {
4468                                 hdev->le_cnt += count;
4469                                 if (hdev->le_cnt > hdev->le_pkts)
4470                                         hdev->le_cnt = hdev->le_pkts;
4471                         } else {
4472                                 hdev->acl_cnt += count;
4473                                 if (hdev->acl_cnt > hdev->acl_pkts)
4474                                         hdev->acl_cnt = hdev->acl_pkts;
4475                         }
4476                         break;
4477
4478                 default:
4479                         bt_dev_err(hdev, "unknown type %d conn %p",
4480                                    conn->type, conn);
4481                         break;
4482                 }
4483         }
4484
4485         queue_work(hdev->workqueue, &hdev->tx_work);
4486 }
4487
4488 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4489                                                  __u16 handle)
4490 {
4491         struct hci_chan *chan;
4492
4493         switch (hdev->dev_type) {
4494         case HCI_PRIMARY:
4495                 return hci_conn_hash_lookup_handle(hdev, handle);
4496         case HCI_AMP:
4497                 chan = hci_chan_lookup_handle(hdev, handle);
4498                 if (chan)
4499                         return chan->conn;
4500                 break;
4501         default:
4502                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4503                 break;
4504         }
4505
4506         return NULL;
4507 }
4508
4509 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4510                                     struct sk_buff *skb)
4511 {
4512         struct hci_ev_num_comp_blocks *ev = data;
4513         int i;
4514
4515         if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4516                              flex_array_size(ev, handles, ev->num_hndl)))
4517                 return;
4518
4519         if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4520                 bt_dev_err(hdev, "wrong event for mode %d",
4521                            hdev->flow_ctl_mode);
4522                 return;
4523         }
4524
4525         bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4526                    ev->num_hndl);
4527
4528         for (i = 0; i < ev->num_hndl; i++) {
4529                 struct hci_comp_blocks_info *info = &ev->handles[i];
4530                 struct hci_conn *conn = NULL;
4531                 __u16  handle, block_count;
4532
4533                 handle = __le16_to_cpu(info->handle);
4534                 block_count = __le16_to_cpu(info->blocks);
4535
4536                 conn = __hci_conn_lookup_handle(hdev, handle);
4537                 if (!conn)
4538                         continue;
4539
4540                 conn->sent -= block_count;
4541
4542                 switch (conn->type) {
4543                 case ACL_LINK:
4544                 case AMP_LINK:
4545                         hdev->block_cnt += block_count;
4546                         if (hdev->block_cnt > hdev->num_blocks)
4547                                 hdev->block_cnt = hdev->num_blocks;
4548                         break;
4549
4550                 default:
4551                         bt_dev_err(hdev, "unknown type %d conn %p",
4552                                    conn->type, conn);
4553                         break;
4554                 }
4555         }
4556
4557         queue_work(hdev->workqueue, &hdev->tx_work);
4558 }
4559
4560 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4561                                 struct sk_buff *skb)
4562 {
4563         struct hci_ev_mode_change *ev = data;
4564         struct hci_conn *conn;
4565
4566         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4567
4568         hci_dev_lock(hdev);
4569
4570         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4571         if (conn) {
4572                 conn->mode = ev->mode;
4573
4574                 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4575                                         &conn->flags)) {
4576                         if (conn->mode == HCI_CM_ACTIVE)
4577                                 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4578                         else
4579                                 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4580                 }
4581
4582                 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4583                         hci_sco_setup(conn, ev->status);
4584         }
4585
4586         hci_dev_unlock(hdev);
4587 }
4588
4589 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4590                                      struct sk_buff *skb)
4591 {
4592         struct hci_ev_pin_code_req *ev = data;
4593         struct hci_conn *conn;
4594
4595         bt_dev_dbg(hdev, "");
4596
4597         hci_dev_lock(hdev);
4598
4599         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4600         if (!conn)
4601                 goto unlock;
4602
4603         if (conn->state == BT_CONNECTED) {
4604                 hci_conn_hold(conn);
4605                 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4606                 hci_conn_drop(conn);
4607         }
4608
4609         if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4610             !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4611                 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4612                              sizeof(ev->bdaddr), &ev->bdaddr);
4613         } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4614                 u8 secure;
4615
4616                 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4617                         secure = 1;
4618                 else
4619                         secure = 0;
4620
4621                 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4622         }
4623
4624 unlock:
4625         hci_dev_unlock(hdev);
4626 }
4627
4628 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4629 {
4630         if (key_type == HCI_LK_CHANGED_COMBINATION)
4631                 return;
4632
4633         conn->pin_length = pin_len;
4634         conn->key_type = key_type;
4635
4636         switch (key_type) {
4637         case HCI_LK_LOCAL_UNIT:
4638         case HCI_LK_REMOTE_UNIT:
4639         case HCI_LK_DEBUG_COMBINATION:
4640                 return;
4641         case HCI_LK_COMBINATION:
4642                 if (pin_len == 16)
4643                         conn->pending_sec_level = BT_SECURITY_HIGH;
4644                 else
4645                         conn->pending_sec_level = BT_SECURITY_MEDIUM;
4646                 break;
4647         case HCI_LK_UNAUTH_COMBINATION_P192:
4648         case HCI_LK_UNAUTH_COMBINATION_P256:
4649                 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4650                 break;
4651         case HCI_LK_AUTH_COMBINATION_P192:
4652                 conn->pending_sec_level = BT_SECURITY_HIGH;
4653                 break;
4654         case HCI_LK_AUTH_COMBINATION_P256:
4655                 conn->pending_sec_level = BT_SECURITY_FIPS;
4656                 break;
4657         }
4658 }
4659
static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
                                     struct sk_buff *skb)
{
        struct hci_ev_link_key_req *ev = data;
        struct hci_cp_link_key_reply cp;
        struct hci_conn *conn;
        struct link_key *key;

        bt_dev_dbg(hdev, "");

        /* Stored link keys are only maintained when the management
         * interface is in use; otherwise send no reply at all.
         */
        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return;

        hci_dev_lock(hdev);

        key = hci_find_link_key(hdev, &ev->bdaddr);
        if (!key) {
                bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
                goto not_found;
        }

        bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
        if (conn) {
                clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

                /* Refuse an unauthenticated key when auth_type requests
                 * authentication (bit 0 set) and is not the 0xff value
                 * (presumably "not yet determined" — verify in hci_core).
                 */
                if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
                     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
                    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
                        bt_dev_dbg(hdev, "ignoring unauthenticated key");
                        goto not_found;
                }

                /* A combination key derived from a short (<16 digit) PIN
                 * is not acceptable for high or FIPS security levels.
                 */
                if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
                    (conn->pending_sec_level == BT_SECURITY_HIGH ||
                     conn->pending_sec_level == BT_SECURITY_FIPS)) {
                        bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
                        goto not_found;
                }

                conn_set_key(conn, key->type, key->pin_len);
        }

        /* Hand the stored key back to the controller */
        bacpy(&cp.bdaddr, &ev->bdaddr);
        memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

        hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

        hci_dev_unlock(hdev);

        return;

not_found:
        hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
        hci_dev_unlock(hdev);
}
4717
static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
                                    struct sk_buff *skb)
{
        struct hci_ev_link_key_notify *ev = data;
        struct hci_conn *conn;
        struct link_key *key;
        bool persistent;
        /* The event does not carry the PIN length used for the pairing */
        u8 pin_len = 0;

        bt_dev_dbg(hdev, "");

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
        if (!conn)
                goto unlock;

        /* Ignore NULL link key against CVE-2020-26555 */
        if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
                bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
                           &ev->bdaddr);
                hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
                hci_conn_drop(conn);
                goto unlock;
        }

        /* Reset the disconnect timeout back to the standard value now
         * that pairing produced a key (hold/drop so it takes effect).
         */
        hci_conn_hold(conn);
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
        hci_conn_drop(conn);

        set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
        conn_set_key(conn, ev->key_type, conn->pin_length);

        /* Keys are only stored and reported via the management interface */
        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                goto unlock;

        key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
                                ev->key_type, pin_len, &persistent);
        if (!key)
                goto unlock;

        /* Update connection information since adding the key will have
         * fixed up the type in the case of changed combination keys.
         */
        if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
                conn_set_key(conn, key->type, key->pin_len);

        mgmt_new_link_key(hdev, key, persistent);

        /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
         * is set. If it's not set simply remove the key from the kernel
         * list (we've still notified user space about it but with
         * store_hint being 0).
         */
        if (key->type == HCI_LK_DEBUG_COMBINATION &&
            !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
                list_del_rcu(&key->list);
                kfree_rcu(key, rcu);
                goto unlock;
        }

        /* Record whether the key should survive the connection */
        if (persistent)
                clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
        else
                set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
        hci_dev_unlock(hdev);
}
4787
4788 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4789                                  struct sk_buff *skb)
4790 {
4791         struct hci_ev_clock_offset *ev = data;
4792         struct hci_conn *conn;
4793
4794         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4795
4796         hci_dev_lock(hdev);
4797
4798         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4799         if (conn && !ev->status) {
4800                 struct inquiry_entry *ie;
4801
4802                 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4803                 if (ie) {
4804                         ie->data.clock_offset = ev->clock_offset;
4805                         ie->timestamp = jiffies;
4806                 }
4807         }
4808
4809         hci_dev_unlock(hdev);
4810 }
4811
4812 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4813                                     struct sk_buff *skb)
4814 {
4815         struct hci_ev_pkt_type_change *ev = data;
4816         struct hci_conn *conn;
4817
4818         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4819
4820         hci_dev_lock(hdev);
4821
4822         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4823         if (conn && !ev->status)
4824                 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4825
4826         hci_dev_unlock(hdev);
4827 }
4828
4829 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4830                                    struct sk_buff *skb)
4831 {
4832         struct hci_ev_pscan_rep_mode *ev = data;
4833         struct inquiry_entry *ie;
4834
4835         bt_dev_dbg(hdev, "");
4836
4837         hci_dev_lock(hdev);
4838
4839         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4840         if (ie) {
4841                 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4842                 ie->timestamp = jiffies;
4843         }
4844
4845         hci_dev_unlock(hdev);
4846 }
4847
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
                                             struct sk_buff *skb)
{
        struct hci_ev_inquiry_result_rssi *ev = edata;
        struct inquiry_data data;
        int i;

        bt_dev_dbg(hdev, "num_rsp %d", ev->num);

        if (!ev->num)
                return;

        /* Ignore results while a periodic inquiry is active */
        if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
                return;

        hci_dev_lock(hdev);

        /* Controllers use one of two record layouts for this event —
         * with or without a pscan_mode byte.  Distinguish them by the
         * total payload length.
         */
        if (skb->len == array_size(ev->num,
                                   sizeof(struct inquiry_info_rssi_pscan))) {
                struct inquiry_info_rssi_pscan *info;

                for (i = 0; i < ev->num; i++) {
                        u32 flags;

                        /* Bounds-check each record before reading it */
                        info = hci_ev_skb_pull(hdev, skb,
                                               HCI_EV_INQUIRY_RESULT_WITH_RSSI,
                                               sizeof(*info));
                        if (!info) {
                                bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
                                           HCI_EV_INQUIRY_RESULT_WITH_RSSI);
                                goto unlock;
                        }

                        bacpy(&data.bdaddr, &info->bdaddr);
                        data.pscan_rep_mode     = info->pscan_rep_mode;
                        data.pscan_period_mode  = info->pscan_period_mode;
                        data.pscan_mode         = info->pscan_mode;
                        memcpy(data.dev_class, info->dev_class, 3);
                        data.clock_offset       = info->clock_offset;
                        data.rssi               = info->rssi;
                        data.ssp_mode           = 0x00;

                        flags = hci_inquiry_cache_update(hdev, &data, false);

                        mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                          info->dev_class, info->rssi,
                                          flags, NULL, 0, NULL, 0, 0);
                }
        } else if (skb->len == array_size(ev->num,
                                          sizeof(struct inquiry_info_rssi))) {
                struct inquiry_info_rssi *info;

                for (i = 0; i < ev->num; i++) {
                        u32 flags;

                        /* Bounds-check each record before reading it */
                        info = hci_ev_skb_pull(hdev, skb,
                                               HCI_EV_INQUIRY_RESULT_WITH_RSSI,
                                               sizeof(*info));
                        if (!info) {
                                bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
                                           HCI_EV_INQUIRY_RESULT_WITH_RSSI);
                                goto unlock;
                        }

                        bacpy(&data.bdaddr, &info->bdaddr);
                        data.pscan_rep_mode     = info->pscan_rep_mode;
                        data.pscan_period_mode  = info->pscan_period_mode;
                        /* This layout carries no pscan_mode field */
                        data.pscan_mode         = 0x00;
                        memcpy(data.dev_class, info->dev_class, 3);
                        data.clock_offset       = info->clock_offset;
                        data.rssi               = info->rssi;
                        data.ssp_mode           = 0x00;

                        flags = hci_inquiry_cache_update(hdev, &data, false);

                        mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                          info->dev_class, info->rssi,
                                          flags, NULL, 0, NULL, 0, 0);
                }
        } else {
                bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
                           HCI_EV_INQUIRY_RESULT_WITH_RSSI);
        }
unlock:
        hci_dev_unlock(hdev);
}
4934
static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
                                        struct sk_buff *skb)
{
        struct hci_ev_remote_ext_features *ev = data;
        struct hci_conn *conn;

        bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
        if (!conn)
                goto unlock;

        /* Cache the reported feature page on the connection */
        if (ev->page < HCI_MAX_PAGES)
                memcpy(conn->features[ev->page], ev->features, 8);

        /* Page 0x01 carries the remote host features (SSP/SC bits) */
        if (!ev->status && ev->page == 0x01) {
                struct inquiry_entry *ie;

                ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
                if (ie)
                        ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

                if (ev->features[0] & LMP_HOST_SSP) {
                        set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
                } else {
                        /* It is mandatory by the Bluetooth specification that
                         * Extended Inquiry Results are only used when Secure
                         * Simple Pairing is enabled, but some devices violate
                         * this.
                         *
                         * To make these devices work, the internal SSP
                         * enabled flag needs to be cleared if the remote host
                         * features do not indicate SSP support */
                        clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
                }

                if (ev->features[0] & LMP_HOST_SC)
                        set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
        }

        /* The rest only applies while the connection is being set up */
        if (conn->state != BT_CONFIG)
                goto unlock;

        /* Resolve the remote name before reporting the device as
         * connected to the management layer.
         */
        if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
                struct hci_cp_remote_name_req cp;
                memset(&cp, 0, sizeof(cp));
                bacpy(&cp.bdaddr, &conn->dst);
                cp.pscan_rep_mode = 0x02;
                hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
        } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
                mgmt_device_connected(hdev, conn, NULL, 0);

        /* If no outgoing authentication is required, the connection is
         * fully established now.
         */
        if (!hci_outgoing_auth_needed(hdev, conn)) {
                conn->state = BT_CONNECTED;
                hci_connect_cfm(conn, ev->status);
                hci_conn_drop(conn);
        }

unlock:
        hci_dev_unlock(hdev);
}
4998
/* Handle HCI_Synchronous_Connection_Complete: finalize setup of a SCO or
 * eSCO link, retrying with downgraded parameters on selected failures.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
                                       struct sk_buff *skb)
{
        struct hci_ev_sync_conn_complete *ev = data;
        struct hci_conn *conn;
        u8 status = ev->status;

        switch (ev->link_type) {
        case SCO_LINK:
        case ESCO_LINK:
                break;
        default:
                /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
                 * for HCI_Synchronous_Connection_Complete is limited to
                 * either SCO or eSCO
                 */
                bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
                return;
        }

        bt_dev_dbg(hdev, "status 0x%2.2x", status);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
        if (!conn) {
                if (ev->link_type == ESCO_LINK)
                        goto unlock;

                /* When the link type in the event indicates SCO connection
                 * and lookup of the connection object fails, then check
                 * if an eSCO connection object exists.
                 *
                 * The core limits the synchronous connections to either
                 * SCO or eSCO. The eSCO connection is preferred and tried
                 * to be setup first and until successfully established,
                 * the link type will be hinted as eSCO.
                 */
                conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
                if (!conn)
                        goto unlock;
        }

        /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
         * Processing it more than once per connection can corrupt kernel memory.
         *
         * As the connection handle is set here for the first time, it indicates
         * whether the connection is already set up.
         */
        if (conn->handle != HCI_CONN_HANDLE_UNSET) {
                bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
                goto unlock;
        }

        switch (status) {
        case 0x00:
                /* Reject controller-assigned handles above the valid range */
                conn->handle = __le16_to_cpu(ev->handle);
                if (conn->handle > HCI_CONN_HANDLE_MAX) {
                        bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
                                   conn->handle, HCI_CONN_HANDLE_MAX);
                        status = HCI_ERROR_INVALID_PARAMETERS;
                        conn->state = BT_CLOSED;
                        break;
                }

                conn->state  = BT_CONNECTED;
                conn->type   = ev->link_type;

                hci_debugfs_create_conn(conn);
                hci_conn_add_sysfs(conn);
                break;

        /* For these failures an outgoing setup is retried with a
         * restricted packet-type selection before giving up.
         */
        case 0x10:      /* Connection Accept Timeout */
        case 0x0d:      /* Connection Rejected due to Limited Resources */
        case 0x11:      /* Unsupported Feature or Parameter Value */
        case 0x1c:      /* SCO interval rejected */
        case 0x1a:      /* Unsupported Remote Feature */
        case 0x1e:      /* Invalid LMP Parameters */
        case 0x1f:      /* Unspecified error */
        case 0x20:      /* Unsupported LMP Parameter value */
                if (conn->out) {
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
                                        (hdev->esco_type & EDR_ESCO_MASK);
                        /* Retry in progress; keep the connection alive */
                        if (hci_setup_sync(conn, conn->link->handle))
                                goto unlock;
                }
                fallthrough;

        default:
                conn->state = BT_CLOSED;
                break;
        }

        bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
        /* Notify only in case of SCO over HCI transport data path which
         * is zero and non-zero value shall be non-HCI transport data path
         */
        if (conn->codec.data_path == 0 && hdev->notify) {
                switch (ev->air_mode) {
                case 0x02:
                        hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
                        break;
                case 0x03:
                        hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
                        break;
                }
        }

        hci_connect_cfm(conn, status);
        if (status)
                hci_conn_del(conn);

unlock:
        hci_dev_unlock(hdev);
}
5114
5115 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5116 {
5117         size_t parsed = 0;
5118
5119         while (parsed < eir_len) {
5120                 u8 field_len = eir[0];
5121
5122                 if (field_len == 0)
5123                         return parsed;
5124
5125                 parsed += field_len + 1;
5126                 eir += field_len + 1;
5127         }
5128
5129         return eir_len;
5130 }
5131
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
                                            struct sk_buff *skb)
{
        struct hci_ev_ext_inquiry_result *ev = edata;
        struct inquiry_data data;
        size_t eir_len;
        int i;

        /* Verify the skb contains every advertised result record */
        if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
                             flex_array_size(ev, info, ev->num)))
                return;

        bt_dev_dbg(hdev, "num %d", ev->num);

        if (!ev->num)
                return;

        /* Ignore results while a periodic inquiry is active */
        if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
                return;

        hci_dev_lock(hdev);

        for (i = 0; i < ev->num; i++) {
                struct extended_inquiry_info *info = &ev->info[i];
                u32 flags;
                bool name_known;

                bacpy(&data.bdaddr, &info->bdaddr);
                data.pscan_rep_mode     = info->pscan_rep_mode;
                data.pscan_period_mode  = info->pscan_period_mode;
                data.pscan_mode         = 0x00;
                memcpy(data.dev_class, info->dev_class, 3);
                data.clock_offset       = info->clock_offset;
                data.rssi               = info->rssi;
                /* Extended inquiry results imply SSP support */
                data.ssp_mode           = 0x01;

                /* With mgmt in use, a name request is only needed when
                 * the EIR data does not already contain a complete name.
                 */
                if (hci_dev_test_flag(hdev, HCI_MGMT))
                        name_known = eir_get_data(info->data,
                                                  sizeof(info->data),
                                                  EIR_NAME_COMPLETE, NULL);
                else
                        name_known = true;

                flags = hci_inquiry_cache_update(hdev, &data, name_known);

                /* Report only the significant portion of the EIR data */
                eir_len = eir_get_length(info->data, sizeof(info->data));

                mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
                                  info->dev_class, info->rssi,
                                  flags, info->data, eir_len, NULL, 0, 0);
        }

        hci_dev_unlock(hdev);
}
5186
/* HCI Encryption Key Refresh Complete event.
 *
 * Concludes an encryption/re-encryption procedure.  Only LE links are
 * handled here; BR/EDR takes the equivalent steps from auth_complete.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
		   __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	/* On success the pending security level becomes effective */
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Encryption failed on an established link: disconnect it */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		/* Connection setup completes once encryption is up */
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the link alive for the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5236
5237 static u8 hci_get_auth_req(struct hci_conn *conn)
5238 {
5239         /* If remote requests no-bonding follow that lead */
5240         if (conn->remote_auth == HCI_AT_NO_BONDING ||
5241             conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5242                 return conn->remote_auth | (conn->auth_type & 0x01);
5243
5244         /* If both remote and local have enough IO capabilities, require
5245          * MITM protection
5246          */
5247         if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5248             conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5249                 return conn->remote_auth | 0x01;
5250
5251         /* No MITM protection possible so ignore remote requirement */
5252         return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5253 }
5254
/* Compute the "OOB data present" value for an IO Capability Reply,
 * based on which out-of-band pairing data is stored for the peer.
 * Returns 0x00 when no usable OOB data exists; otherwise a non-zero
 * HCI encoding (0x01 for P-192, 0x02 for P-256, or the stored value).
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}
5296
/* HCI IO Capability Request event: the controller asks the host for
 * its IO capabilities during SSP pairing.  Replies with either an IO
 * Capability Reply or a negative reply when pairing is not allowed.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		goto unlock;

	/* Assume remote supports SSP since it has triggered this event */
	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);

	/* Reference is dropped when pairing completes or fails */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5369
5370 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5371                                   struct sk_buff *skb)
5372 {
5373         struct hci_ev_io_capa_reply *ev = data;
5374         struct hci_conn *conn;
5375
5376         bt_dev_dbg(hdev, "");
5377
5378         hci_dev_lock(hdev);
5379
5380         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5381         if (!conn)
5382                 goto unlock;
5383
5384         conn->remote_cap = ev->capability;
5385         conn->remote_auth = ev->authentication;
5386
5387 unlock:
5388         hci_dev_unlock(hdev);
5389 }
5390
/* HCI User Confirmation Request event (numeric comparison pairing).
 *
 * Decides between rejecting, auto-accepting (possibly delayed), or
 * forwarding the confirmation to user space based on the MITM
 * requirements and IO capabilities of both sides.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		/* If there already exists link key in local host, leave the
		 * decision to user space since the remote device could be
		 * legitimate or malicious.
		 */
		if (hci_find_link_key(hdev, &ev->bdaddr)) {
			bt_dev_dbg(hdev, "Local host already has link key");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept (mitigates some
		 * just-works MITM tricks by giving user space a window).
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
5475
5476 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5477                                          struct sk_buff *skb)
5478 {
5479         struct hci_ev_user_passkey_req *ev = data;
5480
5481         bt_dev_dbg(hdev, "");
5482
5483         if (hci_dev_test_flag(hdev, HCI_MGMT))
5484                 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5485 }
5486
5487 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5488                                         struct sk_buff *skb)
5489 {
5490         struct hci_ev_user_passkey_notify *ev = data;
5491         struct hci_conn *conn;
5492
5493         bt_dev_dbg(hdev, "");
5494
5495         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5496         if (!conn)
5497                 return;
5498
5499         conn->passkey_notify = __le32_to_cpu(ev->passkey);
5500         conn->passkey_entered = 0;
5501
5502         if (hci_dev_test_flag(hdev, HCI_MGMT))
5503                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5504                                          conn->dst_type, conn->passkey_notify,
5505                                          conn->passkey_entered);
5506 }
5507
5508 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5509                                     struct sk_buff *skb)
5510 {
5511         struct hci_ev_keypress_notify *ev = data;
5512         struct hci_conn *conn;
5513
5514         bt_dev_dbg(hdev, "");
5515
5516         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5517         if (!conn)
5518                 return;
5519
5520         switch (ev->type) {
5521         case HCI_KEYPRESS_STARTED:
5522                 conn->passkey_entered = 0;
5523                 return;
5524
5525         case HCI_KEYPRESS_ENTERED:
5526                 conn->passkey_entered++;
5527                 break;
5528
5529         case HCI_KEYPRESS_ERASED:
5530                 conn->passkey_entered--;
5531                 break;
5532
5533         case HCI_KEYPRESS_CLEARED:
5534                 conn->passkey_entered = 0;
5535                 break;
5536
5537         case HCI_KEYPRESS_COMPLETED:
5538                 return;
5539         }
5540
5541         if (hci_dev_test_flag(hdev, HCI_MGMT))
5542                 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5543                                          conn->dst_type, conn->passkey_notify,
5544                                          conn->passkey_entered);
5545 }
5546
/* HCI Simple Pairing Complete event: SSP pairing finished (with or
 * without error).  Drops the reference taken in the IO capability
 * request handler and reports failures to user space when needed.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn || !hci_conn_ssp_enabled(conn))
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	/* Balances the hci_conn_hold() from hci_io_capa_request_evt() */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
5577
5578 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5579                                          struct sk_buff *skb)
5580 {
5581         struct hci_ev_remote_host_features *ev = data;
5582         struct inquiry_entry *ie;
5583         struct hci_conn *conn;
5584
5585         bt_dev_dbg(hdev, "");
5586
5587         hci_dev_lock(hdev);
5588
5589         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5590         if (conn)
5591                 memcpy(conn->features[1], ev->features, 8);
5592
5593         ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5594         if (ie)
5595                 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5596
5597         hci_dev_unlock(hdev);
5598 }
5599
/* HCI Remote OOB Data Request event: the controller asks for the OOB
 * pairing data stored for the remote device.  Replies with the P-192
 * and/or P-256 values, or a negative reply when none are stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = edata;
	struct oob_data *data;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC Only mode the legacy P-192 values must not be
		 * used, so send them zeroed out.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Without Secure Connections only P-192 values apply */
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
5653
5654 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event: continue AMP setup by reading the final
 * local AMP assoc data for the physical link.
 */
static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_channel_selected *ev = data;
	struct hci_conn *hcon;

	bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	amp_read_loc_assoc_final_data(hdev, hcon);
}
5669
/* AMP Physical Link Complete event: on success, transition the AMP
 * hci_conn to connected state and inform the AMP manager; on error,
 * delete the connection.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = data;
	struct hci_conn *hcon, *bredr_hcon;

	bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
		   ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		goto unlock;

	/* Only links created through an AMP manager are handled here */
	if (!hcon->amp_mgr)
		goto unlock;

	if (ev->status) {
		hci_conn_del(hcon);
		goto unlock;
	}

	/* The BR/EDR connection this AMP physical link augments */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

unlock:
	hci_dev_unlock(hdev);
}
5710
/* AMP Logical Link Complete event: create the hci_chan representing
 * the logical link and, if an L2CAP channel is being moved to this
 * AMP link, confirm the move to L2CAP.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
		   le16_to_cpu(ev->handle), ev->phy_handle, ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);
	hchan->amp = true;

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use block-based flow control; adopt the
		 * controller's block MTU for the L2CAP connection.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
5749
/* AMP Disconnection Logical Link Complete event: tear down the
 * corresponding AMP hci_chan.
 */
static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_logical_link_complete *ev = data;
	struct hci_chan *hchan;

	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
		   le16_to_cpu(ev->handle), ev->status);

	/* Nothing to clean up if the disconnect itself failed */
	if (ev->status)
		return;

	hci_dev_lock(hdev);

	/* Only AMP channels are destroyed here */
	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
	if (!hchan || !hchan->amp)
		goto unlock;

	amp_destroy_logical_link(hchan, ev->reason);

unlock:
	hci_dev_unlock(hdev);
}
5773
/* AMP Disconnection Physical Link Complete event: close and delete
 * the AMP connection object.
 */
static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_phy_link_complete *ev = data;
	struct hci_conn *hcon;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* Nothing to clean up if the disconnect itself failed */
	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (hcon && hcon->type == AMP_LINK) {
		hcon->state = BT_CLOSED;
		hci_disconn_cfm(hcon, ev->reason);
		hci_conn_del(hcon);
	}

	hci_dev_unlock(hdev);
}
5796 #endif
5797
/* Fill in the initiator/responder address information of a newly
 * established LE connection, based on our role in it (conn->out is
 * set when we are the initiator/central).
 */
static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
				u8 bdaddr_type, bdaddr_t *local_rpa)
{
	if (conn->out) {
		/* We initiated: the peer is the responder */
		conn->dst_type = bdaddr_type;
		conn->resp_addr_type = bdaddr_type;
		bacpy(&conn->resp_addr, bdaddr);

		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, local_rpa);
		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->init_addr, &conn->hdev->rpa);
		} else {
			hci_copy_identity_address(conn->hdev, &conn->init_addr,
						  &conn->init_addr_type);
		}
	} else {
		/* Peer initiated: we are the responder (advertiser) */
		conn->resp_addr_type = conn->hdev->adv_addr_type;
		/* Check if the controller has set a Local RPA then it must be
		 * used instead of hdev->rpa.
		 */
		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
			bacpy(&conn->resp_addr, local_rpa);
		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			/* In case of ext adv, resp_addr will be updated in
			 * Adv Terminated event.
			 */
			if (!ext_adv_capable(conn->hdev))
				bacpy(&conn->resp_addr,
				      &conn->hdev->random_addr);
		} else {
			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
		}

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
	}
}
5850
5851 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5852                                  bdaddr_t *bdaddr, u8 bdaddr_type,
5853                                  bdaddr_t *local_rpa, u8 role, u16 handle,
5854                                  u16 interval, u16 latency,
5855                                  u16 supervision_timeout)
5856 {
5857         struct hci_conn_params *params;
5858         struct hci_conn *conn;
5859         struct smp_irk *irk;
5860         u8 addr_type;
5861
5862         hci_dev_lock(hdev);
5863
5864         /* All controllers implicitly stop advertising in the event of a
5865          * connection, so ensure that the state bit is cleared.
5866          */
5867         hci_dev_clear_flag(hdev, HCI_LE_ADV);
5868
5869         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5870         if (!conn) {
5871                 /* In case of error status and there is no connection pending
5872                  * just unlock as there is nothing to cleanup.
5873                  */
5874                 if (status)
5875                         goto unlock;
5876
5877                 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5878                 if (!conn) {
5879                         bt_dev_err(hdev, "no memory for new connection");
5880                         goto unlock;
5881                 }
5882
5883                 conn->dst_type = bdaddr_type;
5884
5885                 /* If we didn't have a hci_conn object previously
5886                  * but we're in central role this must be something
5887                  * initiated using an accept list. Since accept list based
5888                  * connections are not "first class citizens" we don't
5889                  * have full tracking of them. Therefore, we go ahead
5890                  * with a "best effort" approach of determining the
5891                  * initiator address based on the HCI_PRIVACY flag.
5892                  */
5893                 if (conn->out) {
5894                         conn->resp_addr_type = bdaddr_type;
5895                         bacpy(&conn->resp_addr, bdaddr);
5896                         if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5897                                 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5898                                 bacpy(&conn->init_addr, &hdev->rpa);
5899                         } else {
5900                                 hci_copy_identity_address(hdev,
5901                                                           &conn->init_addr,
5902                                                           &conn->init_addr_type);
5903                         }
5904                 }
5905         } else {
5906                 cancel_delayed_work(&conn->le_conn_timeout);
5907         }
5908
5909         /* The HCI_LE_Connection_Complete event is only sent once per connection.
5910          * Processing it more than once per connection can corrupt kernel memory.
5911          *
5912          * As the connection handle is set here for the first time, it indicates
5913          * whether the connection is already set up.
5914          */
5915         if (conn->handle != HCI_CONN_HANDLE_UNSET) {
5916                 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5917                 goto unlock;
5918         }
5919
5920         le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5921
5922         /* Lookup the identity address from the stored connection
5923          * address and address type.
5924          *
5925          * When establishing connections to an identity address, the
5926          * connection procedure will store the resolvable random
5927          * address first. Now if it can be converted back into the
5928          * identity address, start using the identity address from
5929          * now on.
5930          */
5931         irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5932         if (irk) {
5933                 bacpy(&conn->dst, &irk->bdaddr);
5934                 conn->dst_type = irk->addr_type;
5935         }
5936
5937         conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5938
5939         if (handle > HCI_CONN_HANDLE_MAX) {
5940                 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
5941                            HCI_CONN_HANDLE_MAX);
5942                 status = HCI_ERROR_INVALID_PARAMETERS;
5943         }
5944
5945         /* All connection failure handling is taken care of by the
5946          * hci_conn_failed function which is triggered by the HCI
5947          * request completion callbacks used for connecting.
5948          */
5949         if (status)
5950                 goto unlock;
5951
5952         /* Drop the connection if it has been aborted */
5953         if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5954                 hci_conn_drop(conn);
5955                 goto unlock;
5956         }
5957
5958         if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5959                 addr_type = BDADDR_LE_PUBLIC;
5960         else
5961                 addr_type = BDADDR_LE_RANDOM;
5962
5963         /* Drop the connection if the device is blocked */
5964         if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5965                 hci_conn_drop(conn);
5966                 goto unlock;
5967         }
5968
5969         if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5970                 mgmt_device_connected(hdev, conn, NULL, 0);
5971
5972         conn->sec_level = BT_SECURITY_LOW;
5973         conn->handle = handle;
5974         conn->state = BT_CONFIG;
5975
5976         /* Store current advertising instance as connection advertising instance
5977          * when sotfware rotation is in use so it can be re-enabled when
5978          * disconnected.
5979          */
5980         if (!ext_adv_capable(hdev))
5981                 conn->adv_instance = hdev->cur_adv_instance;
5982
5983         conn->le_conn_interval = interval;
5984         conn->le_conn_latency = latency;
5985         conn->le_supv_timeout = supervision_timeout;
5986
5987         hci_debugfs_create_conn(conn);
5988         hci_conn_add_sysfs(conn);
5989
5990         /* The remote features procedure is defined for central
5991          * role only. So only in case of an initiated connection
5992          * request the remote features.
5993          *
5994          * If the local controller supports peripheral-initiated features
5995          * exchange, then requesting the remote features in peripheral
5996          * role is possible. Otherwise just transition into the
5997          * connected state without requesting the remote features.
5998          */
5999         if (conn->out ||
6000             (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
6001                 struct hci_cp_le_read_remote_features cp;
6002
6003                 cp.handle = __cpu_to_le16(conn->handle);
6004
6005                 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
6006                              sizeof(cp), &cp);
6007
6008                 hci_conn_hold(conn);
6009         } else {
6010                 conn->state = BT_CONNECTED;
6011                 hci_connect_cfm(conn, status);
6012         }
6013
6014         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
6015                                            conn->dst_type);
6016         if (params) {
6017                 hci_pend_le_list_del_init(params);
6018                 if (params->conn) {
6019                         hci_conn_drop(params->conn);
6020                         hci_conn_put(params->conn);
6021                         params->conn = NULL;
6022                 }
6023         }
6024
6025 unlock:
6026         hci_update_passive_scan(hdev);
6027         hci_dev_unlock(hdev);
6028 }
6029
6030 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6031                                      struct sk_buff *skb)
6032 {
6033         struct hci_ev_le_conn_complete *ev = data;
6034
6035         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6036
6037         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6038                              NULL, ev->role, le16_to_cpu(ev->handle),
6039                              le16_to_cpu(ev->interval),
6040                              le16_to_cpu(ev->latency),
6041                              le16_to_cpu(ev->supervision_timeout));
6042 }
6043
6044 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6045                                          struct sk_buff *skb)
6046 {
6047         struct hci_ev_le_enh_conn_complete *ev = data;
6048
6049         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6050
6051         le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6052                              &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6053                              le16_to_cpu(ev->interval),
6054                              le16_to_cpu(ev->latency),
6055                              le16_to_cpu(ev->supervision_timeout));
6056 }
6057
/* Handle LE Advertising Set Terminated event: clean up the advertising
 * instance state and, on success, link the terminating set to the new
 * connection so it can be re-enabled after disconnection.
 */
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_evt_le_ext_adv_set_term *ev = data;
	struct hci_conn *conn;
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* The Bluetooth Core 5.3 specification clearly states that this event
	 * shall not be sent when the Host disables the advertising set. So in
	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
	 *
	 * When the Host disables an advertising set, all cleanup is done via
	 * its command callback and not needed to be duplicated here.
	 */
	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
		return;
	}

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, ev->handle);

	if (ev->status) {
		/* Error termination: if the instance is unknown there is
		 * nothing left to clean up.
		 */
		if (!adv)
			goto unlock;

		/* Remove advertising as it has been terminated */
		hci_remove_adv_instance(hdev, ev->handle);
		mgmt_advertising_removed(NULL, hdev, ev->handle);

		/* Keep HCI_LE_ADV set while any other instance is enabled */
		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
			if (adv->enabled)
				goto unlock;
		}

		/* We are no longer advertising, clear HCI_LE_ADV */
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
		goto unlock;
	}

	/* Successful termination (e.g. a connection was created) */
	if (adv)
		adv->enabled = false;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
	if (conn) {
		/* Store handle in the connection so the correct advertising
		 * instance can be re-enabled when disconnected.
		 */
		conn->adv_instance = ev->handle;

		/* Only fix up resp_addr when advertising used a random
		 * address and resp_addr has not been populated yet.
		 */
		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
		    bacmp(&conn->resp_addr, BDADDR_ANY))
			goto unlock;

		/* Handle 0 is the default set which uses hdev->random_addr */
		if (!ev->handle) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
			goto unlock;
		}

		if (adv)
			bacpy(&conn->resp_addr, &adv->random_addr);
	}

unlock:
	hci_dev_unlock(hdev);
}
6127
6128 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6129                                             struct sk_buff *skb)
6130 {
6131         struct hci_ev_le_conn_update_complete *ev = data;
6132         struct hci_conn *conn;
6133
6134         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6135
6136         if (ev->status)
6137                 return;
6138
6139         hci_dev_lock(hdev);
6140
6141         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6142         if (conn) {
6143                 conn->le_conn_interval = le16_to_cpu(ev->interval);
6144                 conn->le_conn_latency = le16_to_cpu(ev->latency);
6145                 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6146         }
6147
6148         hci_dev_unlock(hdev);
6149 }
6150
/* This function requires the caller holds hdev->lock
 *
 * Decide whether a connectable advertising report should trigger an
 * outgoing LE connection attempt and, if so, initiate one. Returns the
 * new connection object, or NULL when no attempt was made or the attempt
 * could not be started.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, bool addr_resolved,
					      u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked or hdev is suspended */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
	    hdev->suspended)
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	/* For non-explicit connects, the auto_connect policy decides which
	 * advertising types may trigger the attempt.
	 */
	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
			      HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
6242
/* Common processing for one advertising report (legacy or extended).
 *
 * Validates the report type and length, resolves RPAs to identity
 * addresses, possibly triggers a pending LE connection and forwards the
 * report to the management interface, merging ADV_IND/ADV_SCAN_IND with a
 * subsequent SCAN_RSP where possible.
 *
 * @direct_addr is non-NULL only for LE Direct Advertising Reports.
 * NOTE(review): @ctl_time is not read in this function body; it appears to
 * be kept for signature symmetry with callers — confirm before removing.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
			       bool ext_adv, bool ctl_time, u64 instant)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match, bdaddr_resolved;
	u32 flags;
	u8 *ptr;

	/* Reject reports whose type is not one of the known legacy types */
	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	if (len > max_adv_len(hdev)) {
		bt_dev_err_ratelimited(hdev,
				       "adv larger than maximum supported");
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */
	len = ptr - data;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
						  &bdaddr_resolved);

		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
				     type);
	if (!ext_adv && conn && type == LE_ADV_IND &&
	    len <= max_adv_len(hdev)) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Non-connectable advertising types are flagged as such for mgmt */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* All scan results should be sent up for Mesh systems */
	if (hci_dev_test_flag(hdev, HCI_MESH)) {
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, instant);
		return;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))
			return;

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the unlikely case that a controller just sends a scan
	 * response event that doesn't match the pending report, then
	 * it is marked as a standalone SCAN_RSP.
	 */
	if (type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_SCAN_RSP;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
	clear_pending_adv_report(hdev);
}
6451
6452 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6453                                   struct sk_buff *skb)
6454 {
6455         struct hci_ev_le_advertising_report *ev = data;
6456         u64 instant = jiffies;
6457
6458         if (!ev->num)
6459                 return;
6460
6461         hci_dev_lock(hdev);
6462
6463         while (ev->num--) {
6464                 struct hci_ev_le_advertising_info *info;
6465                 s8 rssi;
6466
6467                 info = hci_le_ev_skb_pull(hdev, skb,
6468                                           HCI_EV_LE_ADVERTISING_REPORT,
6469                                           sizeof(*info));
6470                 if (!info)
6471                         break;
6472
6473                 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6474                                         info->length + 1))
6475                         break;
6476
6477                 if (info->length <= max_adv_len(hdev)) {
6478                         rssi = info->data[info->length];
6479                         process_adv_report(hdev, info->type, &info->bdaddr,
6480                                            info->bdaddr_type, NULL, 0, rssi,
6481                                            info->data, info->length, false,
6482                                            false, instant);
6483                 } else {
6484                         bt_dev_err(hdev, "Dropping invalid advertising data");
6485                 }
6486         }
6487
6488         hci_dev_unlock(hdev);
6489 }
6490
6491 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6492 {
6493         if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6494                 switch (evt_type) {
6495                 case LE_LEGACY_ADV_IND:
6496                         return LE_ADV_IND;
6497                 case LE_LEGACY_ADV_DIRECT_IND:
6498                         return LE_ADV_DIRECT_IND;
6499                 case LE_LEGACY_ADV_SCAN_IND:
6500                         return LE_ADV_SCAN_IND;
6501                 case LE_LEGACY_NONCONN_IND:
6502                         return LE_ADV_NONCONN_IND;
6503                 case LE_LEGACY_SCAN_RSP_ADV:
6504                 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6505                         return LE_ADV_SCAN_RSP;
6506                 }
6507
6508                 goto invalid;
6509         }
6510
6511         if (evt_type & LE_EXT_ADV_CONN_IND) {
6512                 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6513                         return LE_ADV_DIRECT_IND;
6514
6515                 return LE_ADV_IND;
6516         }
6517
6518         if (evt_type & LE_EXT_ADV_SCAN_RSP)
6519                 return LE_ADV_SCAN_RSP;
6520
6521         if (evt_type & LE_EXT_ADV_SCAN_IND)
6522                 return LE_ADV_SCAN_IND;
6523
6524         if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6525             evt_type & LE_EXT_ADV_DIRECT_IND)
6526                 return LE_ADV_NONCONN_IND;
6527
6528 invalid:
6529         bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6530                                evt_type);
6531
6532         return LE_ADV_INVALID;
6533 }
6534
/* Handle LE Extended Advertising Report: iterate over all reports in the
 * event, translate each extended type to its legacy equivalent and feed it
 * to the common report processing path.
 */
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
				      struct sk_buff *skb)
{
	struct hci_ev_le_ext_adv_report *ev = data;
	u64 instant = jiffies;

	if (!ev->num)
		return;

	hci_dev_lock(hdev);

	while (ev->num--) {
		struct hci_ev_le_ext_adv_info *info;
		u8 legacy_evt_type;
		u16 evt_type;

		/* Pull the fixed-size report header... */
		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					  sizeof(*info));
		if (!info)
			break;

		/* ...followed by its variable-length advertising data */
		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
					info->length))
			break;

		/* Reports with an unmappable type are silently skipped */
		evt_type = __le16_to_cpu(info->type);
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
					   info->bdaddr_type, NULL, 0,
					   info->rssi, info->data, info->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
					   false, instant);
		}
	}

	hci_dev_unlock(hdev);
}
6573
6574 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6575 {
6576         struct hci_cp_le_pa_term_sync cp;
6577
6578         memset(&cp, 0, sizeof(cp));
6579         cp.handle = handle;
6580
6581         return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6582 }
6583
6584 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6585                                             struct sk_buff *skb)
6586 {
6587         struct hci_ev_le_pa_sync_established *ev = data;
6588         int mask = hdev->link_mode;
6589         __u8 flags = 0;
6590
6591         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6592
6593         if (ev->status)
6594                 return;
6595
6596         hci_dev_lock(hdev);
6597
6598         hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6599
6600         mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6601         if (!(mask & HCI_LM_ACCEPT))
6602                 hci_le_pa_term_sync(hdev, ev->handle);
6603
6604         hci_dev_unlock(hdev);
6605 }
6606
/* Handle LE Read Remote Features Complete: cache the remote feature bits
 * and finish the connection setup for connections still in BT_CONFIG.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		/* Cache the remote LE feature page on success */
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = 0x00;
			else
				status = ev->status;

			/* Feature exchange finished: complete the connection
			 * and drop the reference held during the exchange.
			 */
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
6648
/* Handle LE Long Term Key Request: look up a stored LTK matching the
 * connection and the event's EDiv/Rand, reply with the key on a match or
 * send a negative reply otherwise.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad the remainder of the reply buffer */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* No usable key: tell the controller to reject the request */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
6713
6714 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6715                                       u8 reason)
6716 {
6717         struct hci_cp_le_conn_param_req_neg_reply cp;
6718
6719         cp.handle = cpu_to_le16(handle);
6720         cp.reason = reason;
6721
6722         hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6723                      &cp);
6724 }
6725
/* Handle LE Remote Connection Parameter Request event.
 *
 * The remote peer proposes new connection parameters.  Validate them and
 * either accept with HCI_OP_LE_CONN_PARAM_REQ_REPLY or reject with a
 * negative reply carrying an appropriate error code.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	/* Reject requests for unknown or not-yet-established connections */
	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Refuse intervals larger than what is configured for this link */
	if (max > hcon->le_conn_max_interval)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	/* Reject parameter combinations outside the spec-valid ranges */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		/* If we track this peer, remember the new parameters for
		 * future reconnections and tell userspace (via store_hint)
		 * whether it should persist them.
		 */
		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else {
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the parameters exactly as requested; leave the connection
	 * event lengths (CE len) up to the controller (0).
	 */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
6789
6790 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6791                                          struct sk_buff *skb)
6792 {
6793         struct hci_ev_le_direct_adv_report *ev = data;
6794         u64 instant = jiffies;
6795         int i;
6796
6797         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6798                                 flex_array_size(ev, info, ev->num)))
6799                 return;
6800
6801         if (!ev->num)
6802                 return;
6803
6804         hci_dev_lock(hdev);
6805
6806         for (i = 0; i < ev->num; i++) {
6807                 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6808
6809                 process_adv_report(hdev, info->type, &info->bdaddr,
6810                                    info->bdaddr_type, &info->direct_addr,
6811                                    info->direct_addr_type, info->rssi, NULL, 0,
6812                                    false, false, instant);
6813         }
6814
6815         hci_dev_unlock(hdev);
6816 }
6817
6818 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6819                                   struct sk_buff *skb)
6820 {
6821         struct hci_ev_le_phy_update_complete *ev = data;
6822         struct hci_conn *conn;
6823
6824         bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6825
6826         if (ev->status)
6827                 return;
6828
6829         hci_dev_lock(hdev);
6830
6831         conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6832         if (!conn)
6833                 goto unlock;
6834
6835         conn->le_tx_phy = ev->tx_phy;
6836         conn->le_rx_phy = ev->rx_phy;
6837
6838 unlock:
6839         hci_dev_unlock(hdev);
6840 }
6841
/* Handle LE CIS Established event.
 *
 * Look up the ISO connection for the CIS handle, copy the negotiated QoS
 * parameters when acting as peripheral, and either complete the connection
 * setup or tear it down on an error status.
 */
static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
					struct sk_buff *skb)
{
	struct hci_evt_le_cis_established *ev = data;
	struct hci_conn *conn;
	u16 handle = __le16_to_cpu(ev->handle);

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev,
			   "Unable to find connection with handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	if (conn->type != ISO_LINK) {
		bt_dev_err(hdev,
			   "Invalid connection link type handle 0x%4.4x",
			   handle);
		goto unlock;
	}

	if (conn->role == HCI_ROLE_SLAVE) {
		__le32 interval;

		/* ev->c_latency/p_latency are 24-bit little-endian values;
		 * widen them through a zeroed 32-bit buffer before
		 * converting.
		 */
		memset(&interval, 0, sizeof(interval));

		/* NOTE(review): the transport latency fields (c_latency /
		 * p_latency) are stored into iso_qos.*.interval while
		 * ev->interval (the ISO interval) is stored into
		 * iso_qos.*.latency — this mapping looks swapped; confirm
		 * against the intended iso_qos field semantics.
		 */
		memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
		conn->iso_qos.in.interval = le32_to_cpu(interval);
		memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
		conn->iso_qos.out.interval = le32_to_cpu(interval);
		conn->iso_qos.in.latency = le16_to_cpu(ev->interval);
		conn->iso_qos.out.latency = le16_to_cpu(ev->interval);
		conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu);
		conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu);
		conn->iso_qos.in.phy = ev->c_phy;
		conn->iso_qos.out.phy = ev->p_phy;
	}

	/* On success, finish bringing the CIS up and create its data path */
	if (!ev->status) {
		conn->state = BT_CONNECTED;
		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		hci_iso_setup_path(conn);
		goto unlock;
	}

	/* Establishment failed: notify the protocol and drop the conn */
	hci_connect_cfm(conn, ev->status);
	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
6899
6900 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6901 {
6902         struct hci_cp_le_reject_cis cp;
6903
6904         memset(&cp, 0, sizeof(cp));
6905         cp.handle = handle;
6906         cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6907         hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6908 }
6909
6910 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6911 {
6912         struct hci_cp_le_accept_cis cp;
6913
6914         memset(&cp, 0, sizeof(cp));
6915         cp.handle = handle;
6916         hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6917 }
6918
/* Handle LE CIS Request event.
 *
 * The central requests establishment of a CIS on an existing LE ACL link.
 * Accept or reject based on the registered ISO protocol's answer, creating
 * the ISO hci_conn for the CIS handle when one does not exist yet.
 */
static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb)
{
	struct hci_evt_le_cis_req *ev = data;
	u16 acl_handle, cis_handle;
	struct hci_conn *acl, *cis;
	int mask;
	__u8 flags = 0;

	acl_handle = __le16_to_cpu(ev->acl_handle);
	cis_handle = __le16_to_cpu(ev->cis_handle);

	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);

	hci_dev_lock(hdev);

	/* The CIS request must refer to an existing ACL connection */
	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
	if (!acl)
		goto unlock;

	/* Ask the ISO protocol layer whether incoming CIS are accepted */
	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
	if (!(mask & HCI_LM_ACCEPT)) {
		hci_le_reject_cis(hdev, ev->cis_handle);
		goto unlock;
	}

	/* Reuse an existing ISO connection for this handle or create one */
	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
	if (!cis) {
		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
		if (!cis) {
			hci_le_reject_cis(hdev, ev->cis_handle);
			goto unlock;
		}
		cis->handle = cis_handle;
	}

	cis->iso_qos.cig = ev->cig_id;
	cis->iso_qos.cis = ev->cis_id;

	/* With HCI_PROTO_DEFER the accept/reject decision is deferred to
	 * userspace via hci_connect_cfm(); otherwise accept right away.
	 */
	if (!(flags & HCI_PROTO_DEFER)) {
		hci_le_accept_cis(hdev, ev->cis_handle);
	} else {
		cis->state = BT_CONNECT2;
		hci_connect_cfm(cis, 0);
	}

unlock:
	hci_dev_unlock(hdev);
}
6969
6970 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6971                                            struct sk_buff *skb)
6972 {
6973         struct hci_evt_le_create_big_complete *ev = data;
6974         struct hci_conn *conn;
6975
6976         BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6977
6978         if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6979                                 flex_array_size(ev, bis_handle, ev->num_bis)))
6980                 return;
6981
6982         hci_dev_lock(hdev);
6983
6984         conn = hci_conn_hash_lookup_big(hdev, ev->handle);
6985         if (!conn)
6986                 goto unlock;
6987
6988         if (conn->type != ISO_LINK) {
6989                 bt_dev_err(hdev,
6990                            "Invalid connection link type handle 0x%2.2x",
6991                            ev->handle);
6992                 goto unlock;
6993         }
6994
6995         if (ev->num_bis)
6996                 conn->handle = __le16_to_cpu(ev->bis_handle[0]);
6997
6998         if (!ev->status) {
6999                 conn->state = BT_CONNECTED;
7000                 hci_debugfs_create_conn(conn);
7001                 hci_conn_add_sysfs(conn);
7002                 hci_iso_setup_path(conn);
7003                 goto unlock;
7004         }
7005
7006         hci_connect_cfm(conn, ev->status);
7007         hci_conn_del(conn);
7008
7009 unlock:
7010         hci_dev_unlock(hdev);
7011 }
7012
/* Handle LE BIG Sync Established event.
 *
 * For every BIS handle reported, create (or reuse) the ISO connection,
 * record the QoS parameters of the synchronized stream and set up the
 * ISO data path.
 */
static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
					    struct sk_buff *skb)
{
	struct hci_evt_le_big_sync_estabilished *ev = data;
	struct hci_conn *bis;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	/* Validate that the variable-length BIS handle list is present */
	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
				flex_array_size(ev, bis, ev->num_bis)))
		return;

	if (ev->status)
		return;

	hci_dev_lock(hdev);

	for (i = 0; i < ev->num_bis; i++) {
		u16 handle = le16_to_cpu(ev->bis[i]);
		__le32 interval;

		bis = hci_conn_hash_lookup_handle(hdev, handle);
		if (!bis) {
			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
					   HCI_ROLE_SLAVE);
			if (!bis)
				continue;
			bis->handle = handle;
		}

		bis->iso_qos.big = ev->handle;
		/* ev->latency is a 24-bit little-endian value; widen it via
		 * a zeroed 32-bit buffer before converting.
		 */
		memset(&interval, 0, sizeof(interval));
		memcpy(&interval, ev->latency, sizeof(ev->latency));
		/* NOTE(review): the transport latency is stored in
		 * iso_qos.in.interval — confirm this mapping is intended.
		 */
		bis->iso_qos.in.interval = le32_to_cpu(interval);
		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
		bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
		bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);

		hci_iso_setup_path(bis);
	}

	hci_dev_unlock(hdev);
}
7057
7058 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7059                                            struct sk_buff *skb)
7060 {
7061         struct hci_evt_le_big_info_adv_report *ev = data;
7062         int mask = hdev->link_mode;
7063         __u8 flags = 0;
7064
7065         bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7066
7067         hci_dev_lock(hdev);
7068
7069         mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7070         if (!(mask & HCI_LM_ACCEPT))
7071                 hci_le_pa_term_sync(hdev, ev->sync_handle);
7072
7073         hci_dev_unlock(hdev);
7074 }
7075
/* Declare an LE subevent handler entry with distinct min/max lengths. */
#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Declare an LE subevent handler entry with a fixed length. */
#define HCI_LE_EV(_op, _func, _len) \
	HCI_LE_EV_VL(_op, _func, _len, _len)

/* Declare an LE subevent handler whose payload is just a status byte. */
#define HCI_LE_EV_STATUS(_op, _func) \
	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))

/* Entries in this table shall have their position according to the subevent
 * opcode they handle. Using the macros above is recommended since they
 * initialize each entry at its proper index with designated initializers;
 * that way subevents without a callback function can be omitted.
 */
static const struct hci_le_ev {
	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_le_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
		  sizeof(struct hci_ev_le_conn_complete)),
	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
		     sizeof(struct hci_ev_le_advertising_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
		  hci_le_conn_update_complete_evt,
		  sizeof(struct hci_ev_le_conn_update_complete)),
	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
		  hci_le_remote_feat_complete_evt,
		  sizeof(struct hci_ev_le_remote_feat_complete)),
	/* [0x05 = HCI_EV_LE_LTK_REQ] */
	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
		  sizeof(struct hci_ev_le_ltk_req)),
	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
		  hci_le_remote_conn_param_req_evt,
		  sizeof(struct hci_ev_le_remote_conn_param_req)),
	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
		  hci_le_enh_conn_complete_evt,
		  sizeof(struct hci_ev_le_enh_conn_complete)),
	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
		     sizeof(struct hci_ev_le_direct_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
		  sizeof(struct hci_ev_le_phy_update_complete)),
	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
		     sizeof(struct hci_ev_le_ext_adv_report),
		     HCI_MAX_EVENT_SIZE),
	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
		  hci_le_pa_sync_estabilished_evt,
		  sizeof(struct hci_ev_le_pa_sync_established)),
	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
		  sizeof(struct hci_evt_le_ext_adv_set_term)),
	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
		  sizeof(struct hci_evt_le_cis_established)),
	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
		  sizeof(struct hci_evt_le_cis_req)),
	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
		     hci_le_create_big_complete_evt,
		     sizeof(struct hci_evt_le_create_big_complete),
		     HCI_MAX_EVENT_SIZE),
	/* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
		     hci_le_big_sync_established_evt,
		     sizeof(struct hci_evt_le_big_sync_estabilished),
		     HCI_MAX_EVENT_SIZE),
	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
		     hci_le_big_info_adv_report_evt,
		     sizeof(struct hci_evt_le_big_info_adv_report),
		     HCI_MAX_EVENT_SIZE),
};
7165
/* Dispatch an LE Meta event to its subevent handler.
 *
 * If a pending request is an LE command (OGF 0x08) waiting on this
 * subevent, complete it first.  Then validate the subevent length
 * against the table entry and invoke the registered handler.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
	    hci_skb_event(hdev->req_skb) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->req_skb);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
				     req_complete_skb);
	}

	/* Table slots without a handler have a NULL func */
	subev = &hci_le_ev_table[ev->subevent];
	if (!subev->func)
		return;

	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);
		return;
	}

	/* Just warn if the length is over max_len since it may still be
	 * possible to partially parse the event, so leave it to the
	 * callback to decide whether that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);
	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
	if (!data)
		return;

	subev->func(hdev, data, skb);
}
7208
/* Check whether @skb (the last received event) completes the request
 * identified by @opcode, optionally matching a specific @event type.
 *
 * Returns true on a match.  As a side effect the event header (and, for
 * Command Complete, the cmd-complete parameters) are pulled from @skb so
 * the caller can access the remaining payload directly.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (!skb)
		return false;

	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
	if (!hdr)
		return false;

	/* When a specific event was requested, only its type matters */
	if (event) {
		if (hdr->evt != event)
			return false;
		return true;
	}

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)
		return false;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
			   hdr->evt);
		return false;
	}

	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
	if (!ev)
		return false;

	/* The completed opcode must be the one we are waiting for */
	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
		return false;
	}

	return true;
}
7252
/* Record what woke the controller while suspended.
 *
 * Called for every incoming event; only the first event seen while
 * hdev->suspended is set is recorded (wake_reason, wake_addr and
 * wake_addr_type) for later reporting to userspace via mgmt.
 */
static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
				  struct sk_buff *skb)
{
	struct hci_ev_le_advertising_info *adv;
	struct hci_ev_le_direct_adv_info *direct_adv;
	struct hci_ev_le_ext_adv_info *ext_adv;
	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
	const struct hci_ev_conn_request *conn_request = (void *)skb->data;

	hci_dev_lock(hdev);

	/* If we are currently suspended and this is the first BT event seen,
	 * save the wake reason associated with the event.
	 */
	if (!hdev->suspended || hdev->wake_reason)
		goto unlock;

	/* Default to remote wake. Values for wake_reason are documented in the
	 * Bluez mgmt api docs.
	 */
	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;

	/* Once configured for remote wakeup, we should only wake up for
	 * reconnections. It's useful to see which device is waking us up so
	 * keep track of the bdaddr of the connection event that woke us up.
	 */
	if (event == HCI_EV_CONN_REQUEST) {
		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_CONN_COMPLETE) {
		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
		hdev->wake_addr_type = BDADDR_BREDR;
	} else if (event == HCI_EV_LE_META) {
		struct hci_ev_le_meta *le_ev = (void *)skb->data;
		u8 subevent = le_ev->subevent;
		/* All three advertising report subevents start with a
		 * one-byte report count followed by the first report.
		 */
		u8 *ptr = &skb->data[sizeof(*le_ev)];
		u8 num_reports = *ptr;

		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
		    num_reports) {
			adv = (void *)(ptr + 1);
			direct_adv = (void *)(ptr + 1);
			ext_adv = (void *)(ptr + 1);

			switch (subevent) {
			case HCI_EV_LE_ADVERTISING_REPORT:
				bacpy(&hdev->wake_addr, &adv->bdaddr);
				hdev->wake_addr_type = adv->bdaddr_type;
				break;
			case HCI_EV_LE_DIRECT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
				hdev->wake_addr_type = direct_adv->bdaddr_type;
				break;
			case HCI_EV_LE_EXT_ADV_REPORT:
				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
				hdev->wake_addr_type = ext_adv->bdaddr_type;
				break;
			}
		}
	} else {
		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
	}

unlock:
	hci_dev_unlock(hdev);
}
7321
/* Declare a variable-length HCI event handler entry (min/max length). */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = false, \
	.func = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Declare a fixed-length HCI event handler entry. */
#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

/* Declare a handler for an event whose payload is just a status byte. */
#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Declare a request-aware handler entry: the handler also receives the
 * opcode/status and the request completion callbacks.
 */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
[_op] = { \
	.req = true, \
	.func_req = _func, \
	.min_len = _min_len, \
	.max_len = _max_len, \
}

/* Fixed-length variant of HCI_EV_REQ_VL. */
#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)
7346
/* Entries in this table shall have their position according to the event opcode
 * they handle. Using the macros above is recommended since they initialize each
 * entry at its proper index with designated initializers; that way events
 * without a callback function don't need an entry.
 */
7352 static const struct hci_ev {
7353         bool req;
7354         union {
7355                 void (*func)(struct hci_dev *hdev, void *data,
7356                              struct sk_buff *skb);
7357                 void (*func_req)(struct hci_dev *hdev, void *data,
7358                                  struct sk_buff *skb, u16 *opcode, u8 *status,
7359                                  hci_req_complete_t *req_complete,
7360                                  hci_req_complete_skb_t *req_complete_skb);
7361         };
7362         u16  min_len;
7363         u16  max_len;
7364 } hci_ev_table[U8_MAX + 1] = {
7365         /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7366         HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7367         /* [0x02 = HCI_EV_INQUIRY_RESULT] */
7368         HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7369                   sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7370         /* [0x03 = HCI_EV_CONN_COMPLETE] */
7371         HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7372                sizeof(struct hci_ev_conn_complete)),
7373         /* [0x04 = HCI_EV_CONN_REQUEST] */
7374         HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7375                sizeof(struct hci_ev_conn_request)),
7376         /* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7377         HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7378                sizeof(struct hci_ev_disconn_complete)),
7379         /* [0x06 = HCI_EV_AUTH_COMPLETE] */
7380         HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7381                sizeof(struct hci_ev_auth_complete)),
7382         /* [0x07 = HCI_EV_REMOTE_NAME] */
7383         HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7384                sizeof(struct hci_ev_remote_name)),
7385         /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7386         HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7387                sizeof(struct hci_ev_encrypt_change)),
7388         /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7389         HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7390                hci_change_link_key_complete_evt,
7391                sizeof(struct hci_ev_change_link_key_complete)),
7392         /* [0x0b = HCI_EV_REMOTE_FEATURES] */
7393         HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7394                sizeof(struct hci_ev_remote_features)),
7395         /* [0x0e = HCI_EV_CMD_COMPLETE] */
7396         HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7397                       sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7398         /* [0x0f = HCI_EV_CMD_STATUS] */
7399         HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7400                    sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
7402         HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7403                sizeof(struct hci_ev_hardware_error)),
7404         /* [0x12 = HCI_EV_ROLE_CHANGE] */
7405         HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7406                sizeof(struct hci_ev_role_change)),
7407         /* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7408         HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7409                   sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7410         /* [0x14 = HCI_EV_MODE_CHANGE] */
7411         HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7412                sizeof(struct hci_ev_mode_change)),
7413         /* [0x16 = HCI_EV_PIN_CODE_REQ] */
7414         HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7415                sizeof(struct hci_ev_pin_code_req)),
7416         /* [0x17 = HCI_EV_LINK_KEY_REQ] */
7417         HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7418                sizeof(struct hci_ev_link_key_req)),
7419         /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7420         HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7421                sizeof(struct hci_ev_link_key_notify)),
7422         /* [0x1c = HCI_EV_CLOCK_OFFSET] */
7423         HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7424                sizeof(struct hci_ev_clock_offset)),
7425         /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7426         HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7427                sizeof(struct hci_ev_pkt_type_change)),
7428         /* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7429         HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7430                sizeof(struct hci_ev_pscan_rep_mode)),
7431         /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7432         HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7433                   hci_inquiry_result_with_rssi_evt,
7434                   sizeof(struct hci_ev_inquiry_result_rssi),
7435                   HCI_MAX_EVENT_SIZE),
7436         /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7437         HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7438                sizeof(struct hci_ev_remote_ext_features)),
7439         /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7440         HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7441                sizeof(struct hci_ev_sync_conn_complete)),
7442         /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7443         HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7444                   hci_extended_inquiry_result_evt,
7445                   sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7446         /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7447         HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7448                sizeof(struct hci_ev_key_refresh_complete)),
7449         /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7450         HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7451                sizeof(struct hci_ev_io_capa_request)),
7452         /* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7453         HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7454                sizeof(struct hci_ev_io_capa_reply)),
7455         /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7456         HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7457                sizeof(struct hci_ev_user_confirm_req)),
7458         /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7459         HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7460                sizeof(struct hci_ev_user_passkey_req)),
7461         /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7462         HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7463                sizeof(struct hci_ev_remote_oob_data_request)),
7464         /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7465         HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7466                sizeof(struct hci_ev_simple_pair_complete)),
7467         /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7468         HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7469                sizeof(struct hci_ev_user_passkey_notify)),
7470         /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7471         HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7472                sizeof(struct hci_ev_keypress_notify)),
7473         /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7474         HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7475                sizeof(struct hci_ev_remote_host_features)),
7476         /* [0x3e = HCI_EV_LE_META] */
7477         HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7478                       sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7479 #if IS_ENABLED(CONFIG_BT_HS)
7480         /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
7481         HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
7482                sizeof(struct hci_ev_phy_link_complete)),
7483         /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
7484         HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
7485                sizeof(struct hci_ev_channel_selected)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
7487         HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
7488                hci_disconn_loglink_complete_evt,
7489                sizeof(struct hci_ev_disconn_logical_link_complete)),
7490         /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
7491         HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
7492                sizeof(struct hci_ev_logical_link_complete)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
7494         HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
7495                hci_disconn_phylink_complete_evt,
7496                sizeof(struct hci_ev_disconn_phy_link_complete)),
7497 #endif
7498         /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
7499         HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
7500                sizeof(struct hci_ev_num_comp_blocks)),
7501         /* [0xff = HCI_EV_VENDOR] */
7502         HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7503 };
7504
7505 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7506                            u16 *opcode, u8 *status,
7507                            hci_req_complete_t *req_complete,
7508                            hci_req_complete_skb_t *req_complete_skb)
7509 {
7510         const struct hci_ev *ev = &hci_ev_table[event];
7511         void *data;
7512
7513         if (!ev->func)
7514                 return;
7515
7516         if (skb->len < ev->min_len) {
7517                 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7518                            event, skb->len, ev->min_len);
7519                 return;
7520         }
7521
7522         /* Just warn if the length is over max_len size it still be
7523          * possible to partially parse the event so leave to callback to
7524          * decide if that is acceptable.
7525          */
7526         if (skb->len > ev->max_len)
7527                 bt_dev_warn_ratelimited(hdev,
7528                                         "unexpected event 0x%2.2x length: %u > %u",
7529                                         event, skb->len, ev->max_len);
7530
7531         data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7532         if (!data)
7533                 return;
7534
7535         if (ev->req)
7536                 ev->func_req(hdev, data, skb, opcode, status, req_complete,
7537                              req_complete_skb);
7538         else
7539                 ev->func(hdev, data, skb);
7540 }
7541
/* Top-level entry point for a received HCI Event packet.
 *
 * Validates the header, caches a copy of the raw event for later
 * inspection, matches the event against a pending command request if
 * one is waiting, dispatches to the per-event handler via
 * hci_event_func(), and finally runs any request-completion callback.
 * Consumes @skb (always freed before returning).
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* Must at least contain the event header (code + length). */
	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "Malformed HCI Event");
		goto done;
	}

	/* Replace the cached copy of the most recent event with a clone
	 * of this one.  skb_clone() may fail, leaving recv_event NULL;
	 * consumers of recv_event must cope with that.
	 */
	kfree_skb(hdev->recv_event);
	hdev->recv_event = skb_clone(skb, GFP_KERNEL);

	event = hdr->evt;
	if (!event) {
		/* Event code 0x00 is not valid; drop the packet. */
		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
			    event);
		goto done;
	}

	/* Only match event if command OGF is not for LE
	 * (OGF 0x08 is the LE Controller command group, whose commands
	 * complete through LE meta events instead).
	 */
	if (hdev->req_skb &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
	    hci_skb_event(hdev->req_skb) == event) {
		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
				     status, &req_complete, &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	/* Strip the event header so handlers see only the payload. */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Store wake reason if we're suspended */
	hci_store_wake_reason(hdev, event, skb);

	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	/* Dispatch to the handler; may fill in opcode/status and the
	 * completion callbacks for the pending request.
	 */
	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
		       &req_complete_skb);

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Hand the pristine copy to the skb-aware callback; if
		 * this event is not the matching command response, drop
		 * the copy and pass NULL instead.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

done:
	/* kfree_skb(NULL) is a no-op, so both paths are safe here. */
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}